nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mingchen/protobuf-ios | 0958df34558cd54cb7b6e6ca5c8855bf3d475046 | compiler/python/google/protobuf/internal/containers.py | python | RepeatedScalarFieldContainer.__delslice__ | (self, start, stop) | Deletes the subset of items from between the specified indices. | Deletes the subset of items from between the specified indices. | [
"Deletes",
"the",
"subset",
"of",
"items",
"from",
"between",
"the",
"specified",
"indices",
"."
] | def __delslice__(self, start, stop):
"""Deletes the subset of items from between the specified indices."""
del self._values[start:stop]
self._message_listener.ByteSizeDirty() | [
"def",
"__delslice__",
"(",
"self",
",",
"start",
",",
"stop",
")",
":",
"del",
"self",
".",
"_values",
"[",
"start",
":",
"stop",
"]",
"self",
".",
"_message_listener",
".",
"ByteSizeDirty",
"(",
")"
] | https://github.com/mingchen/protobuf-ios/blob/0958df34558cd54cb7b6e6ca5c8855bf3d475046/compiler/python/google/protobuf/internal/containers.py#L156-L159 | ||
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/map_fn.py | python | _most_general_compatible_type | (spec) | Returns the most general TypeSpec compatible with `spec`. | Returns the most general TypeSpec compatible with `spec`. | [
"Returns",
"the",
"most",
"general",
"TypeSpec",
"compatible",
"with",
"spec",
"."
] | def _most_general_compatible_type(spec):
"""Returns the most general TypeSpec compatible with `spec`."""
# TODO(edloper): Consider adding most_general_compatible_type to TypeSpec API
if isinstance(spec, tensor_spec.TensorSpec):
return tensor_spec.TensorSpec(None, spec.dtype)
elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
# pylint: disable=protected-access
return ragged_tensor.RaggedTensorSpec(None, spec._dtype, spec._ragged_rank,
spec._row_splits_dtype)
elif isinstance(spec, sparse_tensor.SparseTensorSpec):
# pylint: disable=protected-access
return sparse_tensor.SparseTensorSpec(None, spec.dtype)
else:
return spec | [
"def",
"_most_general_compatible_type",
"(",
"spec",
")",
":",
"# TODO(edloper): Consider adding most_general_compatible_type to TypeSpec API",
"if",
"isinstance",
"(",
"spec",
",",
"tensor_spec",
".",
"TensorSpec",
")",
":",
"return",
"tensor_spec",
".",
"TensorSpec",
"(",
"None",
",",
"spec",
".",
"dtype",
")",
"elif",
"isinstance",
"(",
"spec",
",",
"ragged_tensor",
".",
"RaggedTensorSpec",
")",
":",
"# pylint: disable=protected-access",
"return",
"ragged_tensor",
".",
"RaggedTensorSpec",
"(",
"None",
",",
"spec",
".",
"_dtype",
",",
"spec",
".",
"_ragged_rank",
",",
"spec",
".",
"_row_splits_dtype",
")",
"elif",
"isinstance",
"(",
"spec",
",",
"sparse_tensor",
".",
"SparseTensorSpec",
")",
":",
"# pylint: disable=protected-access",
"return",
"sparse_tensor",
".",
"SparseTensorSpec",
"(",
"None",
",",
"spec",
".",
"dtype",
")",
"else",
":",
"return",
"spec"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/map_fn.py#L527-L540 | ||
chanyn/3Dpose_ssl | 585696676279683a279b1ecca136c0e0d02aef2a | caffe-3dssl/python/caffe/net_spec.py | python | to_proto | (*tops) | return net | Generate a NetParameter that contains all layers needed to compute
all arguments. | Generate a NetParameter that contains all layers needed to compute
all arguments. | [
"Generate",
"a",
"NetParameter",
"that",
"contains",
"all",
"layers",
"needed",
"to",
"compute",
"all",
"arguments",
"."
] | def to_proto(*tops):
"""Generate a NetParameter that contains all layers needed to compute
all arguments."""
layers = OrderedDict()
autonames = Counter()
for top in tops:
top.fn._to_proto(layers, {}, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net | [
"def",
"to_proto",
"(",
"*",
"tops",
")",
":",
"layers",
"=",
"OrderedDict",
"(",
")",
"autonames",
"=",
"Counter",
"(",
")",
"for",
"top",
"in",
"tops",
":",
"top",
".",
"fn",
".",
"_to_proto",
"(",
"layers",
",",
"{",
"}",
",",
"autonames",
")",
"net",
"=",
"caffe_pb2",
".",
"NetParameter",
"(",
")",
"net",
".",
"layer",
".",
"extend",
"(",
"layers",
".",
"values",
"(",
")",
")",
"return",
"net"
] | https://github.com/chanyn/3Dpose_ssl/blob/585696676279683a279b1ecca136c0e0d02aef2a/caffe-3dssl/python/caffe/net_spec.py#L43-L53 | |
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/examples/speech_commands/label_wav.py | python | main | (_) | Entry point for script, converts flags to arguments. | Entry point for script, converts flags to arguments. | [
"Entry",
"point",
"for",
"script",
"converts",
"flags",
"to",
"arguments",
"."
] | def main(_):
"""Entry point for script, converts flags to arguments."""
label_wav(FLAGS.wav, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
FLAGS.output_name, FLAGS.how_many_labels) | [
"def",
"main",
"(",
"_",
")",
":",
"label_wav",
"(",
"FLAGS",
".",
"wav",
",",
"FLAGS",
".",
"labels",
",",
"FLAGS",
".",
"graph",
",",
"FLAGS",
".",
"input_name",
",",
"FLAGS",
".",
"output_name",
",",
"FLAGS",
".",
"how_many_labels",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/examples/speech_commands/label_wav.py#L94-L97 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_windows.py | python | Dialog.CreateSeparatedButtonSizer | (*args, **kwargs) | return _windows_.Dialog_CreateSeparatedButtonSizer(*args, **kwargs) | CreateSeparatedButtonSizer(self, long flags) -> Sizer | CreateSeparatedButtonSizer(self, long flags) -> Sizer | [
"CreateSeparatedButtonSizer",
"(",
"self",
"long",
"flags",
")",
"-",
">",
"Sizer"
] | def CreateSeparatedButtonSizer(*args, **kwargs):
"""CreateSeparatedButtonSizer(self, long flags) -> Sizer"""
return _windows_.Dialog_CreateSeparatedButtonSizer(*args, **kwargs) | [
"def",
"CreateSeparatedButtonSizer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"Dialog_CreateSeparatedButtonSizer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L791-L793 | |
panda3d/panda3d | 833ad89ebad58395d0af0b7ec08538e5e4308265 | direct/src/leveleditor/ObjectMgrBase.py | python | ObjectMgrBase.updateObjectAnim | (self, anim, obj, fSelectObject=True) | replace object's anim | replace object's anim | [
"replace",
"object",
"s",
"anim"
] | def updateObjectAnim(self, anim, obj, fSelectObject=True):
""" replace object's anim """
if obj[OG.OBJ_ANIM] != anim:
base.direct.deselectAllCB()
objNP = obj[OG.OBJ_NP]
# load new anim
animName = os.path.basename(anim)
newAnim = objNP.loadAnims({animName:anim})
objNP.loop(animName)
obj[OG.OBJ_ANIM] = anim
if fSelectObject:
base.direct.select(objNP, fUndo=0)
self.editor.fNeedToSave = True | [
"def",
"updateObjectAnim",
"(",
"self",
",",
"anim",
",",
"obj",
",",
"fSelectObject",
"=",
"True",
")",
":",
"if",
"obj",
"[",
"OG",
".",
"OBJ_ANIM",
"]",
"!=",
"anim",
":",
"base",
".",
"direct",
".",
"deselectAllCB",
"(",
")",
"objNP",
"=",
"obj",
"[",
"OG",
".",
"OBJ_NP",
"]",
"# load new anim",
"animName",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"anim",
")",
"newAnim",
"=",
"objNP",
".",
"loadAnims",
"(",
"{",
"animName",
":",
"anim",
"}",
")",
"objNP",
".",
"loop",
"(",
"animName",
")",
"obj",
"[",
"OG",
".",
"OBJ_ANIM",
"]",
"=",
"anim",
"if",
"fSelectObject",
":",
"base",
".",
"direct",
".",
"select",
"(",
"objNP",
",",
"fUndo",
"=",
"0",
")",
"self",
".",
"editor",
".",
"fNeedToSave",
"=",
"True"
] | https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/leveleditor/ObjectMgrBase.py#L559-L573 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/labelbook.py | python | ImageContainerBase.OnMouseMove | (self, event) | Handles the ``wx.EVT_MOTION`` event for :class:`ImageContainerBase`.
:param `event`: a :class:`MouseEvent` event to be processed. | Handles the ``wx.EVT_MOTION`` event for :class:`ImageContainerBase`. | [
"Handles",
"the",
"wx",
".",
"EVT_MOTION",
"event",
"for",
":",
"class",
":",
"ImageContainerBase",
"."
] | def OnMouseMove(self, event):
"""
Handles the ``wx.EVT_MOTION`` event for :class:`ImageContainerBase`.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
style = self.GetParent().GetAGWWindowStyleFlag()
if style & INB_USE_PIN_BUTTON:
# Check to see if we are in the pin button rect
if not self._pinBtnRect.Contains(event.GetPosition()) and self._nPinButtonStatus == INB_PIN_PRESSED:
self._nPinButtonStatus = INB_PIN_NONE
dc = wx.ClientDC(self)
self.DrawPin(dc, self._pinBtnRect, not self._bCollapsed)
imgIdx, where = self.HitTest(event.GetPosition())
# Allow hovering unless over current tab or tab is disabled
self._nHoveredImgIdx = -1
if imgIdx < len(self._pagesInfoVec) and self.GetEnabled(imgIdx) and imgIdx != self._nIndex:
self._nHoveredImgIdx = imgIdx
if not self._bCollapsed:
if self._nHoveredImgIdx >= 0 and self.HasAGWFlag(INB_WEB_HILITE):
# Change the cursor to be Hand if we have the Web hover style set
wx.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
elif not self.PointOnSash(event.GetPosition()):
# Restore the cursor if we are not currently hovering the sash
wx.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
self.Refresh() | [
"def",
"OnMouseMove",
"(",
"self",
",",
"event",
")",
":",
"style",
"=",
"self",
".",
"GetParent",
"(",
")",
".",
"GetAGWWindowStyleFlag",
"(",
")",
"if",
"style",
"&",
"INB_USE_PIN_BUTTON",
":",
"# Check to see if we are in the pin button rect",
"if",
"not",
"self",
".",
"_pinBtnRect",
".",
"Contains",
"(",
"event",
".",
"GetPosition",
"(",
")",
")",
"and",
"self",
".",
"_nPinButtonStatus",
"==",
"INB_PIN_PRESSED",
":",
"self",
".",
"_nPinButtonStatus",
"=",
"INB_PIN_NONE",
"dc",
"=",
"wx",
".",
"ClientDC",
"(",
"self",
")",
"self",
".",
"DrawPin",
"(",
"dc",
",",
"self",
".",
"_pinBtnRect",
",",
"not",
"self",
".",
"_bCollapsed",
")",
"imgIdx",
",",
"where",
"=",
"self",
".",
"HitTest",
"(",
"event",
".",
"GetPosition",
"(",
")",
")",
"# Allow hovering unless over current tab or tab is disabled",
"self",
".",
"_nHoveredImgIdx",
"=",
"-",
"1",
"if",
"imgIdx",
"<",
"len",
"(",
"self",
".",
"_pagesInfoVec",
")",
"and",
"self",
".",
"GetEnabled",
"(",
"imgIdx",
")",
"and",
"imgIdx",
"!=",
"self",
".",
"_nIndex",
":",
"self",
".",
"_nHoveredImgIdx",
"=",
"imgIdx",
"if",
"not",
"self",
".",
"_bCollapsed",
":",
"if",
"self",
".",
"_nHoveredImgIdx",
">=",
"0",
"and",
"self",
".",
"HasAGWFlag",
"(",
"INB_WEB_HILITE",
")",
":",
"# Change the cursor to be Hand if we have the Web hover style set",
"wx",
".",
"SetCursor",
"(",
"wx",
".",
"StockCursor",
"(",
"wx",
".",
"CURSOR_HAND",
")",
")",
"elif",
"not",
"self",
".",
"PointOnSash",
"(",
"event",
".",
"GetPosition",
"(",
")",
")",
":",
"# Restore the cursor if we are not currently hovering the sash",
"wx",
".",
"SetCursor",
"(",
"wx",
".",
"StockCursor",
"(",
"wx",
".",
"CURSOR_ARROW",
")",
")",
"self",
".",
"Refresh",
"(",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/labelbook.py#L974-L1011 | ||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/seq2seq/python/ops/helper.py | python | ScheduledOutputTrainingHelper.__init__ | (self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None) | Initializer.
Args:
inputs: A (structure) of input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector. | Initializer. | [
"Initializer",
"."
] | def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure) of input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
self._next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name) | [
"def",
"__init__",
"(",
"self",
",",
"inputs",
",",
"sequence_length",
",",
"sampling_probability",
",",
"time_major",
"=",
"False",
",",
"seed",
"=",
"None",
",",
"next_inputs_fn",
"=",
"None",
",",
"auxiliary_inputs",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"ScheduledOutputTrainingHelper\"",
",",
"[",
"inputs",
",",
"auxiliary_inputs",
",",
"sampling_probability",
"]",
")",
":",
"self",
".",
"_sampling_probability",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"sampling_probability",
",",
"name",
"=",
"\"sampling_probability\"",
")",
"if",
"self",
".",
"_sampling_probability",
".",
"get_shape",
"(",
")",
".",
"ndims",
"not",
"in",
"(",
"0",
",",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"sampling_probability must be either a scalar or a vector. \"",
"\"saw shape: %s\"",
"%",
"(",
"self",
".",
"_sampling_probability",
".",
"get_shape",
"(",
")",
")",
")",
"if",
"auxiliary_inputs",
"is",
"None",
":",
"maybe_concatenated_inputs",
"=",
"inputs",
"else",
":",
"inputs",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"inputs",
",",
"name",
"=",
"\"inputs\"",
")",
"auxiliary_inputs",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"auxiliary_inputs",
",",
"name",
"=",
"\"auxiliary_inputs\"",
")",
"maybe_concatenated_inputs",
"=",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
",",
"y",
":",
"array_ops",
".",
"concat",
"(",
"(",
"x",
",",
"y",
")",
",",
"-",
"1",
")",
",",
"inputs",
",",
"auxiliary_inputs",
")",
"if",
"not",
"time_major",
":",
"auxiliary_inputs",
"=",
"nest",
".",
"map_structure",
"(",
"_transpose_batch_time",
",",
"auxiliary_inputs",
")",
"self",
".",
"_auxiliary_input_tas",
"=",
"(",
"nest",
".",
"map_structure",
"(",
"_unstack_ta",
",",
"auxiliary_inputs",
")",
"if",
"auxiliary_inputs",
"is",
"not",
"None",
"else",
"None",
")",
"self",
".",
"_seed",
"=",
"seed",
"self",
".",
"_next_inputs_fn",
"=",
"next_inputs_fn",
"super",
"(",
"ScheduledOutputTrainingHelper",
",",
"self",
")",
".",
"__init__",
"(",
"inputs",
"=",
"maybe_concatenated_inputs",
",",
"sequence_length",
"=",
"sequence_length",
",",
"time_major",
"=",
"time_major",
",",
"name",
"=",
"name",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/seq2seq/python/ops/helper.py#L352-L411 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/pydoc.py | python | HTMLDoc.heading | (self, title, fgcol, bgcol, extras='') | return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ') | Format a page heading. | Format a page heading. | [
"Format",
"a",
"page",
"heading",
"."
] | def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ') | [
"def",
"heading",
"(",
"self",
",",
"title",
",",
"fgcol",
",",
"bgcol",
",",
"extras",
"=",
"''",
")",
":",
"return",
"'''\n<table width=\"100%%\" cellspacing=0 cellpadding=2 border=0 summary=\"heading\">\n<tr bgcolor=\"%s\">\n<td valign=bottom> <br>\n<font color=\"%s\" face=\"helvetica, arial\"> <br>%s</font></td\n><td align=right valign=bottom\n><font color=\"%s\" face=\"helvetica, arial\">%s</font></td></tr></table>\n '''",
"%",
"(",
"bgcol",
",",
"fgcol",
",",
"title",
",",
"fgcol",
",",
"extras",
"or",
"' '",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/pydoc.py#L434-L443 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/rnn/python/ops/lstm_ops.py | python | LSTMBlockFusedCell.num_units | (self) | return self._num_units | Number of units in this cell (output dimension). | Number of units in this cell (output dimension). | [
"Number",
"of",
"units",
"in",
"this",
"cell",
"(",
"output",
"dimension",
")",
"."
] | def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units | [
"def",
"num_units",
"(",
"self",
")",
":",
"return",
"self",
".",
"_num_units"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/rnn/python/ops/lstm_ops.py#L590-L592 | |
cvxpy/cvxpy | 5165b4fb750dfd237de8659383ef24b4b2e33aaf | cvxpy/atoms/matrix_frac.py | python | MatrixFrac._domain | (self) | return [self.args[1] >> 0] | Returns constraints describing the domain of the node. | Returns constraints describing the domain of the node. | [
"Returns",
"constraints",
"describing",
"the",
"domain",
"of",
"the",
"node",
"."
] | def _domain(self) -> List[Constraint]:
"""Returns constraints describing the domain of the node.
"""
return [self.args[1] >> 0] | [
"def",
"_domain",
"(",
"self",
")",
"->",
"List",
"[",
"Constraint",
"]",
":",
"return",
"[",
"self",
".",
"args",
"[",
"1",
"]",
">>",
"0",
"]"
] | https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/atoms/matrix_frac.py#L48-L51 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/turtle.py | python | ScrolledCanvas.cget | (self, *args, **kwargs) | return self._canvas.cget(*args, **kwargs) | 'forward' method, which canvas itself has inherited... | 'forward' method, which canvas itself has inherited... | [
"forward",
"method",
"which",
"canvas",
"itself",
"has",
"inherited",
"..."
] | def cget(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
return self._canvas.cget(*args, **kwargs) | [
"def",
"cget",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_canvas",
".",
"cget",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/turtle.py#L427-L430 | |
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | PhysicsTools/HeppyCore/python/framework/services/service.py | python | Service.__init__ | (self, cfg, comp, outdir) | cfg: framework.config.Service object containing whatever parameters
you need
comp: dummy parameter
outdir: output directory for your service (feel free not to use it)
Please have a look at TFileService for more information | cfg: framework.config.Service object containing whatever parameters
you need
comp: dummy parameter
outdir: output directory for your service (feel free not to use it) | [
"cfg",
":",
"framework",
".",
"config",
".",
"Service",
"object",
"containing",
"whatever",
"parameters",
"you",
"need",
"comp",
":",
"dummy",
"parameter",
"outdir",
":",
"output",
"directory",
"for",
"your",
"service",
"(",
"feel",
"free",
"not",
"to",
"use",
"it",
")"
] | def __init__(self, cfg, comp, outdir):
'''
cfg: framework.config.Service object containing whatever parameters
you need
comp: dummy parameter
outdir: output directory for your service (feel free not to use it)
Please have a look at TFileService for more information
''' | [
"def",
"__init__",
"(",
"self",
",",
"cfg",
",",
"comp",
",",
"outdir",
")",
":"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/PhysicsTools/HeppyCore/python/framework/services/service.py#L7-L15 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/special/_generate_pyx.py | python | FusedFunc._get_python_wrap | (self) | return body | Generate a python wrapper for functions which pass their
arguments as pointers. | Generate a python wrapper for functions which pass their
arguments as pointers. | [
"Generate",
"a",
"python",
"wrapper",
"for",
"functions",
"which",
"pass",
"their",
"arguments",
"as",
"pointers",
"."
] | def _get_python_wrap(self):
"""Generate a python wrapper for functions which pass their
arguments as pointers.
"""
tab = " "*4
body, callvars = [], []
for (intype, _), invar in zip(self.intypes, self.invars):
callvars.append("{} {}".format(intype, invar))
line = "def _{}_pywrap({}):".format(self.name, ", ".join(callvars))
body.append(line)
for (outtype, _), outvar in zip(self.outtypes, self.outvars):
line = "cdef {} {}".format(outtype, outvar)
body.append(tab + line)
addr_outvars = map(lambda x: "&{}".format(x), self.outvars)
line = "{}({}, {})".format(self.name, ", ".join(self.invars),
", ".join(addr_outvars))
body.append(tab + line)
line = "return {}".format(", ".join(self.outvars))
body.append(tab + line)
body = "\n".join(body)
return body | [
"def",
"_get_python_wrap",
"(",
"self",
")",
":",
"tab",
"=",
"\" \"",
"*",
"4",
"body",
",",
"callvars",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"(",
"intype",
",",
"_",
")",
",",
"invar",
"in",
"zip",
"(",
"self",
".",
"intypes",
",",
"self",
".",
"invars",
")",
":",
"callvars",
".",
"append",
"(",
"\"{} {}\"",
".",
"format",
"(",
"intype",
",",
"invar",
")",
")",
"line",
"=",
"\"def _{}_pywrap({}):\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"\", \"",
".",
"join",
"(",
"callvars",
")",
")",
"body",
".",
"append",
"(",
"line",
")",
"for",
"(",
"outtype",
",",
"_",
")",
",",
"outvar",
"in",
"zip",
"(",
"self",
".",
"outtypes",
",",
"self",
".",
"outvars",
")",
":",
"line",
"=",
"\"cdef {} {}\"",
".",
"format",
"(",
"outtype",
",",
"outvar",
")",
"body",
".",
"append",
"(",
"tab",
"+",
"line",
")",
"addr_outvars",
"=",
"map",
"(",
"lambda",
"x",
":",
"\"&{}\"",
".",
"format",
"(",
"x",
")",
",",
"self",
".",
"outvars",
")",
"line",
"=",
"\"{}({}, {})\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"\", \"",
".",
"join",
"(",
"self",
".",
"invars",
")",
",",
"\", \"",
".",
"join",
"(",
"addr_outvars",
")",
")",
"body",
".",
"append",
"(",
"tab",
"+",
"line",
")",
"line",
"=",
"\"return {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"self",
".",
"outvars",
")",
")",
"body",
".",
"append",
"(",
"tab",
"+",
"line",
")",
"body",
"=",
"\"\\n\"",
".",
"join",
"(",
"body",
")",
"return",
"body"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/special/_generate_pyx.py#L887-L908 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | TextAttr.Merge | (*args, **kwargs) | return _controls_.TextAttr_Merge(*args, **kwargs) | Merge(self, TextAttr overlay) | Merge(self, TextAttr overlay) | [
"Merge",
"(",
"self",
"TextAttr",
"overlay",
")"
] | def Merge(*args, **kwargs):
"""Merge(self, TextAttr overlay)"""
return _controls_.TextAttr_Merge(*args, **kwargs) | [
"def",
"Merge",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TextAttr_Merge",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L1916-L1918 | |
tpfister/caffe-heatmap | 4db69ef53e6b8a0b3b4ebb29328b0ab3dbf67c4e | scripts/cpp_lint.py | python | _ShouldPrintError | (category, confidence, linenum) | return True | If confidence >= verbose, category passes filter and is not suppressed. | If confidence >= verbose, category passes filter and is not suppressed. | [
"If",
"confidence",
">",
"=",
"verbose",
"category",
"passes",
"filter",
"and",
"is",
"not",
"suppressed",
"."
] | def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True | [
"def",
"_ShouldPrintError",
"(",
"category",
",",
"confidence",
",",
"linenum",
")",
":",
"# There are three ways we might decide not to print an error message:",
"# a \"NOLINT(category)\" comment appears in the source,",
"# the verbosity level isn't high enough, or the filters filter it out.",
"if",
"IsErrorSuppressedByNolint",
"(",
"category",
",",
"linenum",
")",
":",
"return",
"False",
"if",
"confidence",
"<",
"_cpplint_state",
".",
"verbose_level",
":",
"return",
"False",
"is_filtered",
"=",
"False",
"for",
"one_filter",
"in",
"_Filters",
"(",
")",
":",
"if",
"one_filter",
".",
"startswith",
"(",
"'-'",
")",
":",
"if",
"category",
".",
"startswith",
"(",
"one_filter",
"[",
"1",
":",
"]",
")",
":",
"is_filtered",
"=",
"True",
"elif",
"one_filter",
".",
"startswith",
"(",
"'+'",
")",
":",
"if",
"category",
".",
"startswith",
"(",
"one_filter",
"[",
"1",
":",
"]",
")",
":",
"is_filtered",
"=",
"False",
"else",
":",
"assert",
"False",
"# should have been checked for in SetFilter.",
"if",
"is_filtered",
":",
"return",
"False",
"return",
"True"
] | https://github.com/tpfister/caffe-heatmap/blob/4db69ef53e6b8a0b3b4ebb29328b0ab3dbf67c4e/scripts/cpp_lint.py#L961-L985 | |
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/dataset/audio/validators.py | python | check_magphase | (method) | return new_method | Wrapper method to check the parameters of Magphase. | Wrapper method to check the parameters of Magphase. | [
"Wrapper",
"method",
"to",
"check",
"the",
"parameters",
"of",
"Magphase",
"."
] | def check_magphase(method):
"""Wrapper method to check the parameters of Magphase."""
@wraps(method)
def new_method(self, *args, **kwargs):
[power], _ = parse_user_args(method, *args, **kwargs)
check_power(power)
return method(self, *args, **kwargs)
return new_method | [
"def",
"check_magphase",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"new_method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"[",
"power",
"]",
",",
"_",
"=",
"parse_user_args",
"(",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"check_power",
"(",
"power",
")",
"return",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_method"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/dataset/audio/validators.py#L497-L506 | |
francinexue/xuefu | b6ff79747a42e020588c0c0a921048e08fe4680c | ctpx/ctp3/ctptd.py | python | CtpTd.onRtnFromBankToFutureByFuture | (self, RspTransferField) | 期货发起银行资金转期货通知 | 期货发起银行资金转期货通知 | [
"期货发起银行资金转期货通知"
] | def onRtnFromBankToFutureByFuture(self, RspTransferField):
"""期货发起银行资金转期货通知"""
pass | [
"def",
"onRtnFromBankToFutureByFuture",
"(",
"self",
",",
"RspTransferField",
")",
":",
"pass"
] | https://github.com/francinexue/xuefu/blob/b6ff79747a42e020588c0c0a921048e08fe4680c/ctpx/ctp3/ctptd.py#L475-L477 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/more-itertools/py2/more_itertools/more.py | python | split_before | (iterable, pred) | Yield lists of items from *iterable*, where each list starts with an
item where callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] | Yield lists of items from *iterable*, where each list starts with an
item where callable *pred* returns ``True``: | [
"Yield",
"lists",
"of",
"items",
"from",
"*",
"iterable",
"*",
"where",
"each",
"list",
"starts",
"with",
"an",
"item",
"where",
"callable",
"*",
"pred",
"*",
"returns",
"True",
":"
] | def split_before(iterable, pred):
"""Yield lists of items from *iterable*, where each list starts with an
item where callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
"""
buf = []
for item in iterable:
if pred(item) and buf:
yield buf
buf = []
buf.append(item)
yield buf | [
"def",
"split_before",
"(",
"iterable",
",",
"pred",
")",
":",
"buf",
"=",
"[",
"]",
"for",
"item",
"in",
"iterable",
":",
"if",
"pred",
"(",
"item",
")",
"and",
"buf",
":",
"yield",
"buf",
"buf",
"=",
"[",
"]",
"buf",
".",
"append",
"(",
"item",
")",
"yield",
"buf"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/more-itertools/py2/more_itertools/more.py#L1033-L1050 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | build/android/gyp/generate_split_manifest.py | python | Build | (main_manifest, split, has_code) | return MANIFEST_TEMPLATE % {
'package': package,
'split': split.replace('-', '_'),
'has_code': str(has_code).lower()
} | Builds a split manifest based on the manifest of the main APK.
Args:
main_manifest: the XML manifest of the main APK as a string
split: the name of the split as a string
has_code: whether this split APK will contain .dex files
Returns:
The XML split manifest as a string | Builds a split manifest based on the manifest of the main APK. | [
"Builds",
"a",
"split",
"manifest",
"based",
"on",
"the",
"manifest",
"of",
"the",
"main",
"APK",
"."
] | def Build(main_manifest, split, has_code):
"""Builds a split manifest based on the manifest of the main APK.
Args:
main_manifest: the XML manifest of the main APK as a string
split: the name of the split as a string
has_code: whether this split APK will contain .dex files
Returns:
The XML split manifest as a string
"""
doc = xml.etree.ElementTree.fromstring(main_manifest)
package = doc.get('package')
return MANIFEST_TEMPLATE % {
'package': package,
'split': split.replace('-', '_'),
'has_code': str(has_code).lower()
} | [
"def",
"Build",
"(",
"main_manifest",
",",
"split",
",",
"has_code",
")",
":",
"doc",
"=",
"xml",
".",
"etree",
".",
"ElementTree",
".",
"fromstring",
"(",
"main_manifest",
")",
"package",
"=",
"doc",
".",
"get",
"(",
"'package'",
")",
"return",
"MANIFEST_TEMPLATE",
"%",
"{",
"'package'",
":",
"package",
",",
"'split'",
":",
"split",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
",",
"'has_code'",
":",
"str",
"(",
"has_code",
")",
".",
"lower",
"(",
")",
"}"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/build/android/gyp/generate_split_manifest.py#L57-L76 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/debug/lib/debug_graphs.py | python | parse_debug_node_name | (node_name) | return watched_node_name, watched_output_slot, debug_op_index, debug_op | Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
4. Name of the debug op, as a str, e.g, "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name. | Parse the name of a debug node. | [
"Parse",
"the",
"name",
"of",
"a",
"debug",
"node",
"."
] | def parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
4. Name of the debug op, as a str, e.g, "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op | [
"def",
"parse_debug_node_name",
"(",
"node_name",
")",
":",
"prefix",
"=",
"\"__dbg_\"",
"name",
"=",
"node_name",
"if",
"not",
"name",
".",
"startswith",
"(",
"prefix",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid prefix in debug node name: '%s'\"",
"%",
"node_name",
")",
"name",
"=",
"name",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"if",
"name",
".",
"count",
"(",
"\"_\"",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Invalid debug node name: '%s'\"",
"%",
"node_name",
")",
"debug_op",
"=",
"name",
"[",
"name",
".",
"rindex",
"(",
"\"_\"",
")",
"+",
"1",
":",
"]",
"name",
"=",
"name",
"[",
":",
"name",
".",
"rindex",
"(",
"\"_\"",
")",
"]",
"debug_op_index",
"=",
"int",
"(",
"name",
"[",
"name",
".",
"rindex",
"(",
"\"_\"",
")",
"+",
"1",
":",
"]",
")",
"name",
"=",
"name",
"[",
":",
"name",
".",
"rindex",
"(",
"\"_\"",
")",
"]",
"if",
"name",
".",
"count",
"(",
"\":\"",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Invalid tensor name in debug node name: '%s'\"",
"%",
"node_name",
")",
"watched_node_name",
"=",
"name",
"[",
":",
"name",
".",
"index",
"(",
"\":\"",
")",
"]",
"watched_output_slot",
"=",
"int",
"(",
"name",
"[",
"name",
".",
"index",
"(",
"\":\"",
")",
"+",
"1",
":",
"]",
")",
"return",
"watched_node_name",
",",
"watched_output_slot",
",",
"debug_op_index",
",",
"debug_op"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/debug/lib/debug_graphs.py#L103-L141 | |
llvm-dcpu16/llvm-dcpu16 | ae6b01fecd03219677e391d4421df5d966d80dcf | utils/llvm-build/llvmbuild/main.py | python | cmake_quote_string | (value) | return value | cmake_quote_string(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files. | cmake_quote_string(value) -> str | [
"cmake_quote_string",
"(",
"value",
")",
"-",
">",
"str"
] | def cmake_quote_string(value):
"""
cmake_quote_string(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
# Currently, we only handle escaping backslashes.
value = value.replace("\\", "\\\\")
return value | [
"def",
"cmake_quote_string",
"(",
"value",
")",
":",
"# Currently, we only handle escaping backslashes.",
"value",
"=",
"value",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"\\\\\\\\\"",
")",
"return",
"value"
] | https://github.com/llvm-dcpu16/llvm-dcpu16/blob/ae6b01fecd03219677e391d4421df5d966d80dcf/utils/llvm-build/llvmbuild/main.py#L12-L23 | |
psnonis/FinBERT | c0c555d833a14e2316a3701e59c0b5156f804b4e | bert-gpu/optimization.py | python | AdamWeightDecayOptimizer._get_variable_name | (self, param_name) | return param_name | Get the variable name from the tensor name. | Get the variable name from the tensor name. | [
"Get",
"the",
"variable",
"name",
"from",
"the",
"tensor",
"name",
"."
] | def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name | [
"def",
"_get_variable_name",
"(",
"self",
",",
"param_name",
")",
":",
"m",
"=",
"re",
".",
"match",
"(",
"\"^(.*):\\\\d+$\"",
",",
"param_name",
")",
"if",
"m",
"is",
"not",
"None",
":",
"param_name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"return",
"param_name"
] | https://github.com/psnonis/FinBERT/blob/c0c555d833a14e2316a3701e59c0b5156f804b4e/bert-gpu/optimization.py#L203-L208 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/recfunctions.py | python | repack_fields | (a, align=False, recurse=False) | return np.dtype((a.type, dt)) | Re-pack the fields of a structured array or dtype in memory.
The memory layout of structured datatypes allows fields at arbitrary
byte offsets. This means the fields can be separated by padding bytes,
their offsets can be non-monotonically increasing, and they can overlap.
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
on the `align` option, which behaves like the `align` option to `np.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
bytes are removed.
If `align=True`, this methods produces an "aligned" memory layout in which
each field's offset is a multiple of its alignment, and the total itemsize
is a multiple of the largest alignment, by adding padding bytes as needed.
Parameters
----------
a : ndarray or dtype
array or dtype for which to repack the fields.
align : boolean
If true, use an "aligned" memory layout, otherwise use a "packed" layout.
recurse : boolean
If True, also repack nested structures.
Returns
-------
repacked : ndarray or dtype
Copy of `a` with fields repacked, or `a` itself if no repacking was
needed.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
>>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True)
>>> print_offsets(dt)
offsets: [0, 8, 16]
itemsize: 24
>>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
offsets: [0, 1, 9]
itemsize: 17 | Re-pack the fields of a structured array or dtype in memory. | [
"Re",
"-",
"pack",
"the",
"fields",
"of",
"a",
"structured",
"array",
"or",
"dtype",
"in",
"memory",
"."
] | def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
The memory layout of structured datatypes allows fields at arbitrary
byte offsets. This means the fields can be separated by padding bytes,
their offsets can be non-monotonically increasing, and they can overlap.
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
on the `align` option, which behaves like the `align` option to `np.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
bytes are removed.
If `align=True`, this methods produces an "aligned" memory layout in which
each field's offset is a multiple of its alignment, and the total itemsize
is a multiple of the largest alignment, by adding padding bytes as needed.
Parameters
----------
a : ndarray or dtype
array or dtype for which to repack the fields.
align : boolean
If true, use an "aligned" memory layout, otherwise use a "packed" layout.
recurse : boolean
If True, also repack nested structures.
Returns
-------
repacked : ndarray or dtype
Copy of `a` with fields repacked, or `a` itself if no repacking was
needed.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
>>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True)
>>> print_offsets(dt)
offsets: [0, 8, 16]
itemsize: 24
>>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
offsets: [0, 1, 9]
itemsize: 17
"""
if not isinstance(a, np.dtype):
dt = repack_fields(a.dtype, align=align, recurse=recurse)
return a.astype(dt, copy=False)
if a.names is None:
return a
fieldinfo = []
for name in a.names:
tup = a.fields[name]
if recurse:
fmt = repack_fields(tup[0], align=align, recurse=True)
else:
fmt = tup[0]
if len(tup) == 3:
name = (tup[2], name)
fieldinfo.append((name, fmt))
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt)) | [
"def",
"repack_fields",
"(",
"a",
",",
"align",
"=",
"False",
",",
"recurse",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"np",
".",
"dtype",
")",
":",
"dt",
"=",
"repack_fields",
"(",
"a",
".",
"dtype",
",",
"align",
"=",
"align",
",",
"recurse",
"=",
"recurse",
")",
"return",
"a",
".",
"astype",
"(",
"dt",
",",
"copy",
"=",
"False",
")",
"if",
"a",
".",
"names",
"is",
"None",
":",
"return",
"a",
"fieldinfo",
"=",
"[",
"]",
"for",
"name",
"in",
"a",
".",
"names",
":",
"tup",
"=",
"a",
".",
"fields",
"[",
"name",
"]",
"if",
"recurse",
":",
"fmt",
"=",
"repack_fields",
"(",
"tup",
"[",
"0",
"]",
",",
"align",
"=",
"align",
",",
"recurse",
"=",
"True",
")",
"else",
":",
"fmt",
"=",
"tup",
"[",
"0",
"]",
"if",
"len",
"(",
"tup",
")",
"==",
"3",
":",
"name",
"=",
"(",
"tup",
"[",
"2",
"]",
",",
"name",
")",
"fieldinfo",
".",
"append",
"(",
"(",
"name",
",",
"fmt",
")",
")",
"dt",
"=",
"np",
".",
"dtype",
"(",
"fieldinfo",
",",
"align",
"=",
"align",
")",
"return",
"np",
".",
"dtype",
"(",
"(",
"a",
".",
"type",
",",
"dt",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/recfunctions.py#L793-L871 | |
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/core/fromnumeric.py | python | rank | (a) | Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in Numpy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0 | Return the number of dimensions of an array. | [
"Return",
"the",
"number",
"of",
"dimensions",
"of",
"an",
"array",
"."
] | def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in Numpy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim | [
"def",
"rank",
"(",
"a",
")",
":",
"try",
":",
"return",
"a",
".",
"ndim",
"except",
"AttributeError",
":",
"return",
"asarray",
"(",
"a",
")",
".",
"ndim"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/core/fromnumeric.py#L2449-L2492 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py | python | Menu.invoke | (self, index) | return self.tk.call(self._w, 'invoke', index) | Invoke a menu item identified by INDEX and execute
the associated command. | Invoke a menu item identified by INDEX and execute
the associated command. | [
"Invoke",
"a",
"menu",
"item",
"identified",
"by",
"INDEX",
"and",
"execute",
"the",
"associated",
"command",
"."
] | def invoke(self, index):
"""Invoke a menu item identified by INDEX and execute
the associated command."""
return self.tk.call(self._w, 'invoke', index) | [
"def",
"invoke",
"(",
"self",
",",
"index",
")",
":",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"'invoke'",
",",
"index",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L2940-L2943 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/protobuf/py2/google/protobuf/internal/containers.py | python | RepeatedCompositeFieldContainer.append | (self, value) | Appends one element by copying the message. | Appends one element by copying the message. | [
"Appends",
"one",
"element",
"by",
"copying",
"the",
"message",
"."
] | def append(self, value):
"""Appends one element by copying the message."""
new_element = self._message_descriptor._concrete_class()
new_element._SetListener(self._message_listener)
new_element.CopyFrom(value)
self._values.append(new_element)
if not self._message_listener.dirty:
self._message_listener.Modified() | [
"def",
"append",
"(",
"self",
",",
"value",
")",
":",
"new_element",
"=",
"self",
".",
"_message_descriptor",
".",
"_concrete_class",
"(",
")",
"new_element",
".",
"_SetListener",
"(",
"self",
".",
"_message_listener",
")",
"new_element",
".",
"CopyFrom",
"(",
"value",
")",
"self",
".",
"_values",
".",
"append",
"(",
"new_element",
")",
"if",
"not",
"self",
".",
"_message_listener",
".",
"dirty",
":",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py2/google/protobuf/internal/containers.py#L387-L394 | ||
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/compiler.py | python | IpuStrategy.enable_fp16 | (self) | return self._ipu_strategy.enable_fp16 | Get the boolean of float16 mode or not from IpuStrategy instance. | Get the boolean of float16 mode or not from IpuStrategy instance. | [
"Get",
"the",
"boolean",
"of",
"float16",
"mode",
"or",
"not",
"from",
"IpuStrategy",
"instance",
"."
] | def enable_fp16(self):
"""
Get the boolean of float16 mode or not from IpuStrategy instance.
"""
return self._ipu_strategy.enable_fp16 | [
"def",
"enable_fp16",
"(",
"self",
")",
":",
"return",
"self",
".",
"_ipu_strategy",
".",
"enable_fp16"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/compiler.py#L712-L716 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tpu.py | python | replicate | (computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
maximum_shapes=None) | return split_compile_and_replicate(
computation,
inputs,
infeed_queue,
device_assignment,
name,
maximum_shapes=maximum_shapes)[1] | Builds a graph operator that runs a replicated TPU computation.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
N-dimension list of compatible values will result in a N-dimension list of
scalar tensors rather than a single Rank-N tensors. If you need different
behavior, convert part of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. The structure of `maximum_shapes` needs to be the same as
`inputs[0]`.
Returns:
A list of outputs, indexed by `[replica_num]` each output can be a nested
structure same as what computation() returns with a few exceptions.
Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`. | Builds a graph operator that runs a replicated TPU computation. | [
"Builds",
"a",
"graph",
"operator",
"that",
"runs",
"a",
"replicated",
"TPU",
"computation",
"."
] | def replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
maximum_shapes=None):
"""Builds a graph operator that runs a replicated TPU computation.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
N-dimension list of compatible values will result in a N-dimension list of
scalar tensors rather than a single Rank-N tensors. If you need different
behavior, convert part of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. The structure of `maximum_shapes` needs to be the same as
`inputs[0]`.
Returns:
A list of outputs, indexed by `[replica_num]` each output can be a nested
structure same as what computation() returns with a few exceptions.
Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
return split_compile_and_replicate(
computation,
inputs,
infeed_queue,
device_assignment,
name,
maximum_shapes=maximum_shapes)[1] | [
"def",
"replicate",
"(",
"computation",
",",
"inputs",
"=",
"None",
",",
"infeed_queue",
"=",
"None",
",",
"device_assignment",
"=",
"None",
",",
"name",
"=",
"None",
",",
"maximum_shapes",
"=",
"None",
")",
":",
"return",
"split_compile_and_replicate",
"(",
"computation",
",",
"inputs",
",",
"infeed_queue",
",",
"device_assignment",
",",
"name",
",",
"maximum_shapes",
"=",
"maximum_shapes",
")",
"[",
"1",
"]"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tpu.py#L579-L639 | |
apiaryio/snowcrash | b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3 | tools/gyp/pylib/gyp/msvs_emulation.py | python | VerifyMissingSources | (sources, build_dir, generator_flags, gyp_to_ninja) | Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails. | Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails. | [
"Emulate",
"behavior",
"of",
"msvs_error_on_missing_sources",
"present",
"in",
"the",
"msvs",
"generator",
":",
"Check",
"that",
"all",
"regular",
"source",
"files",
"i",
".",
"e",
".",
"not",
"created",
"at",
"run",
"time",
"exist",
"on",
"disk",
".",
"Missing",
"files",
"cause",
"needless",
"recompilation",
"when",
"building",
"via",
"VS",
"and",
"we",
"want",
"this",
"check",
"to",
"match",
"for",
"people",
"/",
"bots",
"that",
"build",
"using",
"ninja",
"so",
"they",
"re",
"not",
"surprised",
"when",
"the",
"VS",
"build",
"fails",
"."
] | def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up)) | [
"def",
"VerifyMissingSources",
"(",
"sources",
",",
"build_dir",
",",
"generator_flags",
",",
"gyp_to_ninja",
")",
":",
"if",
"int",
"(",
"generator_flags",
".",
"get",
"(",
"'msvs_error_on_missing_sources'",
",",
"0",
")",
")",
":",
"no_specials",
"=",
"filter",
"(",
"lambda",
"x",
":",
"'$'",
"not",
"in",
"x",
",",
"sources",
")",
"relative",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"build_dir",
",",
"gyp_to_ninja",
"(",
"s",
")",
")",
"for",
"s",
"in",
"no_specials",
"]",
"missing",
"=",
"filter",
"(",
"lambda",
"x",
":",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"x",
")",
",",
"relative",
")",
"if",
"missing",
":",
"# They'll look like out\\Release\\..\\..\\stuff\\things.cc, so normalize the",
"# path for a slightly less crazy looking output.",
"cleaned_up",
"=",
"[",
"os",
".",
"path",
".",
"normpath",
"(",
"x",
")",
"for",
"x",
"in",
"missing",
"]",
"raise",
"Exception",
"(",
"'Missing input files:\\n%s'",
"%",
"'\\n'",
".",
"join",
"(",
"cleaned_up",
")",
")"
] | https://github.com/apiaryio/snowcrash/blob/b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3/tools/gyp/pylib/gyp/msvs_emulation.py#L1060-L1074 | ||
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/pylib/gyp/generator/ninja.py | python | NinjaWriter.WriteLink | (self, spec, config_name, config, link_deps) | Write out a link step. Fills out target.binary. | Write out a link step. Fills out target.binary. | [
"Write",
"out",
"a",
"link",
"step",
".",
"Fills",
"out",
"target",
".",
"binary",
"."
] | def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output | [
"def",
"WriteLink",
"(",
"self",
",",
"spec",
",",
"config_name",
",",
"config",
",",
"link_deps",
")",
":",
"if",
"self",
".",
"flavor",
"!=",
"'mac'",
"or",
"len",
"(",
"self",
".",
"archs",
")",
"==",
"1",
":",
"return",
"self",
".",
"WriteLinkForArch",
"(",
"self",
".",
"ninja",
",",
"spec",
",",
"config_name",
",",
"config",
",",
"link_deps",
")",
"else",
":",
"output",
"=",
"self",
".",
"ComputeOutput",
"(",
"spec",
")",
"inputs",
"=",
"[",
"self",
".",
"WriteLinkForArch",
"(",
"self",
".",
"arch_subninjas",
"[",
"arch",
"]",
",",
"spec",
",",
"config_name",
",",
"config",
",",
"link_deps",
"[",
"arch",
"]",
",",
"arch",
"=",
"arch",
")",
"for",
"arch",
"in",
"self",
".",
"archs",
"]",
"extra_bindings",
"=",
"[",
"]",
"build_output",
"=",
"output",
"if",
"not",
"self",
".",
"is_mac_bundle",
":",
"self",
".",
"AppendPostbuildVariable",
"(",
"extra_bindings",
",",
"spec",
",",
"output",
",",
"output",
")",
"# TODO(yyanagisawa): more work needed to fix:",
"# https://code.google.com/p/gyp/issues/detail?id=411",
"if",
"(",
"spec",
"[",
"'type'",
"]",
"in",
"(",
"'shared_library'",
",",
"'loadable_module'",
")",
"and",
"not",
"self",
".",
"is_mac_bundle",
")",
":",
"extra_bindings",
".",
"append",
"(",
"(",
"'lib'",
",",
"output",
")",
")",
"self",
".",
"ninja",
".",
"build",
"(",
"[",
"output",
",",
"output",
"+",
"'.TOC'",
"]",
",",
"'solipo'",
",",
"inputs",
",",
"variables",
"=",
"extra_bindings",
")",
"else",
":",
"self",
".",
"ninja",
".",
"build",
"(",
"build_output",
",",
"'lipo'",
",",
"inputs",
",",
"variables",
"=",
"extra_bindings",
")",
"return",
"output"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/pylib/gyp/generator/ninja.py#L1057-L1082 | ||
microsoft/clang | 86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5 | utils/check_cfc/check_cfc.py | python | main_is_frozen | () | return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") or # old py2exe
imp.is_frozen("__main__")) | Returns True when running as a py2exe executable. | Returns True when running as a py2exe executable. | [
"Returns",
"True",
"when",
"running",
"as",
"a",
"py2exe",
"executable",
"."
] | def main_is_frozen():
"""Returns True when running as a py2exe executable."""
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") or # old py2exe
imp.is_frozen("__main__")) | [
"def",
"main_is_frozen",
"(",
")",
":",
"return",
"(",
"hasattr",
"(",
"sys",
",",
"\"frozen\"",
")",
"or",
"# new py2exe",
"hasattr",
"(",
"sys",
",",
"\"importers\"",
")",
"or",
"# old py2exe",
"imp",
".",
"is_frozen",
"(",
"\"__main__\"",
")",
")"
] | https://github.com/microsoft/clang/blob/86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5/utils/check_cfc/check_cfc.py#L81-L85 | |
apache/trafodion | 8455c839ad6b6d7b6e04edda5715053095b78046 | core/sqf/src/seatrans/hbase-trx/src/main/python/thrift1/gen-py/hbase/Hbase.py | python | Client.deleteTable | (self, tableName) | Deletes a table
@throws IOError if table doesn't exist on server or there was some other
problem
Parameters:
- tableName: name of table to delete | Deletes a table | [
"Deletes",
"a",
"table"
] | def deleteTable(self, tableName):
"""
Deletes a table
@throws IOError if table doesn't exist on server or there was some other
problem
Parameters:
- tableName: name of table to delete
"""
self.send_deleteTable(tableName)
self.recv_deleteTable() | [
"def",
"deleteTable",
"(",
"self",
",",
"tableName",
")",
":",
"self",
".",
"send_deleteTable",
"(",
"tableName",
")",
"self",
".",
"recv_deleteTable",
"(",
")"
] | https://github.com/apache/trafodion/blob/8455c839ad6b6d7b6e04edda5715053095b78046/core/sqf/src/seatrans/hbase-trx/src/main/python/thrift1/gen-py/hbase/Hbase.py#L930-L941 | ||
tensor-compiler/taco | d0654a84137169883973c40a951dfdb89883fd9c | python_bindings/pytaco/pytensor/taco_tensor.py | python | tensor_sub | (t1, t2, out_format, dtype=None) | return _compute_bin_elt_wise_op(operator.sub, t1, t2, out_format, dtype) | Computes the element wise subtraction of two tensors.
* If the two tensors are equal order, performs the operation element-wise
* If the two tensors have order N and M and N > M, requires the last M dimensions of the tensor with
order N be equal to the dimensions of the tensor with order M in order to broadcast.
The ``__sub__`` method in the tensor class is implemented using this method.
Parameters
-----------
t1, t2: tensors, array_like
tensors or array_like input operands.
out_format: format, mode_format
* If a :class:`format` is specified, the result tensor is stored in the format out_format.
* If a :class:`mode_format` is specified, the result the result tensor has a with all of the dimensions
stored in the :class:`mode_format` passed in.
dtype: Datatype, optional
The datatype of the output tensor.
Notes
--------
The inner dimensions of the input tensor is broadcasted along the dimensions of whichever tensor has a higher
order.
Returns
---------
difference: tensor
The element wise difference of the input tensors broadcasted as required. | Computes the element wise subtraction of two tensors. | [
"Computes",
"the",
"element",
"wise",
"subtraction",
"of",
"two",
"tensors",
"."
] | def tensor_sub(t1, t2, out_format, dtype=None):
"""
Computes the element wise subtraction of two tensors.
* If the two tensors are equal order, performs the operation element-wise
* If the two tensors have order N and M and N > M, requires the last M dimensions of the tensor with
order N be equal to the dimensions of the tensor with order M in order to broadcast.
The ``__sub__`` method in the tensor class is implemented using this method.
Parameters
-----------
t1, t2: tensors, array_like
tensors or array_like input operands.
out_format: format, mode_format
* If a :class:`format` is specified, the result tensor is stored in the format out_format.
* If a :class:`mode_format` is specified, the result the result tensor has a with all of the dimensions
stored in the :class:`mode_format` passed in.
dtype: Datatype, optional
The datatype of the output tensor.
Notes
--------
The inner dimensions of the input tensor is broadcasted along the dimensions of whichever tensor has a higher
order.
Returns
---------
difference: tensor
The element wise difference of the input tensors broadcasted as required.
"""
return _compute_bin_elt_wise_op(operator.sub, t1, t2, out_format, dtype) | [
"def",
"tensor_sub",
"(",
"t1",
",",
"t2",
",",
"out_format",
",",
"dtype",
"=",
"None",
")",
":",
"return",
"_compute_bin_elt_wise_op",
"(",
"operator",
".",
"sub",
",",
"t1",
",",
"t2",
",",
"out_format",
",",
"dtype",
")"
] | https://github.com/tensor-compiler/taco/blob/d0654a84137169883973c40a951dfdb89883fd9c/python_bindings/pytaco/pytensor/taco_tensor.py#L948-L984 | |
mhammond/pywin32 | 44afd86ba8485194df93234639243252deeb40d5 | adodbapi/remote.py | python | Connection._i_am_closing | (self, crsr) | message from a cursor giving connection a chance to clean up | message from a cursor giving connection a chance to clean up | [
"message",
"from",
"a",
"cursor",
"giving",
"connection",
"a",
"chance",
"to",
"clean",
"up"
] | def _i_am_closing(self, crsr):
"message from a cursor giving connection a chance to clean up"
try:
del self.cursors[crsr.id]
except:
pass | [
"def",
"_i_am_closing",
"(",
"self",
",",
"crsr",
")",
":",
"try",
":",
"del",
"self",
".",
"cursors",
"[",
"crsr",
".",
"id",
"]",
"except",
":",
"pass"
] | https://github.com/mhammond/pywin32/blob/44afd86ba8485194df93234639243252deeb40d5/adodbapi/remote.py#L343-L348 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/boto/boto/s3/bucket.py | python | Bucket.get_location | (self) | Returns the LocationConstraint for the bucket.
:rtype: str
:return: The LocationConstraint for the bucket or the empty
string if no constraint was specified when bucket was created. | Returns the LocationConstraint for the bucket. | [
"Returns",
"the",
"LocationConstraint",
"for",
"the",
"bucket",
"."
] | def get_location(self):
"""
Returns the LocationConstraint for the bucket.
:rtype: str
:return: The LocationConstraint for the bucket or the empty
string if no constraint was specified when bucket was created.
"""
response = self.connection.make_request('GET', self.name,
query_args='location')
body = response.read()
if response.status == 200:
rs = ResultSet(self)
h = handler.XmlHandler(rs, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs.LocationConstraint
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body) | [
"def",
"get_location",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"connection",
".",
"make_request",
"(",
"'GET'",
",",
"self",
".",
"name",
",",
"query_args",
"=",
"'location'",
")",
"body",
"=",
"response",
".",
"read",
"(",
")",
"if",
"response",
".",
"status",
"==",
"200",
":",
"rs",
"=",
"ResultSet",
"(",
"self",
")",
"h",
"=",
"handler",
".",
"XmlHandler",
"(",
"rs",
",",
"self",
")",
"if",
"not",
"isinstance",
"(",
"body",
",",
"bytes",
")",
":",
"body",
"=",
"body",
".",
"encode",
"(",
"'utf-8'",
")",
"xml",
".",
"sax",
".",
"parseString",
"(",
"body",
",",
"h",
")",
"return",
"rs",
".",
"LocationConstraint",
"else",
":",
"raise",
"self",
".",
"connection",
".",
"provider",
".",
"storage_response_error",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"body",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/s3/bucket.py#L1124-L1144 | ||
gimli-org/gimli | 17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | versioneer.py | python | register_vcs_handler | (vcs, method) | return decorate | Decorator to mark a method as the handler for a particular VCS. | Decorator to mark a method as the handler for a particular VCS. | [
"Decorator",
"to",
"mark",
"a",
"method",
"as",
"the",
"handler",
"for",
"a",
"particular",
"VCS",
"."
] | def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate | [
"def",
"register_vcs_handler",
"(",
"vcs",
",",
"method",
")",
":",
"# decorator",
"def",
"decorate",
"(",
"f",
")",
":",
"\"\"\"Store f in HANDLERS[vcs][method].\"\"\"",
"if",
"vcs",
"not",
"in",
"HANDLERS",
":",
"HANDLERS",
"[",
"vcs",
"]",
"=",
"{",
"}",
"HANDLERS",
"[",
"vcs",
"]",
"[",
"method",
"]",
"=",
"f",
"return",
"f",
"return",
"decorate"
] | https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/versioneer.py#L373-L381 | |
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/xml/dom/expatbuilder.py | python | Namespaces.start_namespace_decl_handler | (self, prefix, uri) | Push this namespace declaration on our storage. | Push this namespace declaration on our storage. | [
"Push",
"this",
"namespace",
"declaration",
"on",
"our",
"storage",
"."
] | def start_namespace_decl_handler(self, prefix, uri):
"""Push this namespace declaration on our storage."""
self._ns_ordered_prefixes.append((prefix, uri)) | [
"def",
"start_namespace_decl_handler",
"(",
"self",
",",
"prefix",
",",
"uri",
")",
":",
"self",
".",
"_ns_ordered_prefixes",
".",
"append",
"(",
"(",
"prefix",
",",
"uri",
")",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/xml/dom/expatbuilder.py#L739-L741 | ||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/examples/learn/random_forest_mnist.py | python | train_and_eval | () | Train and evaluate the model. | Train and evaluate the model. | [
"Train",
"and",
"evaluate",
"the",
"model",
"."
] | def train_and_eval():
"""Train and evaluate the model."""
model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir
print('model directory = %s' % model_dir)
est = build_estimator(model_dir)
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
train_input_fn = numpy_io.numpy_input_fn(
x={'images': mnist.train.images},
y=mnist.train.labels.astype(numpy.int32),
batch_size=FLAGS.batch_size,
num_epochs=None,
shuffle=True)
est.fit(input_fn=train_input_fn, steps=None)
metric_name = 'accuracy'
metric = {
metric_name:
metric_spec.MetricSpec(
eval_metrics.get_metric(metric_name),
prediction_key=eval_metrics.get_prediction_key(metric_name))
}
test_input_fn = numpy_io.numpy_input_fn(
x={'images': mnist.test.images},
y=mnist.test.labels.astype(numpy.int32),
num_epochs=1,
batch_size=FLAGS.batch_size,
shuffle=False)
results = est.evaluate(input_fn=test_input_fn, metrics=metric)
for key in sorted(results):
print('%s: %s' % (key, results[key])) | [
"def",
"train_and_eval",
"(",
")",
":",
"model_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"if",
"not",
"FLAGS",
".",
"model_dir",
"else",
"FLAGS",
".",
"model_dir",
"print",
"(",
"'model directory = %s'",
"%",
"model_dir",
")",
"est",
"=",
"build_estimator",
"(",
"model_dir",
")",
"mnist",
"=",
"input_data",
".",
"read_data_sets",
"(",
"FLAGS",
".",
"data_dir",
",",
"one_hot",
"=",
"False",
")",
"train_input_fn",
"=",
"numpy_io",
".",
"numpy_input_fn",
"(",
"x",
"=",
"{",
"'images'",
":",
"mnist",
".",
"train",
".",
"images",
"}",
",",
"y",
"=",
"mnist",
".",
"train",
".",
"labels",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
",",
"batch_size",
"=",
"FLAGS",
".",
"batch_size",
",",
"num_epochs",
"=",
"None",
",",
"shuffle",
"=",
"True",
")",
"est",
".",
"fit",
"(",
"input_fn",
"=",
"train_input_fn",
",",
"steps",
"=",
"None",
")",
"metric_name",
"=",
"'accuracy'",
"metric",
"=",
"{",
"metric_name",
":",
"metric_spec",
".",
"MetricSpec",
"(",
"eval_metrics",
".",
"get_metric",
"(",
"metric_name",
")",
",",
"prediction_key",
"=",
"eval_metrics",
".",
"get_prediction_key",
"(",
"metric_name",
")",
")",
"}",
"test_input_fn",
"=",
"numpy_io",
".",
"numpy_input_fn",
"(",
"x",
"=",
"{",
"'images'",
":",
"mnist",
".",
"test",
".",
"images",
"}",
",",
"y",
"=",
"mnist",
".",
"test",
".",
"labels",
".",
"astype",
"(",
"numpy",
".",
"int32",
")",
",",
"num_epochs",
"=",
"1",
",",
"batch_size",
"=",
"FLAGS",
".",
"batch_size",
",",
"shuffle",
"=",
"False",
")",
"results",
"=",
"est",
".",
"evaluate",
"(",
"input_fn",
"=",
"test_input_fn",
",",
"metrics",
"=",
"metric",
")",
"for",
"key",
"in",
"sorted",
"(",
"results",
")",
":",
"print",
"(",
"'%s: %s'",
"%",
"(",
"key",
",",
"results",
"[",
"key",
"]",
")",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/examples/learn/random_forest_mnist.py#L51-L85 | ||
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/contrib/factorization/python/ops/clustering_ops.py | python | KMeans.training_graph | (self) | return all_scores, cluster_idx, scores, training_op | Generate a training graph for kmeans algorithm.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
training_op: an op that runs an iteration of training. | Generate a training graph for kmeans algorithm. | [
"Generate",
"a",
"training",
"graph",
"for",
"kmeans",
"algorithm",
"."
] | def training_graph(self):
"""Generate a training graph for kmeans algorithm.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
inputs = self._inputs
cluster_centers_var, total_counts = self._init_clusters()
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = tf.nn.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers, cluster_centers_var,
total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(inputs, cluster_idx,
cluster_centers_var)
return all_scores, cluster_idx, scores, training_op | [
"def",
"training_graph",
"(",
"self",
")",
":",
"# Implementation of kmeans.",
"inputs",
"=",
"self",
".",
"_inputs",
"cluster_centers_var",
",",
"total_counts",
"=",
"self",
".",
"_init_clusters",
"(",
")",
"cluster_centers",
"=",
"cluster_centers_var",
"if",
"self",
".",
"_distance_metric",
"==",
"COSINE_DISTANCE",
":",
"inputs",
"=",
"self",
".",
"_l2_normalize_data",
"(",
"inputs",
")",
"if",
"not",
"self",
".",
"_clusters_l2_normalized",
"(",
")",
":",
"cluster_centers",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"cluster_centers",
",",
"dim",
"=",
"1",
")",
"all_scores",
",",
"scores",
",",
"cluster_idx",
"=",
"self",
".",
"_infer_graph",
"(",
"inputs",
",",
"cluster_centers",
")",
"if",
"self",
".",
"_use_mini_batch",
":",
"training_op",
"=",
"self",
".",
"_mini_batch_training_op",
"(",
"inputs",
",",
"cluster_idx",
",",
"cluster_centers",
",",
"cluster_centers_var",
",",
"total_counts",
")",
"else",
":",
"assert",
"cluster_centers",
"==",
"cluster_centers_var",
"training_op",
"=",
"self",
".",
"_full_batch_training_op",
"(",
"inputs",
",",
"cluster_idx",
",",
"cluster_centers_var",
")",
"return",
"all_scores",
",",
"cluster_idx",
",",
"scores",
",",
"training_op"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/factorization/python/ops/clustering_ops.py#L271-L305 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | DateTime.ParseISOCombined | (*args, **kwargs) | return _misc_.DateTime_ParseISOCombined(*args, **kwargs) | ParseISOCombined(self, String datetime, char sep='T') -> bool | ParseISOCombined(self, String datetime, char sep='T') -> bool | [
"ParseISOCombined",
"(",
"self",
"String",
"datetime",
"char",
"sep",
"=",
"T",
")",
"-",
">",
"bool"
] | def ParseISOCombined(*args, **kwargs):
"""ParseISOCombined(self, String datetime, char sep='T') -> bool"""
return _misc_.DateTime_ParseISOCombined(*args, **kwargs) | [
"def",
"ParseISOCombined",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"DateTime_ParseISOCombined",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L4146-L4148 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/core/dtypes/common.py | python | infer_dtype_from_object | (dtype) | return infer_dtype_from_object(np.dtype(dtype)) | Get a numpy dtype.type-style object for a dtype object.
This methods also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object. | Get a numpy dtype.type-style object for a dtype object. | [
"Get",
"a",
"numpy",
"dtype",
".",
"type",
"-",
"style",
"object",
"for",
"a",
"dtype",
"object",
"."
] | def infer_dtype_from_object(dtype):
"""
Get a numpy dtype.type-style object for a dtype object.
This methods also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object.
"""
if isinstance(dtype, type) and issubclass(dtype, np.generic):
# Type object from a dtype
return dtype
elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):
# dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# Should still pass if we don't have a date-like
pass
return dtype.type
try:
dtype = pandas_dtype(dtype)
except TypeError:
pass
if is_extension_array_dtype(dtype):
return dtype.type
elif isinstance(dtype, string_types):
# TODO(jreback)
# should deprecate these
if dtype in ['datetimetz', 'datetime64tz']:
return DatetimeTZDtype.type
elif dtype in ['period']:
raise NotImplementedError
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
try:
return infer_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
# TypeError handles the float16 type code of 'e'
# further handle internal types
pass
return infer_dtype_from_object(np.dtype(dtype)) | [
"def",
"infer_dtype_from_object",
"(",
"dtype",
")",
":",
"if",
"isinstance",
"(",
"dtype",
",",
"type",
")",
"and",
"issubclass",
"(",
"dtype",
",",
"np",
".",
"generic",
")",
":",
"# Type object from a dtype",
"return",
"dtype",
"elif",
"isinstance",
"(",
"dtype",
",",
"(",
"np",
".",
"dtype",
",",
"PandasExtensionDtype",
",",
"ExtensionDtype",
")",
")",
":",
"# dtype object",
"try",
":",
"_validate_date_like_dtype",
"(",
"dtype",
")",
"except",
"TypeError",
":",
"# Should still pass if we don't have a date-like",
"pass",
"return",
"dtype",
".",
"type",
"try",
":",
"dtype",
"=",
"pandas_dtype",
"(",
"dtype",
")",
"except",
"TypeError",
":",
"pass",
"if",
"is_extension_array_dtype",
"(",
"dtype",
")",
":",
"return",
"dtype",
".",
"type",
"elif",
"isinstance",
"(",
"dtype",
",",
"string_types",
")",
":",
"# TODO(jreback)",
"# should deprecate these",
"if",
"dtype",
"in",
"[",
"'datetimetz'",
",",
"'datetime64tz'",
"]",
":",
"return",
"DatetimeTZDtype",
".",
"type",
"elif",
"dtype",
"in",
"[",
"'period'",
"]",
":",
"raise",
"NotImplementedError",
"if",
"dtype",
"==",
"'datetime'",
"or",
"dtype",
"==",
"'timedelta'",
":",
"dtype",
"+=",
"'64'",
"try",
":",
"return",
"infer_dtype_from_object",
"(",
"getattr",
"(",
"np",
",",
"dtype",
")",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"# Handles cases like _get_dtype(int) i.e.,",
"# Python objects that are valid dtypes",
"# (unlike user-defined types, in general)",
"#",
"# TypeError handles the float16 type code of 'e'",
"# further handle internal types",
"pass",
"return",
"infer_dtype_from_object",
"(",
"np",
".",
"dtype",
"(",
"dtype",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/dtypes/common.py#L1892-L1953 | |
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/contrib/metrics/python/ops/metric_ops.py | python | streaming_mean | (values, weights=None, metrics_collections=None,
updates_collections=None, name=None) | Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A tensor representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple. | Computes the (weighted) mean of the given values. | [
"Computes",
"the",
"(",
"weighted",
")",
"mean",
"of",
"the",
"given",
"values",
"."
] | def streaming_mean(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A tensor representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'mean', [values, weights]):
values = math_ops.to_float(values)
total = _create_local('total', shape=[])
count = _create_local('count', shape=[])
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
num_values = math_ops.reduce_sum(_broadcast_weights(weights, values))
else:
num_values = math_ops.to_float(array_ops.size(values))
total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
count_compute_op = state_ops.assign_add(count, num_values)
mean = _safe_div(total, count, 'value')
with ops.control_dependencies([total_compute_op, count_compute_op]):
update_op = _safe_div(total, count, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean, update_op | [
"def",
"streaming_mean",
"(",
"values",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"variable_scope",
".",
"variable_scope",
"(",
"name",
",",
"'mean'",
",",
"[",
"values",
",",
"weights",
"]",
")",
":",
"values",
"=",
"math_ops",
".",
"to_float",
"(",
"values",
")",
"total",
"=",
"_create_local",
"(",
"'total'",
",",
"shape",
"=",
"[",
"]",
")",
"count",
"=",
"_create_local",
"(",
"'count'",
",",
"shape",
"=",
"[",
"]",
")",
"if",
"weights",
"is",
"not",
"None",
":",
"weights",
"=",
"math_ops",
".",
"to_float",
"(",
"weights",
")",
"values",
"=",
"math_ops",
".",
"mul",
"(",
"values",
",",
"weights",
")",
"num_values",
"=",
"math_ops",
".",
"reduce_sum",
"(",
"_broadcast_weights",
"(",
"weights",
",",
"values",
")",
")",
"else",
":",
"num_values",
"=",
"math_ops",
".",
"to_float",
"(",
"array_ops",
".",
"size",
"(",
"values",
")",
")",
"total_compute_op",
"=",
"state_ops",
".",
"assign_add",
"(",
"total",
",",
"math_ops",
".",
"reduce_sum",
"(",
"values",
")",
")",
"count_compute_op",
"=",
"state_ops",
".",
"assign_add",
"(",
"count",
",",
"num_values",
")",
"mean",
"=",
"_safe_div",
"(",
"total",
",",
"count",
",",
"'value'",
")",
"with",
"ops",
".",
"control_dependencies",
"(",
"[",
"total_compute_op",
",",
"count_compute_op",
"]",
")",
":",
"update_op",
"=",
"_safe_div",
"(",
"total",
",",
"count",
",",
"'update_op'",
")",
"if",
"metrics_collections",
":",
"ops",
".",
"add_to_collections",
"(",
"metrics_collections",
",",
"mean",
")",
"if",
"updates_collections",
":",
"ops",
".",
"add_to_collections",
"(",
"updates_collections",
",",
"update_op",
")",
"return",
"mean",
",",
"update_op"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/metrics/python/ops/metric_ops.py#L325-L387 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/hypertreelist.py | python | TreeListMainWindow.GetBestColumnWidth | (self, column, parent=None) | return max(10, width) | Returns the best column's width based on the items width in this column.
:param `column`: an integer specifying the column index;
:param `parent`: an instance of :class:`TreeListItem`. | Returns the best column's width based on the items width in this column. | [
"Returns",
"the",
"best",
"column",
"s",
"width",
"based",
"on",
"the",
"items",
"width",
"in",
"this",
"column",
"."
] | def GetBestColumnWidth(self, column, parent=None):
"""
Returns the best column's width based on the items width in this column.
:param `column`: an integer specifying the column index;
:param `parent`: an instance of :class:`TreeListItem`.
"""
maxWidth, h = self.GetClientSize()
width = 0
if maxWidth < 5:
# Not shown on screen
maxWidth = 1000
# get root if on item
if not parent:
parent = self.GetRootItem()
# add root width
if not self.HasAGWFlag(wx.TR_HIDE_ROOT):
w = self.GetItemWidth(parent, column)
if width < w:
width = w
if width > maxWidth:
return maxWidth
item, cookie = self.GetFirstChild(parent)
while item:
w = self.GetItemWidth(item, column)
if width < w:
width = w
if width > maxWidth:
return maxWidth
# check the children of this item
if item.IsExpanded():
w = self.GetBestColumnWidth(column, item)
if width < w:
width = w
if width > maxWidth:
return maxWidth
# next sibling
item, cookie = self.GetNextChild(parent, cookie)
return max(10, width) | [
"def",
"GetBestColumnWidth",
"(",
"self",
",",
"column",
",",
"parent",
"=",
"None",
")",
":",
"maxWidth",
",",
"h",
"=",
"self",
".",
"GetClientSize",
"(",
")",
"width",
"=",
"0",
"if",
"maxWidth",
"<",
"5",
":",
"# Not shown on screen",
"maxWidth",
"=",
"1000",
"# get root if on item",
"if",
"not",
"parent",
":",
"parent",
"=",
"self",
".",
"GetRootItem",
"(",
")",
"# add root width",
"if",
"not",
"self",
".",
"HasAGWFlag",
"(",
"wx",
".",
"TR_HIDE_ROOT",
")",
":",
"w",
"=",
"self",
".",
"GetItemWidth",
"(",
"parent",
",",
"column",
")",
"if",
"width",
"<",
"w",
":",
"width",
"=",
"w",
"if",
"width",
">",
"maxWidth",
":",
"return",
"maxWidth",
"item",
",",
"cookie",
"=",
"self",
".",
"GetFirstChild",
"(",
"parent",
")",
"while",
"item",
":",
"w",
"=",
"self",
".",
"GetItemWidth",
"(",
"item",
",",
"column",
")",
"if",
"width",
"<",
"w",
":",
"width",
"=",
"w",
"if",
"width",
">",
"maxWidth",
":",
"return",
"maxWidth",
"# check the children of this item",
"if",
"item",
".",
"IsExpanded",
"(",
")",
":",
"w",
"=",
"self",
".",
"GetBestColumnWidth",
"(",
"column",
",",
"item",
")",
"if",
"width",
"<",
"w",
":",
"width",
"=",
"w",
"if",
"width",
">",
"maxWidth",
":",
"return",
"maxWidth",
"# next sibling",
"item",
",",
"cookie",
"=",
"self",
".",
"GetNextChild",
"(",
"parent",
",",
"cookie",
")",
"return",
"max",
"(",
"10",
",",
"width",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/hypertreelist.py#L4004-L4050 | |
nodejs/nan | 8db8c8f544f2b6ce1b0859ef6ecdd0a3873a9e62 | cpplint.py | python | CheckSpacing | (filename, clean_lines, linenum, nesting_state, error) | Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | Checks for the correctness of various spacing issues in the code. | [
"Checks",
"for",
"the",
"correctness",
"of",
"various",
"spacing",
"issues",
"in",
"the",
"code",
"."
] | def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings,
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in a 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into a 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
if Search(r'\w\s+\[', line) and not Search(r'(?:auto&?|delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop') | [
"def",
"CheckSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
":",
"# Don't use \"elided\" lines here, otherwise we can't check commented lines.",
"# Don't want to use \"raw\" either, because we don't want to check inside C++11",
"# raw strings,",
"raw",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"line",
"=",
"raw",
"[",
"linenum",
"]",
"# Before nixing comments, check if the line is blank for no good",
"# reason. This includes the first line after a block is opened, and",
"# blank lines at the end of a function (ie, right before a line like '}'",
"#",
"# Skip all the blank line checks if we are immediately inside a",
"# namespace body. In other words, don't issue blank line warnings",
"# for this block:",
"# namespace {",
"#",
"# }",
"#",
"# A warning about missing end of namespace comments will be issued instead.",
"#",
"# Also skip blank line checks for 'extern \"C\"' blocks, which are formatted",
"# like namespaces.",
"if",
"(",
"IsBlankLine",
"(",
"line",
")",
"and",
"not",
"nesting_state",
".",
"InNamespaceBody",
"(",
")",
"and",
"not",
"nesting_state",
".",
"InExternC",
"(",
")",
")",
":",
"elided",
"=",
"clean_lines",
".",
"elided",
"prev_line",
"=",
"elided",
"[",
"linenum",
"-",
"1",
"]",
"prevbrace",
"=",
"prev_line",
".",
"rfind",
"(",
"'{'",
")",
"# TODO(unknown): Don't complain if line before blank line, and line after,",
"# both start with alnums and are indented the same amount.",
"# This ignores whitespace at the start of a namespace block",
"# because those are not usually indented.",
"if",
"prevbrace",
"!=",
"-",
"1",
"and",
"prev_line",
"[",
"prevbrace",
":",
"]",
".",
"find",
"(",
"'}'",
")",
"==",
"-",
"1",
":",
"# OK, we have a blank line at the start of a code block. Before we",
"# complain, we check if it is an exception to the rule: The previous",
"# non-empty line has the parameters of a function header that are indented",
"# 4 spaces (because they did not fit in a 80 column line when placed on",
"# the same line as the function name). We also check for the case where",
"# the previous line is indented 6 spaces, which may happen when the",
"# initializers of a constructor do not fit into a 80 column line.",
"exception",
"=",
"False",
"if",
"Match",
"(",
"r' {6}\\w'",
",",
"prev_line",
")",
":",
"# Initializer list?",
"# We are looking for the opening column of initializer list, which",
"# should be indented 4 spaces to cause 6 space indentation afterwards.",
"search_position",
"=",
"linenum",
"-",
"2",
"while",
"(",
"search_position",
">=",
"0",
"and",
"Match",
"(",
"r' {6}\\w'",
",",
"elided",
"[",
"search_position",
"]",
")",
")",
":",
"search_position",
"-=",
"1",
"exception",
"=",
"(",
"search_position",
">=",
"0",
"and",
"elided",
"[",
"search_position",
"]",
"[",
":",
"5",
"]",
"==",
"' :'",
")",
"else",
":",
"# Search for the function arguments or an initializer list. We use a",
"# simple heuristic here: If the line is indented 4 spaces; and we have a",
"# closing paren, without the opening paren, followed by an opening brace",
"# or colon (for initializer lists) we assume that it is the last line of",
"# a function header. If we have a colon indented 4 spaces, it is an",
"# initializer list.",
"exception",
"=",
"(",
"Match",
"(",
"r' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)'",
",",
"prev_line",
")",
"or",
"Match",
"(",
"r' {4}:'",
",",
"prev_line",
")",
")",
"if",
"not",
"exception",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/blank_line'",
",",
"2",
",",
"'Redundant blank line at the start of a code block '",
"'should be deleted.'",
")",
"# Ignore blank lines at the end of a block in a long if-else",
"# chain, like this:",
"# if (condition1) {",
"# // Something followed by a blank line",
"#",
"# } else if (condition2) {",
"# // Something else",
"# }",
"if",
"linenum",
"+",
"1",
"<",
"clean_lines",
".",
"NumLines",
"(",
")",
":",
"next_line",
"=",
"raw",
"[",
"linenum",
"+",
"1",
"]",
"if",
"(",
"next_line",
"and",
"Match",
"(",
"r'\\s*}'",
",",
"next_line",
")",
"and",
"next_line",
".",
"find",
"(",
"'} else '",
")",
"==",
"-",
"1",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/blank_line'",
",",
"3",
",",
"'Redundant blank line at the end of a code block '",
"'should be deleted.'",
")",
"matched",
"=",
"Match",
"(",
"r'\\s*(public|protected|private):'",
",",
"prev_line",
")",
"if",
"matched",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/blank_line'",
",",
"3",
",",
"'Do not leave a blank line after \"%s:\"'",
"%",
"matched",
".",
"group",
"(",
"1",
")",
")",
"# Next, check comments",
"next_line_start",
"=",
"0",
"if",
"linenum",
"+",
"1",
"<",
"clean_lines",
".",
"NumLines",
"(",
")",
":",
"next_line",
"=",
"raw",
"[",
"linenum",
"+",
"1",
"]",
"next_line_start",
"=",
"len",
"(",
"next_line",
")",
"-",
"len",
"(",
"next_line",
".",
"lstrip",
"(",
")",
")",
"CheckComment",
"(",
"line",
",",
"filename",
",",
"linenum",
",",
"next_line_start",
",",
"error",
")",
"# get rid of comments and strings",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# You shouldn't have spaces before your brackets, except maybe after",
"# 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.",
"if",
"Search",
"(",
"r'\\w\\s+\\['",
",",
"line",
")",
"and",
"not",
"Search",
"(",
"r'(?:auto&?|delete|return)\\s+\\['",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/braces'",
",",
"5",
",",
"'Extra space before ['",
")",
"# In range-based for, we wanted spaces before and after the colon, but",
"# not around \"::\" tokens that might appear.",
"if",
"(",
"Search",
"(",
"r'for *\\(.*[^:]:[^: ]'",
",",
"line",
")",
"or",
"Search",
"(",
"r'for *\\(.*[^: ]:[^:]'",
",",
"line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/forcolon'",
",",
"2",
",",
"'Missing space around colon in range-based for loop'",
")"
] | https://github.com/nodejs/nan/blob/8db8c8f544f2b6ce1b0859ef6ecdd0a3873a9e62/cpplint.py#L3408-L3533 | ||
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/calendar.py | python | TextCalendar.formatmonthname | (self, theyear, themonth, width, withyear=True) | return s.center(width) | Return a formatted month name. | Return a formatted month name. | [
"Return",
"a",
"formatted",
"month",
"name",
"."
] | def formatmonthname(self, theyear, themonth, width, withyear=True):
"""
Return a formatted month name.
"""
s = month_name[themonth]
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width) | [
"def",
"formatmonthname",
"(",
"self",
",",
"theyear",
",",
"themonth",
",",
"width",
",",
"withyear",
"=",
"True",
")",
":",
"s",
"=",
"month_name",
"[",
"themonth",
"]",
"if",
"withyear",
":",
"s",
"=",
"\"%s %r\"",
"%",
"(",
"s",
",",
"theyear",
")",
"return",
"s",
".",
"center",
"(",
"width",
")"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/calendar.py#L299-L306 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_gdi.py | python | GraphicsGradientStop.SetColour | (*args, **kwargs) | return _gdi_.GraphicsGradientStop_SetColour(*args, **kwargs) | SetColour(self, Colour col) | SetColour(self, Colour col) | [
"SetColour",
"(",
"self",
"Colour",
"col",
")"
] | def SetColour(*args, **kwargs):
"""SetColour(self, Colour col)"""
return _gdi_.GraphicsGradientStop_SetColour(*args, **kwargs) | [
"def",
"SetColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"GraphicsGradientStop_SetColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_gdi.py#L5894-L5896 | |
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/contributed/sumopy/agilepy/lib_wx/wxmisc.py | python | KeyHandler.init_keys | (self, parent=None) | Sets events and variables for parent.
If no parent is defined then self is assumed to be parent. | Sets events and variables for parent.
If no parent is defined then self is assumed to be parent. | [
"Sets",
"events",
"and",
"variables",
"for",
"parent",
".",
"If",
"no",
"parent",
"is",
"defined",
"then",
"self",
"is",
"assumed",
"to",
"be",
"parent",
"."
] | def init_keys(self, parent=None):
"""
Sets events and variables for parent.
If no parent is defined then self is assumed to be parent.
"""
if parent is None:
parent = self
self.key_pressed = '' # string code of currently pressed key
wx.EVT_ENTER_WINDOW(self, self.on_enter_window)
wx.EVT_KEY_DOWN(self, self.on_key_down)
wx.EVT_KEY_UP(self, self.on_key_up) | [
"def",
"init_keys",
"(",
"self",
",",
"parent",
"=",
"None",
")",
":",
"if",
"parent",
"is",
"None",
":",
"parent",
"=",
"self",
"self",
".",
"key_pressed",
"=",
"''",
"# string code of currently pressed key",
"wx",
".",
"EVT_ENTER_WINDOW",
"(",
"self",
",",
"self",
".",
"on_enter_window",
")",
"wx",
".",
"EVT_KEY_DOWN",
"(",
"self",
",",
"self",
".",
"on_key_down",
")",
"wx",
".",
"EVT_KEY_UP",
"(",
"self",
",",
"self",
".",
"on_key_up",
")"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/agilepy/lib_wx/wxmisc.py#L625-L636 | ||
oracle/graaljs | 36a56e8e993d45fc40939a3a4d9c0c24990720f1 | graal-nodejs/tools/cpplint.py | python | CheckRedundantOverrideOrFinal | (filename, clean_lines, linenum, error) | Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Check if line contains a redundant "override" or "final" virt-specifier. | [
"Check",
"if",
"line",
"contains",
"a",
"redundant",
"override",
"or",
"final",
"virt",
"-",
"specifier",
"."
] | def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"')) | [
"def",
"CheckRedundantOverrideOrFinal",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"# Look for closing parenthesis nearby. We need one to confirm where",
"# the declarator ends and where the virt-specifier starts to avoid",
"# false positives.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"declarator_end",
"=",
"line",
".",
"rfind",
"(",
"')'",
")",
"if",
"declarator_end",
">=",
"0",
":",
"fragment",
"=",
"line",
"[",
"declarator_end",
":",
"]",
"else",
":",
"if",
"linenum",
">",
"1",
"and",
"clean_lines",
".",
"elided",
"[",
"linenum",
"-",
"1",
"]",
".",
"rfind",
"(",
"')'",
")",
">=",
"0",
":",
"fragment",
"=",
"line",
"else",
":",
"return",
"# Check that at most one of \"override\" or \"final\" is present, not both",
"if",
"Search",
"(",
"r'\\boverride\\b'",
",",
"fragment",
")",
"and",
"Search",
"(",
"r'\\bfinal\\b'",
",",
"fragment",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/inheritance'",
",",
"4",
",",
"(",
"'\"override\" is redundant since function is '",
"'already declared as \"final\"'",
")",
")"
] | https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/cpplint.py#L6337-L6363 | ||
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/numpy/math_ops.py | python | _get_histogramdd_count | (ndim, bin_edges, sample, weights) | return count | Returns count for histogramdd. | Returns count for histogramdd. | [
"Returns",
"count",
"for",
"histogramdd",
"."
] | def _get_histogramdd_count(ndim, bin_edges, sample, weights):
"""Returns count for histogramdd."""
data_indices = []
nbin = ()
flattened_bin_size = 1
for i in F.make_range(ndim):
data_to_bins = searchsorted(bin_edges[i], sample[:, i], 'right')
bin_size = _type_convert(int, bin_edges[i].size)
data_to_bins = where_(sample[:, i] == bin_edges[i][-1], _to_tensor(bin_size - 1), data_to_bins)
data_indices.append(data_to_bins)
nbin += (bin_size + 1,)
flattened_bin_size *= (bin_size + 1)
factor = F.reshape(_to_tensor(_factor_flattened_hist(nbin)), (ndim, 1))
stacked_indices = stack(data_indices) * factor
if _get_device() == 'Ascend':
stacked_indices = F.cast(stacked_indices, mstype.float32)
flattened_hist = F.reduce_sum(stacked_indices.astype(mstype.float32), 0)
count = bincount(flattened_hist.astype(mstype.int32), weights, length=flattened_bin_size)
count = F.reshape(count, nbin)
slices = _list_comprehensions(ndim, F.make_slice(1, -1, 1), True)
count = count[slices]
return count | [
"def",
"_get_histogramdd_count",
"(",
"ndim",
",",
"bin_edges",
",",
"sample",
",",
"weights",
")",
":",
"data_indices",
"=",
"[",
"]",
"nbin",
"=",
"(",
")",
"flattened_bin_size",
"=",
"1",
"for",
"i",
"in",
"F",
".",
"make_range",
"(",
"ndim",
")",
":",
"data_to_bins",
"=",
"searchsorted",
"(",
"bin_edges",
"[",
"i",
"]",
",",
"sample",
"[",
":",
",",
"i",
"]",
",",
"'right'",
")",
"bin_size",
"=",
"_type_convert",
"(",
"int",
",",
"bin_edges",
"[",
"i",
"]",
".",
"size",
")",
"data_to_bins",
"=",
"where_",
"(",
"sample",
"[",
":",
",",
"i",
"]",
"==",
"bin_edges",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
",",
"_to_tensor",
"(",
"bin_size",
"-",
"1",
")",
",",
"data_to_bins",
")",
"data_indices",
".",
"append",
"(",
"data_to_bins",
")",
"nbin",
"+=",
"(",
"bin_size",
"+",
"1",
",",
")",
"flattened_bin_size",
"*=",
"(",
"bin_size",
"+",
"1",
")",
"factor",
"=",
"F",
".",
"reshape",
"(",
"_to_tensor",
"(",
"_factor_flattened_hist",
"(",
"nbin",
")",
")",
",",
"(",
"ndim",
",",
"1",
")",
")",
"stacked_indices",
"=",
"stack",
"(",
"data_indices",
")",
"*",
"factor",
"if",
"_get_device",
"(",
")",
"==",
"'Ascend'",
":",
"stacked_indices",
"=",
"F",
".",
"cast",
"(",
"stacked_indices",
",",
"mstype",
".",
"float32",
")",
"flattened_hist",
"=",
"F",
".",
"reduce_sum",
"(",
"stacked_indices",
".",
"astype",
"(",
"mstype",
".",
"float32",
")",
",",
"0",
")",
"count",
"=",
"bincount",
"(",
"flattened_hist",
".",
"astype",
"(",
"mstype",
".",
"int32",
")",
",",
"weights",
",",
"length",
"=",
"flattened_bin_size",
")",
"count",
"=",
"F",
".",
"reshape",
"(",
"count",
",",
"nbin",
")",
"slices",
"=",
"_list_comprehensions",
"(",
"ndim",
",",
"F",
".",
"make_slice",
"(",
"1",
",",
"-",
"1",
",",
"1",
")",
",",
"True",
")",
"count",
"=",
"count",
"[",
"slices",
"]",
"return",
"count"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/numpy/math_ops.py#L4745-L4767 | |
epfml/sent2vec | 770bd2d475c35eccc9a2452592e7c4304ac89fb9 | wikiTokenize.py | python | tokenize | (tknzr, sentence, to_lower=True) | return sentence | Arguments:
- tknzr: a tokenizer implementing the NLTK tokenizer interface
- sentence: a string to be tokenized
- to_lower: lowercasing or not | Arguments:
- tknzr: a tokenizer implementing the NLTK tokenizer interface
- sentence: a string to be tokenized
- to_lower: lowercasing or not | [
"Arguments",
":",
"-",
"tknzr",
":",
"a",
"tokenizer",
"implementing",
"the",
"NLTK",
"tokenizer",
"interface",
"-",
"sentence",
":",
"a",
"string",
"to",
"be",
"tokenized",
"-",
"to_lower",
":",
"lowercasing",
"or",
"not"
] | def tokenize(tknzr, sentence, to_lower=True):
"""Arguments:
- tknzr: a tokenizer implementing the NLTK tokenizer interface
- sentence: a string to be tokenized
- to_lower: lowercasing or not
"""
sentence = sentence.strip()
sentence = ' '.join([format_token(x) for x in tknzr.tokenize(sentence)])
if to_lower:
sentence = sentence.lower()
sentence = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))','<url>',sentence) #replace urls by <url>
sentence = re.sub('(\@ [^\s]+)','<user>',sentence) #replace @user268 by <user>
filter(lambda word: ' ' not in word, sentence)
return sentence | [
"def",
"tokenize",
"(",
"tknzr",
",",
"sentence",
",",
"to_lower",
"=",
"True",
")",
":",
"sentence",
"=",
"sentence",
".",
"strip",
"(",
")",
"sentence",
"=",
"' '",
".",
"join",
"(",
"[",
"format_token",
"(",
"x",
")",
"for",
"x",
"in",
"tknzr",
".",
"tokenize",
"(",
"sentence",
")",
"]",
")",
"if",
"to_lower",
":",
"sentence",
"=",
"sentence",
".",
"lower",
"(",
")",
"sentence",
"=",
"re",
".",
"sub",
"(",
"'((www\\.[^\\s]+)|(https?://[^\\s]+)|(http?://[^\\s]+))'",
",",
"'<url>'",
",",
"sentence",
")",
"#replace urls by <url>",
"sentence",
"=",
"re",
".",
"sub",
"(",
"'(\\@ [^\\s]+)'",
",",
"'<user>'",
",",
"sentence",
")",
"#replace @user268 by <user>",
"filter",
"(",
"lambda",
"word",
":",
"' '",
"not",
"in",
"word",
",",
"sentence",
")",
"return",
"sentence"
] | https://github.com/epfml/sent2vec/blob/770bd2d475c35eccc9a2452592e7c4304ac89fb9/wikiTokenize.py#L8-L21 | |
telefonicaid/fiware-orion | 27c3202b9ddcfb9e3635a0af8d373f76e89b1d24 | scripts/cpplint.py | python | _IsTestFilename | (filename) | Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise. | Determines if the given filename has a suffix that identifies it as a test. | [
"Determines",
"if",
"the",
"given",
"filename",
"has",
"a",
"suffix",
"that",
"identifies",
"it",
"as",
"a",
"test",
"."
] | def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False | [
"def",
"_IsTestFilename",
"(",
"filename",
")",
":",
"if",
"(",
"filename",
".",
"endswith",
"(",
"'_test.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_unittest.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_regtest.cc'",
")",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | https://github.com/telefonicaid/fiware-orion/blob/27c3202b9ddcfb9e3635a0af8d373f76e89b1d24/scripts/cpplint.py#L2322-L2336 | ||
TGAC/KAT | e8870331de2b4bb0a1b3b91c6afb8fb9d59e9216 | deps/boost/tools/build/src/tools/gcc.py | python | init_link_flags | (toolset, linker, condition) | Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun. | Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun. | [
"Now",
"the",
"vendor",
"specific",
"flags",
".",
"The",
"parameter",
"linker",
"can",
"be",
"either",
"gnu",
"darwin",
"osf",
"hpux",
"or",
"sun",
"."
] | def init_link_flags(toolset, linker, condition):
"""
Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun.
"""
toolset_link = toolset + '.link'
if linker == 'gnu':
# Strip the binary when no debugging is needed. We use --strip-all flag
# as opposed to -s since icc (intel's compiler) is generally
# option-compatible with and inherits from the gcc toolset, but does not
# support -s.
# FIXME: what does unchecked translate to?
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ;
flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ;
# gnu ld has the ability to change the search behaviour for libraries
# referenced by -l switch. These modifiers are -Bstatic and -Bdynamic
# and change search for -l switches that follow them. The following list
# shows the tried variants.
# The search stops at the first variant that has a match.
# *nix: -Bstatic -lxxx
# libxxx.a
#
# *nix: -Bdynamic -lxxx
# libxxx.so
# libxxx.a
#
# windows (mingw,cygwin) -Bstatic -lxxx
# libxxx.a
# xxx.lib
#
# windows (mingw,cygwin) -Bdynamic -lxxx
# libxxx.dll.a
# xxx.dll.a
# libxxx.a
# xxx.lib
# cygxxx.dll (*)
# libxxx.dll
# xxx.dll
# libxxx.a
#
# (*) This is for cygwin
# Please note that -Bstatic and -Bdynamic are not a guarantee that a
# static or dynamic lib indeed gets linked in. The switches only change
# search patterns!
# On *nix mixing shared libs with static runtime is not a good idea.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bdynamic']) # : unchecked ;
# On windows allow mixing of static and dynamic libs with static
# runtime.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bdynamic']) # : unchecked ;
flags(toolset_link, 'OPTIONS',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
elif linker == 'darwin':
# On Darwin, the -s option to ld does not work unless we pass -static,
# and passing -static unconditionally is a bad idea. So, don't pass -s.
# at all, darwin.jam will use separate 'strip' invocation.
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
elif linker == 'osf':
# No --strip-all, just -s.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# This does not supports -R.
flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ;
# -rpath-link is not supported at all.
elif linker == 'sun':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# Solaris linker does not have a separate -rpath-link, but allows to use
# -L for the same purpose.
flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ;
# This permits shared libraries with non-PIC code on Solaris.
# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the
# following is not needed. Whether -fPIC should be hardcoded, is a
# separate question.
# AH, 2004/10/16: it is still necessary because some tests link against
# static libraries that were compiled without PIC.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text'])
# : unchecked ;
elif linker == 'hpux':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition),
['-Wl,-s']) # : unchecked ;
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition),
['-fPIC']) # : unchecked ;
else:
# FIXME:
errors.user_error(
"$(toolset) initialization: invalid linker '$(linker)' " +
"The value '$(linker)' specified for <linker> is not recognized. " +
"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'") | [
"def",
"init_link_flags",
"(",
"toolset",
",",
"linker",
",",
"condition",
")",
":",
"toolset_link",
"=",
"toolset",
"+",
"'.link'",
"if",
"linker",
"==",
"'gnu'",
":",
"# Strip the binary when no debugging is needed. We use --strip-all flag",
"# as opposed to -s since icc (intel's compiler) is generally",
"# option-compatible with and inherits from the gcc toolset, but does not",
"# support -s.",
"# FIXME: what does unchecked translate to?",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,--strip-all'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH_LINK'",
",",
"condition",
",",
"[",
"'<xdll-path>'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'START-GROUP'",
",",
"condition",
",",
"[",
"'-Wl,--start-group'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'END-GROUP'",
",",
"condition",
",",
"[",
"'-Wl,--end-group'",
"]",
")",
"# : unchecked ;",
"# gnu ld has the ability to change the search behaviour for libraries",
"# referenced by -l switch. These modifiers are -Bstatic and -Bdynamic",
"# and change search for -l switches that follow them. The following list",
"# shows the tried variants.",
"# The search stops at the first variant that has a match.",
"# *nix: -Bstatic -lxxx",
"# libxxx.a",
"#",
"# *nix: -Bdynamic -lxxx",
"# libxxx.so",
"# libxxx.a",
"#",
"# windows (mingw,cygwin) -Bstatic -lxxx",
"# libxxx.a",
"# xxx.lib",
"#",
"# windows (mingw,cygwin) -Bdynamic -lxxx",
"# libxxx.dll.a",
"# xxx.dll.a",
"# libxxx.a",
"# xxx.lib",
"# cygxxx.dll (*)",
"# libxxx.dll",
"# xxx.dll",
"# libxxx.a",
"#",
"# (*) This is for cygwin",
"# Please note that -Bstatic and -Bdynamic are not a guarantee that a",
"# static or dynamic lib indeed gets linked in. The switches only change",
"# search patterns!",
"# On *nix mixing shared libs with static runtime is not a good idea.",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-ST-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>shared'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bstatic'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-SA-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>shared'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bdynamic'",
"]",
")",
"# : unchecked ;",
"# On windows allow mixing of static and dynamic libs with static",
"# runtime.",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-ST-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>static/<target-os>windows'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bstatic'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-SA-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>static/<target-os>windows'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bdynamic'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>static/<target-os>windows'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bstatic'",
"]",
")",
"# : unchecked ;",
"elif",
"linker",
"==",
"'darwin'",
":",
"# On Darwin, the -s option to ld does not work unless we pass -static,",
"# and passing -static unconditionally is a bad idea. So, don't pass -s.",
"# at all, darwin.jam will use separate 'strip' invocation.",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH_LINK'",
",",
"condition",
",",
"[",
"'<xdll-path>'",
"]",
")",
"# : unchecked ;",
"elif",
"linker",
"==",
"'osf'",
":",
"# No --strip-all, just -s.",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,-s'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"# This does not supports -R.",
"flags",
"(",
"toolset_link",
",",
"'RPATH_OPTION'",
",",
"condition",
",",
"[",
"'-rpath'",
"]",
")",
"# : unchecked ;",
"# -rpath-link is not supported at all.",
"elif",
"linker",
"==",
"'sun'",
":",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,-s'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"# Solaris linker does not have a separate -rpath-link, but allows to use",
"# -L for the same purpose.",
"flags",
"(",
"toolset_link",
",",
"'LINKPATH'",
",",
"condition",
",",
"[",
"'<xdll-path>'",
"]",
")",
"# : unchecked ;",
"# This permits shared libraries with non-PIC code on Solaris.",
"# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the",
"# following is not needed. Whether -fPIC should be hardcoded, is a",
"# separate question.",
"# AH, 2004/10/16: it is still necessary because some tests link against",
"# static libraries that were compiled without PIC.",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<link>shared'",
",",
"condition",
")",
",",
"[",
"'-mimpure-text'",
"]",
")",
"# : unchecked ;",
"elif",
"linker",
"==",
"'hpux'",
":",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,-s'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<link>shared'",
",",
"condition",
")",
",",
"[",
"'-fPIC'",
"]",
")",
"# : unchecked ;",
"else",
":",
"# FIXME:",
"errors",
".",
"user_error",
"(",
"\"$(toolset) initialization: invalid linker '$(linker)' \"",
"+",
"\"The value '$(linker)' specified for <linker> is not recognized. \"",
"+",
"\"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'\"",
")"
] | https://github.com/TGAC/KAT/blob/e8870331de2b4bb0a1b3b91c6afb8fb9d59e9216/deps/boost/tools/build/src/tools/gcc.py#L494-L608 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/gslib/commands/perfdiag.py | python | PerfDiagCommand._RunLatencyTests | (self) | Runs latency tests. | Runs latency tests. | [
"Runs",
"latency",
"tests",
"."
] | def _RunLatencyTests(self):
"""Runs latency tests."""
# Stores timing information for each category of operation.
self.results['latency'] = defaultdict(list)
for i in range(self.num_objects):
self.logger.info('\nRunning latency iteration %d...', i+1)
for fpath in self.latency_files:
file_data = temp_file_dict[fpath]
url = self.bucket_url.Clone()
url.object_name = os.path.basename(fpath)
file_size = file_data.size
readable_file_size = MakeHumanReadable(file_size)
self.logger.info(
"\nFile of size %s located on disk at '%s' being diagnosed in the "
"cloud at '%s'.", readable_file_size, fpath, url)
upload_target = StorageUrlToUploadObjectMetadata(url)
def _Upload():
io_fp = cStringIO.StringIO(file_data.data)
with self._Time('UPLOAD_%d' % file_size, self.results['latency']):
self.gsutil_api.UploadObject(
io_fp, upload_target, size=file_size, provider=self.provider,
fields=['name'])
self._RunOperation(_Upload)
def _Metadata():
with self._Time('METADATA_%d' % file_size, self.results['latency']):
return self.gsutil_api.GetObjectMetadata(
url.bucket_name, url.object_name,
provider=self.provider, fields=['name', 'contentType',
'mediaLink', 'size'])
# Download will get the metadata first if we don't pass it in.
download_metadata = self._RunOperation(_Metadata)
serialization_data = GetDownloadSerializationData(download_metadata)
def _Download():
with self._Time('DOWNLOAD_%d' % file_size, self.results['latency']):
self.gsutil_api.GetObjectMedia(
url.bucket_name, url.object_name, self.discard_sink,
provider=self.provider, serialization_data=serialization_data)
self._RunOperation(_Download)
def _Delete():
with self._Time('DELETE_%d' % file_size, self.results['latency']):
self.gsutil_api.DeleteObject(url.bucket_name, url.object_name,
provider=self.provider)
self._RunOperation(_Delete) | [
"def",
"_RunLatencyTests",
"(",
"self",
")",
":",
"# Stores timing information for each category of operation.",
"self",
".",
"results",
"[",
"'latency'",
"]",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_objects",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'\\nRunning latency iteration %d...'",
",",
"i",
"+",
"1",
")",
"for",
"fpath",
"in",
"self",
".",
"latency_files",
":",
"file_data",
"=",
"temp_file_dict",
"[",
"fpath",
"]",
"url",
"=",
"self",
".",
"bucket_url",
".",
"Clone",
"(",
")",
"url",
".",
"object_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fpath",
")",
"file_size",
"=",
"file_data",
".",
"size",
"readable_file_size",
"=",
"MakeHumanReadable",
"(",
"file_size",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"\\nFile of size %s located on disk at '%s' being diagnosed in the \"",
"\"cloud at '%s'.\"",
",",
"readable_file_size",
",",
"fpath",
",",
"url",
")",
"upload_target",
"=",
"StorageUrlToUploadObjectMetadata",
"(",
"url",
")",
"def",
"_Upload",
"(",
")",
":",
"io_fp",
"=",
"cStringIO",
".",
"StringIO",
"(",
"file_data",
".",
"data",
")",
"with",
"self",
".",
"_Time",
"(",
"'UPLOAD_%d'",
"%",
"file_size",
",",
"self",
".",
"results",
"[",
"'latency'",
"]",
")",
":",
"self",
".",
"gsutil_api",
".",
"UploadObject",
"(",
"io_fp",
",",
"upload_target",
",",
"size",
"=",
"file_size",
",",
"provider",
"=",
"self",
".",
"provider",
",",
"fields",
"=",
"[",
"'name'",
"]",
")",
"self",
".",
"_RunOperation",
"(",
"_Upload",
")",
"def",
"_Metadata",
"(",
")",
":",
"with",
"self",
".",
"_Time",
"(",
"'METADATA_%d'",
"%",
"file_size",
",",
"self",
".",
"results",
"[",
"'latency'",
"]",
")",
":",
"return",
"self",
".",
"gsutil_api",
".",
"GetObjectMetadata",
"(",
"url",
".",
"bucket_name",
",",
"url",
".",
"object_name",
",",
"provider",
"=",
"self",
".",
"provider",
",",
"fields",
"=",
"[",
"'name'",
",",
"'contentType'",
",",
"'mediaLink'",
",",
"'size'",
"]",
")",
"# Download will get the metadata first if we don't pass it in.",
"download_metadata",
"=",
"self",
".",
"_RunOperation",
"(",
"_Metadata",
")",
"serialization_data",
"=",
"GetDownloadSerializationData",
"(",
"download_metadata",
")",
"def",
"_Download",
"(",
")",
":",
"with",
"self",
".",
"_Time",
"(",
"'DOWNLOAD_%d'",
"%",
"file_size",
",",
"self",
".",
"results",
"[",
"'latency'",
"]",
")",
":",
"self",
".",
"gsutil_api",
".",
"GetObjectMedia",
"(",
"url",
".",
"bucket_name",
",",
"url",
".",
"object_name",
",",
"self",
".",
"discard_sink",
",",
"provider",
"=",
"self",
".",
"provider",
",",
"serialization_data",
"=",
"serialization_data",
")",
"self",
".",
"_RunOperation",
"(",
"_Download",
")",
"def",
"_Delete",
"(",
")",
":",
"with",
"self",
".",
"_Time",
"(",
"'DELETE_%d'",
"%",
"file_size",
",",
"self",
".",
"results",
"[",
"'latency'",
"]",
")",
":",
"self",
".",
"gsutil_api",
".",
"DeleteObject",
"(",
"url",
".",
"bucket_name",
",",
"url",
".",
"object_name",
",",
"provider",
"=",
"self",
".",
"provider",
")",
"self",
".",
"_RunOperation",
"(",
"_Delete",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/gslib/commands/perfdiag.py#L688-L737 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/eclib/outbuff.py | python | TaskObject.__init__ | (self, parent, task, *args, **kwargs) | Initialize the TaskObject. All *args and **kwargs are passed
to the task.
@param parent: Parent Window/EventHandler to receive the events
generated by the process.
@param task: callable should be a generator object and must be iterable | Initialize the TaskObject. All *args and **kwargs are passed
to the task. | [
"Initialize",
"the",
"TaskObject",
".",
"All",
"*",
"args",
"and",
"**",
"kwargs",
"are",
"passed",
"to",
"the",
"task",
"."
] | def __init__(self, parent, task, *args, **kwargs):
"""Initialize the TaskObject. All *args and **kwargs are passed
to the task.
@param parent: Parent Window/EventHandler to receive the events
generated by the process.
@param task: callable should be a generator object and must be iterable
"""
super(TaskObject, self).__init__()
assert isinstance(parent, OutputBuffer)
# Attributes
self.cancel = False # Abort task
self._parent = parent # Parent Window/Event Handler
self.task = task # Task method to run
self._args = args
self._kwargs = kwargs | [
"def",
"__init__",
"(",
"self",
",",
"parent",
",",
"task",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"TaskObject",
",",
"self",
")",
".",
"__init__",
"(",
")",
"assert",
"isinstance",
"(",
"parent",
",",
"OutputBuffer",
")",
"# Attributes",
"self",
".",
"cancel",
"=",
"False",
"# Abort task",
"self",
".",
"_parent",
"=",
"parent",
"# Parent Window/Event Handler",
"self",
".",
"task",
"=",
"task",
"# Task method to run",
"self",
".",
"_args",
"=",
"args",
"self",
".",
"_kwargs",
"=",
"kwargs"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/eclib/outbuff.py#L1038-L1056 | ||
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset13.py | python | convert_softmax | (node, **kwargs) | Map MXNet's softmax operator attributes to onnx's Softmax operator
and return the created node. | Map MXNet's softmax operator attributes to onnx's Softmax operator
and return the created node. | [
"Map",
"MXNet",
"s",
"softmax",
"operator",
"attributes",
"to",
"onnx",
"s",
"Softmax",
"operator",
"and",
"return",
"the",
"created",
"node",
"."
] | def convert_softmax(node, **kwargs):
"""Map MXNet's softmax operator attributes to onnx's Softmax operator
and return the created node.
"""
from onnx.helper import make_node
from onnx import TensorProto
name, input_nodes, attrs = get_inputs(node, kwargs)
input_dtypes = get_input_dtypes(node, kwargs)
axis = int(attrs.get("axis", -1))
temperature = str(attrs.get("temperature", 'None'))
if temperature == 'None':
temperature = 1.
else:
temperature = float(temperature)
use_length = str(attrs.get("use_length", 'None'))
use_length = use_length in ['1', 'True']
dtype = input_dtypes[0]
dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
data = input_nodes[0]
create_tensor([0], name+"_0", kwargs["initializer"])
if axis == -1 and temperature == 1.:
nodes = []
if use_length:
# magic number, this is fp16 min
create_tensor([-65500.0], name+"_mask_val", kwargs["initializer"], dtype=dtype)
create_tensor([1], name+"_1", kwargs["initializer"])
create_tensor([-1], name+"_-1", kwargs["initializer"])
create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)
nodes += [
make_node("Shape", [data], [name+"_shape"]),
make_node("Shape", [name+"_shape"], [name+"_dim"]),
make_node("Sub", [name+"_dim", name+"_1"], [name+"_dim_m1"]),
make_node("Slice", [name+"_shape", name+"_dim_m1", name+"_dim"],
[name+"_dim_last_"]),
make_node("Squeeze", [name+"_dim_last_", name+"_0"], [name+"_dim_last"]),
make_node("Range", [name+"_0_s", name+"_dim_last", name+"_1_s"], [name+"_range"]),
make_node("Cast", [input_nodes[1]], [name+"_len"], to=int(TensorProto.INT64)),
make_node("Unsqueeze", [name+"_len", name+"_-1"], [name+"_len_unsqueezed"]),
make_node("Less", [name+"_range", name+"_len_unsqueezed"], [name+"_less"]),
make_node("Where", [name+'_less', data, name+"_mask_val"], [name+"_data_masked"])
]
data = name+"_data_masked"
nodes += [
make_node("Softmax", [data], [name], axis=-1)
]
return nodes
create_tensor([axis], name+"_axes", kwargs["initializer"])
create_tensor([temperature], name+"_tmp", kwargs["initializer"], dtype=dtype)
nodes = [
make_node("Div", [data, name+"_tmp"], [name+'_data']),
]
if len(input_nodes) == 1:
nodes += [
make_node("Softmax", [name+'_data'], [name], axis=axis)
]
return nodes
elif use_length:
length = input_nodes[1]
create_tensor([1], name+"_1", kwargs["initializer"])
create_const_scalar_node(name+'_-1_s', np.int64(-1), kwargs)
create_const_scalar_node(name+'_0_s', np.int64(0), kwargs)
create_const_scalar_node(name+'_1_s', np.int64(1), kwargs)
nodes += [
# cast data type
make_node("Cast", [length], [name+"_length"], to=int(TensorProto.INT64)),
make_node("Cast", [name+"_0"], [name+"_0_itype"], to=dtype_t),
make_node("Cast", [name+"_1"], [name+"_1_itype"], to=dtype_t),
# softmax output
make_node("Softmax", [name+'_data'], [name+"_softmax_out"], axis=axis),
# update axis
make_node("Shape", [data], [name+"_shape0_out"]),
make_node("Shape", [name+"_shape0_out"], [name+"_in_dim"]),
make_node("Add", [name+"_in_dim", name+"_axes"], [name+"_dim+axis"]),
make_node("Less", [name+"_axes", name+"_0_s"], [name+"_less0_out"]),
make_node("Where", [name+"_less0_out", name+"_dim+axis", name+"_axes"], [name+"_final_axis"]),
# data mask
make_node("Add", [name+"_final_axis", name+"_1_s"], [name+"_final_axis+1"]),
make_node("Slice", [name+"_shape0_out", name+"_final_axis", name+"_final_axis+1"], [name+"_axis_dim"]),
make_node("Squeeze", [name+"_axis_dim", name+"_0"], [name+"_axis_dim_s"]),
make_node("Range", [name+"_0_s", name+"_axis_dim_s", name+"_1_s"], [name+"_range0_out"]),
# one hot for axis
make_node("Squeeze", [name+"_in_dim", name+"_0"], [name+"_in_dim_s"]),
make_node("Range", [name+"_0_s", name+"_in_dim_s", name+"_1_s"], [name+"_range1_out"]),
make_node("Equal", [name+"_range1_out", name+"_final_axis"], [name+"_equal_out"]),
make_node("Cast", [name+"_equal_out"], [name+"_one_hot"], to=int(TensorProto.INT64)),
# reshape data mask for less
make_node("Sub", [name+"_axis_dim_s", name+"_1_s"], [name+"_sub0_out"]),
make_node("Mul", [name+"_one_hot", name+"_sub0_out"], [name+"_mul0_out"]),
make_node("Add", [name+"_mul0_out", name+"_1_s"], [name+"_add0_out"]),
make_node('Reshape', [name+"_range0_out", name+"_add0_out"], [name+"_reshape0_out"]),
# reshape length for less
make_node("Mul", [name+"_one_hot", name+"_-1_s"], [name+"_mul1_out"]),
make_node("Add", [name+"_mul1_out", name+"_1_s"], [name+"_add1_out"]),
make_node("Sub", [name+"_shape0_out", name+"_1_s"], [name+"_sub1_out"]),
make_node("Mul", [name+"_add1_out", name+"_sub1_out"], [name+"_mul2_out"]),
make_node("Add", [name+"_mul2_out", name+"_1_s"], [name+"_add2_out"]),
make_node('Reshape', [name+"_length", name+"_add2_out"], [name+"_reshape1_out"]),
# mask output
make_node("Less", [name+"_reshape0_out", name+"_reshape1_out"], [name+"_less_out"]),
make_node("Cast", [name+"_less_out"], [name+"_mask"], to=dtype_t),
make_node("Mul", [name+"_softmax_out", name+"_mask"], [name+"_mul3_out"]),
make_node("ReduceSum", [name+"_mul3_out", name+"_axes"], [name+"_rsum1_out"], keepdims=1),
make_node("Equal", [name+"_rsum1_out", name+"_0_itype"], [name+"_equal1_out"]),
make_node("Where", [name+"_equal1_out", name+"_1_itype", name+"_rsum1_out"], [name+"_where_out"]),
make_node("Div", [name+"_mul3_out", name+"_where_out"], [name], name=name)
]
return nodes
else:
raise NotImplementedError("use_length must be true when both data and length are paased in.") | [
"def",
"convert_softmax",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"onnx",
".",
"helper",
"import",
"make_node",
"from",
"onnx",
"import",
"TensorProto",
"name",
",",
"input_nodes",
",",
"attrs",
"=",
"get_inputs",
"(",
"node",
",",
"kwargs",
")",
"input_dtypes",
"=",
"get_input_dtypes",
"(",
"node",
",",
"kwargs",
")",
"axis",
"=",
"int",
"(",
"attrs",
".",
"get",
"(",
"\"axis\"",
",",
"-",
"1",
")",
")",
"temperature",
"=",
"str",
"(",
"attrs",
".",
"get",
"(",
"\"temperature\"",
",",
"'None'",
")",
")",
"if",
"temperature",
"==",
"'None'",
":",
"temperature",
"=",
"1.",
"else",
":",
"temperature",
"=",
"float",
"(",
"temperature",
")",
"use_length",
"=",
"str",
"(",
"attrs",
".",
"get",
"(",
"\"use_length\"",
",",
"'None'",
")",
")",
"use_length",
"=",
"use_length",
"in",
"[",
"'1'",
",",
"'True'",
"]",
"dtype",
"=",
"input_dtypes",
"[",
"0",
"]",
"dtype_t",
"=",
"onnx",
".",
"mapping",
".",
"NP_TYPE_TO_TENSOR_TYPE",
"[",
"dtype",
"]",
"data",
"=",
"input_nodes",
"[",
"0",
"]",
"create_tensor",
"(",
"[",
"0",
"]",
",",
"name",
"+",
"\"_0\"",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"if",
"axis",
"==",
"-",
"1",
"and",
"temperature",
"==",
"1.",
":",
"nodes",
"=",
"[",
"]",
"if",
"use_length",
":",
"# magic number, this is fp16 min",
"create_tensor",
"(",
"[",
"-",
"65500.0",
"]",
",",
"name",
"+",
"\"_mask_val\"",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
",",
"dtype",
"=",
"dtype",
")",
"create_tensor",
"(",
"[",
"1",
"]",
",",
"name",
"+",
"\"_1\"",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"create_tensor",
"(",
"[",
"-",
"1",
"]",
",",
"name",
"+",
"\"_-1\"",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"create_const_scalar_node",
"(",
"name",
"+",
"\"_0_s\"",
",",
"np",
".",
"int64",
"(",
"0",
")",
",",
"kwargs",
")",
"create_const_scalar_node",
"(",
"name",
"+",
"\"_1_s\"",
",",
"np",
".",
"int64",
"(",
"1",
")",
",",
"kwargs",
")",
"nodes",
"+=",
"[",
"make_node",
"(",
"\"Shape\"",
",",
"[",
"data",
"]",
",",
"[",
"name",
"+",
"\"_shape\"",
"]",
")",
",",
"make_node",
"(",
"\"Shape\"",
",",
"[",
"name",
"+",
"\"_shape\"",
"]",
",",
"[",
"name",
"+",
"\"_dim\"",
"]",
")",
",",
"make_node",
"(",
"\"Sub\"",
",",
"[",
"name",
"+",
"\"_dim\"",
",",
"name",
"+",
"\"_1\"",
"]",
",",
"[",
"name",
"+",
"\"_dim_m1\"",
"]",
")",
",",
"make_node",
"(",
"\"Slice\"",
",",
"[",
"name",
"+",
"\"_shape\"",
",",
"name",
"+",
"\"_dim_m1\"",
",",
"name",
"+",
"\"_dim\"",
"]",
",",
"[",
"name",
"+",
"\"_dim_last_\"",
"]",
")",
",",
"make_node",
"(",
"\"Squeeze\"",
",",
"[",
"name",
"+",
"\"_dim_last_\"",
",",
"name",
"+",
"\"_0\"",
"]",
",",
"[",
"name",
"+",
"\"_dim_last\"",
"]",
")",
",",
"make_node",
"(",
"\"Range\"",
",",
"[",
"name",
"+",
"\"_0_s\"",
",",
"name",
"+",
"\"_dim_last\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_range\"",
"]",
")",
",",
"make_node",
"(",
"\"Cast\"",
",",
"[",
"input_nodes",
"[",
"1",
"]",
"]",
",",
"[",
"name",
"+",
"\"_len\"",
"]",
",",
"to",
"=",
"int",
"(",
"TensorProto",
".",
"INT64",
")",
")",
",",
"make_node",
"(",
"\"Unsqueeze\"",
",",
"[",
"name",
"+",
"\"_len\"",
",",
"name",
"+",
"\"_-1\"",
"]",
",",
"[",
"name",
"+",
"\"_len_unsqueezed\"",
"]",
")",
",",
"make_node",
"(",
"\"Less\"",
",",
"[",
"name",
"+",
"\"_range\"",
",",
"name",
"+",
"\"_len_unsqueezed\"",
"]",
",",
"[",
"name",
"+",
"\"_less\"",
"]",
")",
",",
"make_node",
"(",
"\"Where\"",
",",
"[",
"name",
"+",
"'_less'",
",",
"data",
",",
"name",
"+",
"\"_mask_val\"",
"]",
",",
"[",
"name",
"+",
"\"_data_masked\"",
"]",
")",
"]",
"data",
"=",
"name",
"+",
"\"_data_masked\"",
"nodes",
"+=",
"[",
"make_node",
"(",
"\"Softmax\"",
",",
"[",
"data",
"]",
",",
"[",
"name",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"]",
"return",
"nodes",
"create_tensor",
"(",
"[",
"axis",
"]",
",",
"name",
"+",
"\"_axes\"",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"create_tensor",
"(",
"[",
"temperature",
"]",
",",
"name",
"+",
"\"_tmp\"",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
",",
"dtype",
"=",
"dtype",
")",
"nodes",
"=",
"[",
"make_node",
"(",
"\"Div\"",
",",
"[",
"data",
",",
"name",
"+",
"\"_tmp\"",
"]",
",",
"[",
"name",
"+",
"'_data'",
"]",
")",
",",
"]",
"if",
"len",
"(",
"input_nodes",
")",
"==",
"1",
":",
"nodes",
"+=",
"[",
"make_node",
"(",
"\"Softmax\"",
",",
"[",
"name",
"+",
"'_data'",
"]",
",",
"[",
"name",
"]",
",",
"axis",
"=",
"axis",
")",
"]",
"return",
"nodes",
"elif",
"use_length",
":",
"length",
"=",
"input_nodes",
"[",
"1",
"]",
"create_tensor",
"(",
"[",
"1",
"]",
",",
"name",
"+",
"\"_1\"",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"create_const_scalar_node",
"(",
"name",
"+",
"'_-1_s'",
",",
"np",
".",
"int64",
"(",
"-",
"1",
")",
",",
"kwargs",
")",
"create_const_scalar_node",
"(",
"name",
"+",
"'_0_s'",
",",
"np",
".",
"int64",
"(",
"0",
")",
",",
"kwargs",
")",
"create_const_scalar_node",
"(",
"name",
"+",
"'_1_s'",
",",
"np",
".",
"int64",
"(",
"1",
")",
",",
"kwargs",
")",
"nodes",
"+=",
"[",
"# cast data type",
"make_node",
"(",
"\"Cast\"",
",",
"[",
"length",
"]",
",",
"[",
"name",
"+",
"\"_length\"",
"]",
",",
"to",
"=",
"int",
"(",
"TensorProto",
".",
"INT64",
")",
")",
",",
"make_node",
"(",
"\"Cast\"",
",",
"[",
"name",
"+",
"\"_0\"",
"]",
",",
"[",
"name",
"+",
"\"_0_itype\"",
"]",
",",
"to",
"=",
"dtype_t",
")",
",",
"make_node",
"(",
"\"Cast\"",
",",
"[",
"name",
"+",
"\"_1\"",
"]",
",",
"[",
"name",
"+",
"\"_1_itype\"",
"]",
",",
"to",
"=",
"dtype_t",
")",
",",
"# softmax output",
"make_node",
"(",
"\"Softmax\"",
",",
"[",
"name",
"+",
"'_data'",
"]",
",",
"[",
"name",
"+",
"\"_softmax_out\"",
"]",
",",
"axis",
"=",
"axis",
")",
",",
"# update axis",
"make_node",
"(",
"\"Shape\"",
",",
"[",
"data",
"]",
",",
"[",
"name",
"+",
"\"_shape0_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Shape\"",
",",
"[",
"name",
"+",
"\"_shape0_out\"",
"]",
",",
"[",
"name",
"+",
"\"_in_dim\"",
"]",
")",
",",
"make_node",
"(",
"\"Add\"",
",",
"[",
"name",
"+",
"\"_in_dim\"",
",",
"name",
"+",
"\"_axes\"",
"]",
",",
"[",
"name",
"+",
"\"_dim+axis\"",
"]",
")",
",",
"make_node",
"(",
"\"Less\"",
",",
"[",
"name",
"+",
"\"_axes\"",
",",
"name",
"+",
"\"_0_s\"",
"]",
",",
"[",
"name",
"+",
"\"_less0_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Where\"",
",",
"[",
"name",
"+",
"\"_less0_out\"",
",",
"name",
"+",
"\"_dim+axis\"",
",",
"name",
"+",
"\"_axes\"",
"]",
",",
"[",
"name",
"+",
"\"_final_axis\"",
"]",
")",
",",
"# data mask",
"make_node",
"(",
"\"Add\"",
",",
"[",
"name",
"+",
"\"_final_axis\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_final_axis+1\"",
"]",
")",
",",
"make_node",
"(",
"\"Slice\"",
",",
"[",
"name",
"+",
"\"_shape0_out\"",
",",
"name",
"+",
"\"_final_axis\"",
",",
"name",
"+",
"\"_final_axis+1\"",
"]",
",",
"[",
"name",
"+",
"\"_axis_dim\"",
"]",
")",
",",
"make_node",
"(",
"\"Squeeze\"",
",",
"[",
"name",
"+",
"\"_axis_dim\"",
",",
"name",
"+",
"\"_0\"",
"]",
",",
"[",
"name",
"+",
"\"_axis_dim_s\"",
"]",
")",
",",
"make_node",
"(",
"\"Range\"",
",",
"[",
"name",
"+",
"\"_0_s\"",
",",
"name",
"+",
"\"_axis_dim_s\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_range0_out\"",
"]",
")",
",",
"# one hot for axis",
"make_node",
"(",
"\"Squeeze\"",
",",
"[",
"name",
"+",
"\"_in_dim\"",
",",
"name",
"+",
"\"_0\"",
"]",
",",
"[",
"name",
"+",
"\"_in_dim_s\"",
"]",
")",
",",
"make_node",
"(",
"\"Range\"",
",",
"[",
"name",
"+",
"\"_0_s\"",
",",
"name",
"+",
"\"_in_dim_s\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_range1_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Equal\"",
",",
"[",
"name",
"+",
"\"_range1_out\"",
",",
"name",
"+",
"\"_final_axis\"",
"]",
",",
"[",
"name",
"+",
"\"_equal_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Cast\"",
",",
"[",
"name",
"+",
"\"_equal_out\"",
"]",
",",
"[",
"name",
"+",
"\"_one_hot\"",
"]",
",",
"to",
"=",
"int",
"(",
"TensorProto",
".",
"INT64",
")",
")",
",",
"# reshape data mask for less",
"make_node",
"(",
"\"Sub\"",
",",
"[",
"name",
"+",
"\"_axis_dim_s\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_sub0_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Mul\"",
",",
"[",
"name",
"+",
"\"_one_hot\"",
",",
"name",
"+",
"\"_sub0_out\"",
"]",
",",
"[",
"name",
"+",
"\"_mul0_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Add\"",
",",
"[",
"name",
"+",
"\"_mul0_out\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_add0_out\"",
"]",
")",
",",
"make_node",
"(",
"'Reshape'",
",",
"[",
"name",
"+",
"\"_range0_out\"",
",",
"name",
"+",
"\"_add0_out\"",
"]",
",",
"[",
"name",
"+",
"\"_reshape0_out\"",
"]",
")",
",",
"# reshape length for less",
"make_node",
"(",
"\"Mul\"",
",",
"[",
"name",
"+",
"\"_one_hot\"",
",",
"name",
"+",
"\"_-1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_mul1_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Add\"",
",",
"[",
"name",
"+",
"\"_mul1_out\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_add1_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Sub\"",
",",
"[",
"name",
"+",
"\"_shape0_out\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_sub1_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Mul\"",
",",
"[",
"name",
"+",
"\"_add1_out\"",
",",
"name",
"+",
"\"_sub1_out\"",
"]",
",",
"[",
"name",
"+",
"\"_mul2_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Add\"",
",",
"[",
"name",
"+",
"\"_mul2_out\"",
",",
"name",
"+",
"\"_1_s\"",
"]",
",",
"[",
"name",
"+",
"\"_add2_out\"",
"]",
")",
",",
"make_node",
"(",
"'Reshape'",
",",
"[",
"name",
"+",
"\"_length\"",
",",
"name",
"+",
"\"_add2_out\"",
"]",
",",
"[",
"name",
"+",
"\"_reshape1_out\"",
"]",
")",
",",
"# mask output",
"make_node",
"(",
"\"Less\"",
",",
"[",
"name",
"+",
"\"_reshape0_out\"",
",",
"name",
"+",
"\"_reshape1_out\"",
"]",
",",
"[",
"name",
"+",
"\"_less_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Cast\"",
",",
"[",
"name",
"+",
"\"_less_out\"",
"]",
",",
"[",
"name",
"+",
"\"_mask\"",
"]",
",",
"to",
"=",
"dtype_t",
")",
",",
"make_node",
"(",
"\"Mul\"",
",",
"[",
"name",
"+",
"\"_softmax_out\"",
",",
"name",
"+",
"\"_mask\"",
"]",
",",
"[",
"name",
"+",
"\"_mul3_out\"",
"]",
")",
",",
"make_node",
"(",
"\"ReduceSum\"",
",",
"[",
"name",
"+",
"\"_mul3_out\"",
",",
"name",
"+",
"\"_axes\"",
"]",
",",
"[",
"name",
"+",
"\"_rsum1_out\"",
"]",
",",
"keepdims",
"=",
"1",
")",
",",
"make_node",
"(",
"\"Equal\"",
",",
"[",
"name",
"+",
"\"_rsum1_out\"",
",",
"name",
"+",
"\"_0_itype\"",
"]",
",",
"[",
"name",
"+",
"\"_equal1_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Where\"",
",",
"[",
"name",
"+",
"\"_equal1_out\"",
",",
"name",
"+",
"\"_1_itype\"",
",",
"name",
"+",
"\"_rsum1_out\"",
"]",
",",
"[",
"name",
"+",
"\"_where_out\"",
"]",
")",
",",
"make_node",
"(",
"\"Div\"",
",",
"[",
"name",
"+",
"\"_mul3_out\"",
",",
"name",
"+",
"\"_where_out\"",
"]",
",",
"[",
"name",
"]",
",",
"name",
"=",
"name",
")",
"]",
"return",
"nodes",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"use_length must be true when both data and length are paased in.\"",
")"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset13.py#L565-L681 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py | python | _Stream.read | (self, size=None) | return buf | Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF. | Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF. | [
"Return",
"the",
"next",
"size",
"number",
"of",
"bytes",
"from",
"the",
"stream",
".",
"If",
"size",
"is",
"not",
"defined",
"return",
"all",
"bytes",
"of",
"the",
"stream",
"up",
"to",
"EOF",
"."
] | def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"t",
"=",
"[",
"]",
"while",
"True",
":",
"buf",
"=",
"self",
".",
"_read",
"(",
"self",
".",
"bufsize",
")",
"if",
"not",
"buf",
":",
"break",
"t",
".",
"append",
"(",
"buf",
")",
"buf",
"=",
"\"\"",
".",
"join",
"(",
"t",
")",
"else",
":",
"buf",
"=",
"self",
".",
"_read",
"(",
"size",
")",
"self",
".",
"pos",
"+=",
"len",
"(",
"buf",
")",
"return",
"buf"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L565-L581 | |
vesoft-inc/nebula | 25a06217ebaf169e1f0e5ff6a797ba6f0c41fc35 | .linters/cpp/cpplint.py | python | CheckForFunctionLengths | (filename, clean_lines, linenum,
function_state, error) | Reports for long function bodies.
For an overview why this is done, see:
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found. | Reports for long function bodies. | [
"Reports",
"for",
"long",
"function",
"bodies",
"."
] | def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
if Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() | [
"def",
"CheckForFunctionLengths",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"function_state",
",",
"error",
")",
":",
"lines",
"=",
"clean_lines",
".",
"lines",
"line",
"=",
"lines",
"[",
"linenum",
"]",
"joined_line",
"=",
"''",
"starting_func",
"=",
"False",
"regexp",
"=",
"r'(\\w(\\w|::|\\*|\\&|\\s)*)\\('",
"# decls * & space::name( ...",
"match_result",
"=",
"Match",
"(",
"regexp",
",",
"line",
")",
"if",
"match_result",
":",
"# If the name is all caps and underscores, figure it's a macro and",
"# ignore it, unless it's TEST or TEST_F.",
"function_name",
"=",
"match_result",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
"if",
"function_name",
"==",
"'TEST'",
"or",
"function_name",
"==",
"'TEST_F'",
"or",
"(",
"not",
"Match",
"(",
"r'[A-Z_]+$'",
",",
"function_name",
")",
")",
":",
"starting_func",
"=",
"True",
"if",
"starting_func",
":",
"body_found",
"=",
"False",
"for",
"start_linenum",
"in",
"xrange",
"(",
"linenum",
",",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
":",
"start_line",
"=",
"lines",
"[",
"start_linenum",
"]",
"joined_line",
"+=",
"' '",
"+",
"start_line",
".",
"lstrip",
"(",
")",
"if",
"Search",
"(",
"r'(;|})'",
",",
"start_line",
")",
":",
"# Declarations and trivial functions",
"body_found",
"=",
"True",
"break",
"# ... ignore",
"if",
"Search",
"(",
"r'{'",
",",
"start_line",
")",
":",
"body_found",
"=",
"True",
"function",
"=",
"Search",
"(",
"r'((\\w|:)*)\\('",
",",
"line",
")",
".",
"group",
"(",
"1",
")",
"if",
"Match",
"(",
"r'TEST'",
",",
"function",
")",
":",
"# Handle TEST... macros",
"parameter_regexp",
"=",
"Search",
"(",
"r'(\\(.*\\))'",
",",
"joined_line",
")",
"if",
"parameter_regexp",
":",
"# Ignore bad syntax",
"function",
"+=",
"parameter_regexp",
".",
"group",
"(",
"1",
")",
"else",
":",
"function",
"+=",
"'()'",
"function_state",
".",
"Begin",
"(",
"function",
")",
"break",
"if",
"not",
"body_found",
":",
"# No body for the function (or evidence of a non-function) was found.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/fn_size'",
",",
"5",
",",
"'Lint failed to find start of function body.'",
")",
"elif",
"Match",
"(",
"r'^\\}\\s*$'",
",",
"line",
")",
":",
"# function end",
"function_state",
".",
"Check",
"(",
"error",
",",
"filename",
",",
"linenum",
")",
"function_state",
".",
"End",
"(",
")",
"elif",
"not",
"Match",
"(",
"r'^\\s*$'",
",",
"line",
")",
":",
"function_state",
".",
"Count",
"(",
")"
] | https://github.com/vesoft-inc/nebula/blob/25a06217ebaf169e1f0e5ff6a797ba6f0c41fc35/.linters/cpp/cpplint.py#L3284-L3349 | ||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/all_reduce/python/all_reduce.py | python | _apply_unary_to_chunks | (f, chunks_by_dev) | return output | Apply a unary op to each tensor in chunks_by_dev, on same device.
Args:
f: a unary function over T @{tf.Tensor}.
chunks_by_dev: list of lists of T @{tf.Tensor}.
Returns:
new list of lists of T @{tf.Tensor} with the same structure as
chunks_by_dev containing the derived tensors. | Apply a unary op to each tensor in chunks_by_dev, on same device. | [
"Apply",
"a",
"unary",
"op",
"to",
"each",
"tensor",
"in",
"chunks_by_dev",
"on",
"same",
"device",
"."
] | def _apply_unary_to_chunks(f, chunks_by_dev):
"""Apply a unary op to each tensor in chunks_by_dev, on same device.
Args:
f: a unary function over T @{tf.Tensor}.
chunks_by_dev: list of lists of T @{tf.Tensor}.
Returns:
new list of lists of T @{tf.Tensor} with the same structure as
chunks_by_dev containing the derived tensors.
"""
output = []
for x in chunks_by_dev:
with ops.colocate_with(x[0]):
output.append([f(t) for t in x])
return output | [
"def",
"_apply_unary_to_chunks",
"(",
"f",
",",
"chunks_by_dev",
")",
":",
"output",
"=",
"[",
"]",
"for",
"x",
"in",
"chunks_by_dev",
":",
"with",
"ops",
".",
"colocate_with",
"(",
"x",
"[",
"0",
"]",
")",
":",
"output",
".",
"append",
"(",
"[",
"f",
"(",
"t",
")",
"for",
"t",
"in",
"x",
"]",
")",
"return",
"output"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/all_reduce/python/all_reduce.py#L359-L374 | |
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | deps/src/libxml2-2.9.1/python/libxml2class.py | python | URI.fragment | (self) | return ret | Get the fragment part from an URI | Get the fragment part from an URI | [
"Get",
"the",
"fragment",
"part",
"from",
"an",
"URI"
] | def fragment(self):
"""Get the fragment part from an URI """
ret = libxml2mod.xmlURIGetFragment(self._o)
return ret | [
"def",
"fragment",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlURIGetFragment",
"(",
"self",
".",
"_o",
")",
"return",
"ret"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L6184-L6187 | |
opengauss-mirror/openGauss-server | e383f1b77720a00ddbe4c0655bc85914d9b02a2b | src/gausskernel/dbmind/tools/ai_manager/tools/common_tools.py | python | CommonTools.remote_mkdir_with_mode | (path, mode, ip, username, password) | Create directory with defined mode if not exist. | Create directory with defined mode if not exist. | [
"Create",
"directory",
"with",
"defined",
"mode",
"if",
"not",
"exist",
"."
] | def remote_mkdir_with_mode(path, mode, ip, username, password):
"""
Create directory with defined mode if not exist.
"""
cmd = Constant.SHELL_CMD_DICT['createDirSimple'] % (path, mode)
status, output = CommonTools.remote_execute_cmd(ip, username, password, cmd)
if status != 0 and 'exist' not in output:
raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'remote mkdir', output))
else:
return status, output | [
"def",
"remote_mkdir_with_mode",
"(",
"path",
",",
"mode",
",",
"ip",
",",
"username",
",",
"password",
")",
":",
"cmd",
"=",
"Constant",
".",
"SHELL_CMD_DICT",
"[",
"'createDirSimple'",
"]",
"%",
"(",
"path",
",",
"mode",
")",
"status",
",",
"output",
"=",
"CommonTools",
".",
"remote_execute_cmd",
"(",
"ip",
",",
"username",
",",
"password",
",",
"cmd",
")",
"if",
"status",
"!=",
"0",
"and",
"'exist'",
"not",
"in",
"output",
":",
"raise",
"Exception",
"(",
"Errors",
".",
"EXECUTE_RESULT",
"[",
"'gauss_0401'",
"]",
"%",
"(",
"cmd",
",",
"'remote mkdir'",
",",
"output",
")",
")",
"else",
":",
"return",
"status",
",",
"output"
] | https://github.com/opengauss-mirror/openGauss-server/blob/e383f1b77720a00ddbe4c0655bc85914d9b02a2b/src/gausskernel/dbmind/tools/ai_manager/tools/common_tools.py#L112-L121 | ||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/framework/ops.py | python | Operation._id | (self) | return self._id_value | The unique integer id of this operation. | The unique integer id of this operation. | [
"The",
"unique",
"integer",
"id",
"of",
"this",
"operation",
"."
] | def _id(self):
"""The unique integer id of this operation."""
return self._id_value | [
"def",
"_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_id_value"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/framework/ops.py#L1658-L1660 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/botocore/regions.py | python | BaseEndpointResolver.get_available_partitions | (self) | Lists the partitions available to the endpoint resolver.
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"]). | Lists the partitions available to the endpoint resolver. | [
"Lists",
"the",
"partitions",
"available",
"to",
"the",
"endpoint",
"resolver",
"."
] | def get_available_partitions(self):
"""Lists the partitions available to the endpoint resolver.
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"]).
"""
raise NotImplementedError | [
"def",
"get_available_partitions",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/regions.py#L60-L65 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/_pydecimal.py | python | _log10_lb | (c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}) | return 100*len(str_c) - correction[str_c[0]] | Compute a lower bound for 100*log10(c) for a positive integer c. | Compute a lower bound for 100*log10(c) for a positive integer c. | [
"Compute",
"a",
"lower",
"bound",
"for",
"100",
"*",
"log10",
"(",
"c",
")",
"for",
"a",
"positive",
"integer",
"c",
"."
] | def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]] | [
"def",
"_log10_lb",
"(",
"c",
",",
"correction",
"=",
"{",
"'1'",
":",
"100",
",",
"'2'",
":",
"70",
",",
"'3'",
":",
"53",
",",
"'4'",
":",
"40",
",",
"'5'",
":",
"31",
",",
"'6'",
":",
"23",
",",
"'7'",
":",
"16",
",",
"'8'",
":",
"10",
",",
"'9'",
":",
"5",
"}",
")",
":",
"if",
"c",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"The argument to _log10_lb should be nonnegative.\"",
")",
"str_c",
"=",
"str",
"(",
"c",
")",
"return",
"100",
"*",
"len",
"(",
"str_c",
")",
"-",
"correction",
"[",
"str_c",
"[",
"0",
"]",
"]"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/_pydecimal.py#L6002-L6009 | |
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/debug/cli/curses_ui.py | python | CursesUI.run_ui | (self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True) | return exit_token | Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details. | Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details. | [
"Run",
"the",
"CLI",
":",
"See",
"the",
"doc",
"of",
"base_ui",
".",
"BaseUI",
".",
"run_ui",
"for",
"more",
"details",
"."
] | def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details."""
# Only one instance of the Curses UI can be running at a time, since
# otherwise they would try to both read from the same keystrokes, and write
# to the same screen.
self._single_instance_lock.acquire()
self._screen_launch(enable_mouse_on_start=enable_mouse_on_start)
# Optional initial command.
if init_command is not None:
self._dispatch_command(init_command)
if title is not None:
self._title(title, title_color=title_color)
# CLI main loop.
exit_token = self._ui_loop()
if self._on_ui_exit:
self._on_ui_exit()
self._screen_terminate()
self._single_instance_lock.release()
return exit_token | [
"def",
"run_ui",
"(",
"self",
",",
"init_command",
"=",
"None",
",",
"title",
"=",
"None",
",",
"title_color",
"=",
"None",
",",
"enable_mouse_on_start",
"=",
"True",
")",
":",
"# Only one instance of the Curses UI can be running at a time, since",
"# otherwise they would try to both read from the same keystrokes, and write",
"# to the same screen.",
"self",
".",
"_single_instance_lock",
".",
"acquire",
"(",
")",
"self",
".",
"_screen_launch",
"(",
"enable_mouse_on_start",
"=",
"enable_mouse_on_start",
")",
"# Optional initial command.",
"if",
"init_command",
"is",
"not",
"None",
":",
"self",
".",
"_dispatch_command",
"(",
"init_command",
")",
"if",
"title",
"is",
"not",
"None",
":",
"self",
".",
"_title",
"(",
"title",
",",
"title_color",
"=",
"title_color",
")",
"# CLI main loop.",
"exit_token",
"=",
"self",
".",
"_ui_loop",
"(",
")",
"if",
"self",
".",
"_on_ui_exit",
":",
"self",
".",
"_on_ui_exit",
"(",
")",
"self",
".",
"_screen_terminate",
"(",
")",
"self",
".",
"_single_instance_lock",
".",
"release",
"(",
")",
"return",
"exit_token"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/debug/cli/curses_ui.py#L480-L511 | |
herbstluftwm/herbstluftwm | 23ef0274bd4d317208eae5fea72b21478a71431b | doc/format-doc.py | python | ObjectDocPrinter.run | (self, clsname, path=[]) | print the documentation for a given class. However,
if the documentation for it has already been generated,
only insert a link to ot using clsname2anchor | print the documentation for a given class. However,
if the documentation for it has already been generated,
only insert a link to ot using clsname2anchor | [
"print",
"the",
"documentation",
"for",
"a",
"given",
"class",
".",
"However",
"if",
"the",
"documentation",
"for",
"it",
"has",
"already",
"been",
"generated",
"only",
"insert",
"a",
"link",
"to",
"ot",
"using",
"clsname2anchor"
] | def run(self, clsname, path=[]):
"""print the documentation for a given class. However,
if the documentation for it has already been generated,
only insert a link to ot using clsname2anchor
"""
reference_cls_doc = self.reference_to_class_doc(clsname, path)
if reference_cls_doc is not None:
identifier, text = reference_cls_doc
print(f'For attributes and children, see <<{identifier},{text}>>')
return
# otherwise, print it here:
identifier = self.class_doc_id(clsname)
depth = len(path)
objdoc = self.jsondoc['objects'][clsname]
print(f'[[{identifier}]]', end='' if depth > 1 else '\n')
if 'doc' in objdoc:
doc_txt = cpp_source_doc_to_asciidoc(objdoc['doc'], depth=depth)
if depth > 1:
print(multiline_for_bulletitem(doc_txt))
else:
print(doc_txt)
print('')
if path == []:
bulletprefix = ''
ws_prefix = ''
else:
bulletprefix = depth * ' ' + (depth - 1) * '*'
ws_prefix = depth * ' ' + ' ' # whitespace prefix
for _, attr in objdoc['attributes'].items():
if attr['default_value'] is not None:
default_val = ' [defaultvalue]#= ' + escape_string_value(attr['default_value']) + '#'
else:
default_val = ''
if attr.get('doc', None) is not None:
docstr = ': ' + cpp_source_doc_to_asciidoc(attr['doc'], depth=(depth + 1))
else:
docstr = ''
# add multiple formats to the entry name such that the colors work
# both in html and in the man page output
print('')
print(f"{ws_prefix}{bulletprefix}* '[datatype]#{attr['type']}#' *+[entryname]#{attr['name']}#+*{default_val}{docstr}")
for _, child in objdoc['children'].items():
docstr = cpp_source_doc_to_asciidoc(child['doc'].strip(), depth=(depth + 1)) \
if 'doc' in child else ''
# class_doc = self.jsondoc['objects'][child['type']].get('doc', '')
if len(docstr) > 0:
if not docstr.endswith('.'):
docstr += '.'
docstr += ' '
if depth > 0:
# add multiple format indicators, as for the
# attribute name above
if child['name'] is not None:
itemname = f"*+[entryname]#{child['name']}#+*"
else:
itemname = f"'[entryname]#{child['name_pattern']}#'"
bullet = '*'
else:
itemname = f"{child['name']}"
bullet = '\n==='
if depth == 0 and self.class_doc_empty(child['type']):
# do not list subsystems that are entirely empty
# at the moment
continue
if child['type'] not in self.abstractclass:
print('')
print(f"{ws_prefix}{bulletprefix}{bullet} {itemname}: {docstr}", end='')
self.run(child['type'], path=path + [child['name']])
else:
for _, subclass in self.jsondoc['objects'].items():
if child['type'] in subclass['inherits-from']:
classname = splitcamelcase(subclass['classname'])
print(f"{ws_prefix}{bulletprefix}{bullet} {itemname} can be a {classname}. {docstr} ", end='')
self.run(subclass['classname'], path=path + [child['name']]) | [
"def",
"run",
"(",
"self",
",",
"clsname",
",",
"path",
"=",
"[",
"]",
")",
":",
"reference_cls_doc",
"=",
"self",
".",
"reference_to_class_doc",
"(",
"clsname",
",",
"path",
")",
"if",
"reference_cls_doc",
"is",
"not",
"None",
":",
"identifier",
",",
"text",
"=",
"reference_cls_doc",
"print",
"(",
"f'For attributes and children, see <<{identifier},{text}>>'",
")",
"return",
"# otherwise, print it here:",
"identifier",
"=",
"self",
".",
"class_doc_id",
"(",
"clsname",
")",
"depth",
"=",
"len",
"(",
"path",
")",
"objdoc",
"=",
"self",
".",
"jsondoc",
"[",
"'objects'",
"]",
"[",
"clsname",
"]",
"print",
"(",
"f'[[{identifier}]]'",
",",
"end",
"=",
"''",
"if",
"depth",
">",
"1",
"else",
"'\\n'",
")",
"if",
"'doc'",
"in",
"objdoc",
":",
"doc_txt",
"=",
"cpp_source_doc_to_asciidoc",
"(",
"objdoc",
"[",
"'doc'",
"]",
",",
"depth",
"=",
"depth",
")",
"if",
"depth",
">",
"1",
":",
"print",
"(",
"multiline_for_bulletitem",
"(",
"doc_txt",
")",
")",
"else",
":",
"print",
"(",
"doc_txt",
")",
"print",
"(",
"''",
")",
"if",
"path",
"==",
"[",
"]",
":",
"bulletprefix",
"=",
"''",
"ws_prefix",
"=",
"''",
"else",
":",
"bulletprefix",
"=",
"depth",
"*",
"' '",
"+",
"(",
"depth",
"-",
"1",
")",
"*",
"'*'",
"ws_prefix",
"=",
"depth",
"*",
"' '",
"+",
"' '",
"# whitespace prefix",
"for",
"_",
",",
"attr",
"in",
"objdoc",
"[",
"'attributes'",
"]",
".",
"items",
"(",
")",
":",
"if",
"attr",
"[",
"'default_value'",
"]",
"is",
"not",
"None",
":",
"default_val",
"=",
"' [defaultvalue]#= '",
"+",
"escape_string_value",
"(",
"attr",
"[",
"'default_value'",
"]",
")",
"+",
"'#'",
"else",
":",
"default_val",
"=",
"''",
"if",
"attr",
".",
"get",
"(",
"'doc'",
",",
"None",
")",
"is",
"not",
"None",
":",
"docstr",
"=",
"': '",
"+",
"cpp_source_doc_to_asciidoc",
"(",
"attr",
"[",
"'doc'",
"]",
",",
"depth",
"=",
"(",
"depth",
"+",
"1",
")",
")",
"else",
":",
"docstr",
"=",
"''",
"# add multiple formats to the entry name such that the colors work",
"# both in html and in the man page output",
"print",
"(",
"''",
")",
"print",
"(",
"f\"{ws_prefix}{bulletprefix}* '[datatype]#{attr['type']}#' *+[entryname]#{attr['name']}#+*{default_val}{docstr}\"",
")",
"for",
"_",
",",
"child",
"in",
"objdoc",
"[",
"'children'",
"]",
".",
"items",
"(",
")",
":",
"docstr",
"=",
"cpp_source_doc_to_asciidoc",
"(",
"child",
"[",
"'doc'",
"]",
".",
"strip",
"(",
")",
",",
"depth",
"=",
"(",
"depth",
"+",
"1",
")",
")",
"if",
"'doc'",
"in",
"child",
"else",
"''",
"# class_doc = self.jsondoc['objects'][child['type']].get('doc', '')",
"if",
"len",
"(",
"docstr",
")",
">",
"0",
":",
"if",
"not",
"docstr",
".",
"endswith",
"(",
"'.'",
")",
":",
"docstr",
"+=",
"'.'",
"docstr",
"+=",
"' '",
"if",
"depth",
">",
"0",
":",
"# add multiple format indicators, as for the",
"# attribute name above",
"if",
"child",
"[",
"'name'",
"]",
"is",
"not",
"None",
":",
"itemname",
"=",
"f\"*+[entryname]#{child['name']}#+*\"",
"else",
":",
"itemname",
"=",
"f\"'[entryname]#{child['name_pattern']}#'\"",
"bullet",
"=",
"'*'",
"else",
":",
"itemname",
"=",
"f\"{child['name']}\"",
"bullet",
"=",
"'\\n==='",
"if",
"depth",
"==",
"0",
"and",
"self",
".",
"class_doc_empty",
"(",
"child",
"[",
"'type'",
"]",
")",
":",
"# do not list subsystems that are entirely empty",
"# at the moment",
"continue",
"if",
"child",
"[",
"'type'",
"]",
"not",
"in",
"self",
".",
"abstractclass",
":",
"print",
"(",
"''",
")",
"print",
"(",
"f\"{ws_prefix}{bulletprefix}{bullet} {itemname}: {docstr}\"",
",",
"end",
"=",
"''",
")",
"self",
".",
"run",
"(",
"child",
"[",
"'type'",
"]",
",",
"path",
"=",
"path",
"+",
"[",
"child",
"[",
"'name'",
"]",
"]",
")",
"else",
":",
"for",
"_",
",",
"subclass",
"in",
"self",
".",
"jsondoc",
"[",
"'objects'",
"]",
".",
"items",
"(",
")",
":",
"if",
"child",
"[",
"'type'",
"]",
"in",
"subclass",
"[",
"'inherits-from'",
"]",
":",
"classname",
"=",
"splitcamelcase",
"(",
"subclass",
"[",
"'classname'",
"]",
")",
"print",
"(",
"f\"{ws_prefix}{bulletprefix}{bullet} {itemname} can be a {classname}. {docstr} \"",
",",
"end",
"=",
"''",
")",
"self",
".",
"run",
"(",
"subclass",
"[",
"'classname'",
"]",
",",
"path",
"=",
"path",
"+",
"[",
"child",
"[",
"'name'",
"]",
"]",
")"
] | https://github.com/herbstluftwm/herbstluftwm/blob/23ef0274bd4d317208eae5fea72b21478a71431b/doc/format-doc.py#L195-L269 | ||
alibaba/weex_js_engine | 2bdf4b6f020c1fc99c63f649718f6faf7e27fdde | jni/v8core/v8/build/gyp/pylib/gyp/generator/make.py | python | EscapeMakeVariableExpansion | (s) | return s.replace('$', '$$') | Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally. | Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally. | [
"Make",
"has",
"its",
"own",
"variable",
"expansion",
"syntax",
"using",
"$",
".",
"We",
"must",
"escape",
"it",
"for",
"string",
"to",
"be",
"interpreted",
"literally",
"."
] | def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$') | [
"def",
"EscapeMakeVariableExpansion",
"(",
"s",
")",
":",
"return",
"s",
".",
"replace",
"(",
"'$'",
",",
"'$$'",
")"
] | https://github.com/alibaba/weex_js_engine/blob/2bdf4b6f020c1fc99c63f649718f6faf7e27fdde/jni/v8core/v8/build/gyp/pylib/gyp/generator/make.py#L579-L582 | |
NeoGeographyToolkit/StereoPipeline | eedf54a919fb5cce1ab0e280bb0df4050763aa11 | src/asp/Python/asp_system_utils.py | python | get_num_cpus | () | return num_cpus | Return the number of CPUs on the current machine. | Return the number of CPUs on the current machine. | [
"Return",
"the",
"number",
"of",
"CPUs",
"on",
"the",
"current",
"machine",
"."
] | def get_num_cpus():
"""Return the number of CPUs on the current machine."""
import sys
if sys.version_info < (2, 6, 0):
num_cpus = 8
else:
from multiprocessing import cpu_count
num_cpus = cpu_count()
return num_cpus | [
"def",
"get_num_cpus",
"(",
")",
":",
"import",
"sys",
"if",
"sys",
".",
"version_info",
"<",
"(",
"2",
",",
"6",
",",
"0",
")",
":",
"num_cpus",
"=",
"8",
"else",
":",
"from",
"multiprocessing",
"import",
"cpu_count",
"num_cpus",
"=",
"cpu_count",
"(",
")",
"return",
"num_cpus"
] | https://github.com/NeoGeographyToolkit/StereoPipeline/blob/eedf54a919fb5cce1ab0e280bb0df4050763aa11/src/asp/Python/asp_system_utils.py#L60-L70 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/prompt-toolkit/py3/prompt_toolkit/input/win32_pipe.py | python | Win32PipeInput.close | (self) | Close pipe handles. | Close pipe handles. | [
"Close",
"pipe",
"handles",
"."
] | def close(self) -> None:
"Close pipe handles."
windll.kernel32.CloseHandle(self._event)
self._closed = True | [
"def",
"close",
"(",
"self",
")",
"->",
"None",
":",
"windll",
".",
"kernel32",
".",
"CloseHandle",
"(",
"self",
".",
"_event",
")",
"self",
".",
"_closed",
"=",
"True"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/input/win32_pipe.py#L126-L129 | ||
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/symtable.py | python | SymbolTable.has_exec | (self) | return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC)) | Return true if the scope uses exec | Return true if the scope uses exec | [
"Return",
"true",
"if",
"the",
"scope",
"uses",
"exec"
] | def has_exec(self):
"""Return true if the scope uses exec"""
return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC)) | [
"def",
"has_exec",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"_table",
".",
"optimized",
"&",
"(",
"OPT_EXEC",
"|",
"OPT_BARE_EXEC",
")",
")"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/symtable.py#L90-L92 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/grid.py | python | GridTableBase._setOORInfo | (*args, **kwargs) | return _grid.GridTableBase__setOORInfo(*args, **kwargs) | _setOORInfo(self, PyObject _self) | _setOORInfo(self, PyObject _self) | [
"_setOORInfo",
"(",
"self",
"PyObject",
"_self",
")"
] | def _setOORInfo(*args, **kwargs):
"""_setOORInfo(self, PyObject _self)"""
return _grid.GridTableBase__setOORInfo(*args, **kwargs) | [
"def",
"_setOORInfo",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"GridTableBase__setOORInfo",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/grid.py#L770-L772 | |
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/external/bazel_tools/third_party/py/concurrent/futures/_base.py | python | Future.set_exception | (self, exception) | Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests. | Sets the result of the future as being the given exception. | [
"Sets",
"the",
"result",
"of",
"the",
"future",
"as",
"being",
"the",
"given",
"exception",
"."
] | def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks() | [
"def",
"set_exception",
"(",
"self",
",",
"exception",
")",
":",
"with",
"self",
".",
"_condition",
":",
"self",
".",
"_exception",
"=",
"exception",
"self",
".",
"_state",
"=",
"FINISHED",
"for",
"waiter",
"in",
"self",
".",
"_waiters",
":",
"waiter",
".",
"add_exception",
"(",
"self",
")",
"self",
".",
"_condition",
".",
"notify_all",
"(",
")",
"self",
".",
"_invoke_callbacks",
"(",
")"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/external/bazel_tools/third_party/py/concurrent/futures/_base.py#L496-L507 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_core.py | python | Rect2D.MoveRightTopTo | (*args, **kwargs) | return _core_.Rect2D_MoveRightTopTo(*args, **kwargs) | MoveRightTopTo(self, Point2D pt) | MoveRightTopTo(self, Point2D pt) | [
"MoveRightTopTo",
"(",
"self",
"Point2D",
"pt",
")"
] | def MoveRightTopTo(*args, **kwargs):
"""MoveRightTopTo(self, Point2D pt)"""
return _core_.Rect2D_MoveRightTopTo(*args, **kwargs) | [
"def",
"MoveRightTopTo",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Rect2D_MoveRightTopTo",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L1935-L1937 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_gdi.py | python | RegionIterator.GetRect | (*args, **kwargs) | return _gdi_.RegionIterator_GetRect(*args, **kwargs) | GetRect(self) -> Rect | GetRect(self) -> Rect | [
"GetRect",
"(",
"self",
")",
"-",
">",
"Rect"
] | def GetRect(*args, **kwargs):
"""GetRect(self) -> Rect"""
return _gdi_.RegionIterator_GetRect(*args, **kwargs) | [
"def",
"GetRect",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"RegionIterator_GetRect",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L1694-L1696 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py2/scipy/optimize/_remove_redundancy.py | python | _get_densest | (A, eligibleRows) | return np.argmax(rowCounts * eligibleRows) | Returns the index of the densest row of A. Ignores rows that are not
eligible for consideration.
Parameters
----------
A : 2-D array
An array representing a matrix
eligibleRows : 1-D logical array
Values indicate whether the corresponding row of A is eligible
to be considered
Returns
-------
i_densest : int
Index of the densest row in A eligible for consideration | Returns the index of the densest row of A. Ignores rows that are not
eligible for consideration. | [
"Returns",
"the",
"index",
"of",
"the",
"densest",
"row",
"of",
"A",
".",
"Ignores",
"rows",
"that",
"are",
"not",
"eligible",
"for",
"consideration",
"."
] | def _get_densest(A, eligibleRows):
"""
Returns the index of the densest row of A. Ignores rows that are not
eligible for consideration.
Parameters
----------
A : 2-D array
An array representing a matrix
eligibleRows : 1-D logical array
Values indicate whether the corresponding row of A is eligible
to be considered
Returns
-------
i_densest : int
Index of the densest row in A eligible for consideration
"""
rowCounts = _row_count(A)
return np.argmax(rowCounts * eligibleRows) | [
"def",
"_get_densest",
"(",
"A",
",",
"eligibleRows",
")",
":",
"rowCounts",
"=",
"_row_count",
"(",
"A",
")",
"return",
"np",
".",
"argmax",
"(",
"rowCounts",
"*",
"eligibleRows",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/optimize/_remove_redundancy.py#L34-L54 | |
baidu/lac | 3e10dbed9bfd87bea927c84a6627a167c17b5617 | python/LAC/_compat.py | python | strdecode | (sentence) | return sentence | string to unicode
Args:
sentence: a string of utf-8 or gbk
Returns:
input's unicode result | string to unicode | [
"string",
"to",
"unicode"
] | def strdecode(sentence):
"""string to unicode
Args:
sentence: a string of utf-8 or gbk
Returns:
input's unicode result
"""
if not isinstance(sentence, text_type):
try:
sentence = sentence.decode('utf-8')
except UnicodeDecodeError:
sentence = sentence.decode('gbk', 'ignore')
return sentence | [
"def",
"strdecode",
"(",
"sentence",
")",
":",
"if",
"not",
"isinstance",
"(",
"sentence",
",",
"text_type",
")",
":",
"try",
":",
"sentence",
"=",
"sentence",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"sentence",
"=",
"sentence",
".",
"decode",
"(",
"'gbk'",
",",
"'ignore'",
")",
"return",
"sentence"
] | https://github.com/baidu/lac/blob/3e10dbed9bfd87bea927c84a6627a167c17b5617/python/LAC/_compat.py#L51-L66 | |
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | xmlNode.nextElementSibling | (self) | return __tmp | Finds the first closest next sibling of the node which is
an element node. Note the handling of entities references
is different than in the W3C DOM element traversal spec
since we don't have back reference from entities content to
entities references. | Finds the first closest next sibling of the node which is
an element node. Note the handling of entities references
is different than in the W3C DOM element traversal spec
since we don't have back reference from entities content to
entities references. | [
"Finds",
"the",
"first",
"closest",
"next",
"sibling",
"of",
"the",
"node",
"which",
"is",
"an",
"element",
"node",
".",
"Note",
"the",
"handling",
"of",
"entities",
"references",
"is",
"different",
"than",
"in",
"the",
"W3C",
"DOM",
"element",
"traversal",
"spec",
"since",
"we",
"don",
"t",
"have",
"back",
"reference",
"from",
"entities",
"content",
"to",
"entities",
"references",
"."
] | def nextElementSibling(self):
"""Finds the first closest next sibling of the node which is
an element node. Note the handling of entities references
is different than in the W3C DOM element traversal spec
since we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlNextElementSibling(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp | [
"def",
"nextElementSibling",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlNextElementSibling",
"(",
"self",
".",
"_o",
")",
"if",
"ret",
"is",
"None",
":",
"return",
"None",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L3423-L3432 | |
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | third_party/protobuf/python/google/protobuf/internal/decoder.py | python | ReadTag | (buffer, pos) | return (buffer[start:pos], pos) | Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python. | Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. | [
"Read",
"a",
"tag",
"from",
"the",
"buffer",
"and",
"return",
"a",
"(",
"tag_bytes",
"new_pos",
")",
"tuple",
"."
def ReadTag(buffer, pos):
  """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.

  We return the raw bytes of the tag rather than decoding them.  The raw
  bytes can then be used to look up the proper decoder.  This effectively
  allows us to trade some work that would be done in pure-python (decoding
  a varint) for work that is done in C (searching for a byte string in a
  hash table).  In a low-level language it would be much cheaper to decode
  the varint and use that, but not in Python.
  """
  # Scan past the varint: every byte with the high bit set is a
  # continuation byte; the final byte has the high bit clear.
  end = pos
  while ord(buffer[end]) & 0x80:
    end += 1
  end += 1
  return (buffer[pos:end], end)
"def",
"ReadTag",
"(",
"buffer",
",",
"pos",
")",
":",
"start",
"=",
"pos",
"while",
"ord",
"(",
"buffer",
"[",
"pos",
"]",
")",
"&",
"0x80",
":",
"pos",
"+=",
"1",
"pos",
"+=",
"1",
"return",
"(",
"buffer",
"[",
"start",
":",
"pos",
"]",
",",
"pos",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/protobuf/python/google/protobuf/internal/decoder.py#L160-L175 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py2/scipy/linalg/_interpolative_backend.py | python | idd_diffsnorm | (m, n, matvect, matvect2, matvec, matvec2, its=20) | return _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its) | Estimate spectral norm of the difference of two real matrices by the
randomized power method.
:param m:
Matrix row dimension.
:type m: int
:param n:
Matrix column dimension.
:type n: int
:param matvect:
Function to apply the transpose of the first matrix to a vector, with
call signature `y = matvect(x)`, where `x` and `y` are the input and
output vectors, respectively.
:type matvect: function
:param matvect2:
Function to apply the transpose of the second matrix to a vector, with
call signature `y = matvect2(x)`, where `x` and `y` are the input and
output vectors, respectively.
:type matvect2: function
:param matvec:
Function to apply the first matrix to a vector, with call signature
`y = matvec(x)`, where `x` and `y` are the input and output vectors,
respectively.
:type matvec: function
:param matvec2:
Function to apply the second matrix to a vector, with call signature
`y = matvec2(x)`, where `x` and `y` are the input and output vectors,
respectively.
:type matvec2: function
:param its:
Number of power method iterations.
:type its: int
:return:
Spectral norm estimate of matrix difference.
:rtype: float | Estimate spectral norm of the difference of two real matrices by the
randomized power method. | [
"Estimate",
"spectral",
"norm",
"of",
"the",
"difference",
"of",
"two",
"real",
"matrices",
"by",
"the",
"randomized",
"power",
"method",
"."
def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20):
    """
    Estimate the spectral norm of the difference of two real matrices via
    the randomized power method.

    :param m:
        Row dimension of both matrices.
    :type m: int
    :param n:
        Column dimension of both matrices.
    :type n: int
    :param matvect:
        Applies the transpose of the first matrix to a vector:
        ``y = matvect(x)``.
    :type matvect: function
    :param matvect2:
        Applies the transpose of the second matrix to a vector:
        ``y = matvect2(x)``.
    :type matvect2: function
    :param matvec:
        Applies the first matrix to a vector: ``y = matvec(x)``.
    :type matvec: function
    :param matvec2:
        Applies the second matrix to a vector: ``y = matvec2(x)``.
    :type matvec2: function
    :param its:
        Number of power method iterations.
    :type its: int

    :return:
        Spectral norm estimate of the matrix difference.
    :rtype: float
    """
    # Delegate directly to the compiled interpolative-decomposition backend.
    args = (m, n, matvect, matvect2, matvec, matvec2, its)
    return _id.idd_diffsnorm(*args)
"def",
"idd_diffsnorm",
"(",
"m",
",",
"n",
",",
"matvect",
",",
"matvect2",
",",
"matvec",
",",
"matvec2",
",",
"its",
"=",
"20",
")",
":",
"return",
"_id",
".",
"idd_diffsnorm",
"(",
"m",
",",
"n",
",",
"matvect",
",",
"matvect2",
",",
"matvec",
",",
"matvec2",
",",
"its",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/linalg/_interpolative_backend.py#L374-L413 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py | python | DataFrame._box_col_values | (self, values, items) | return klass(values, index=self.index, name=items, fastpath=True) | Provide boxed values for a column. | Provide boxed values for a column. | [
"Provide",
"boxed",
"values",
"for",
"a",
"column",
"."
] | def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True) | [
"def",
"_box_col_values",
"(",
"self",
",",
"values",
",",
"items",
")",
":",
"klass",
"=",
"self",
".",
"_constructor_sliced",
"return",
"klass",
"(",
"values",
",",
"index",
"=",
"self",
".",
"index",
",",
"name",
"=",
"items",
",",
"fastpath",
"=",
"True",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py#L3073-L3078 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/docutils/parsers/rst/states.py | python | Body.is_enumerated_list_item | (self, ordinal, sequence, format) | return None | Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator. | Check validity based on the ordinal value and the second line. | [
"Check",
"validity",
"based",
"on",
"the",
"ordinal",
"value",
"and",
"the",
"second",
"line",
"."
def is_enumerated_list_item(self, ordinal, sequence, format):
    """
    Check validity based on the ordinal value and the second line.

    Return true if the ordinal is valid and the second line is blank,
    indented, or starts with the next enumerator or an auto-enumerator.
    """
    if ordinal is None:
        return None
    try:
        next_line = self.state_machine.next_line()
    except EOFError:                        # end of input lines
        self.state_machine.previous_line()
        return 1
    # This was only a peek: restore the state machine's position.
    self.state_machine.previous_line()
    if not next_line[:1].strip():           # blank or indented
        return 1
    result = self.make_enumerator(ordinal + 1, sequence, format)
    if result:
        next_enumerator, auto_enumerator = result
        try:
            if (next_line.startswith(next_enumerator) or
                next_line.startswith(auto_enumerator)):
                return 1
        except TypeError:
            pass
    return None
"def",
"is_enumerated_list_item",
"(",
"self",
",",
"ordinal",
",",
"sequence",
",",
"format",
")",
":",
"if",
"ordinal",
"is",
"None",
":",
"return",
"None",
"try",
":",
"next_line",
"=",
"self",
".",
"state_machine",
".",
"next_line",
"(",
")",
"except",
"EOFError",
":",
"# end of input lines",
"self",
".",
"state_machine",
".",
"previous_line",
"(",
")",
"return",
"1",
"else",
":",
"self",
".",
"state_machine",
".",
"previous_line",
"(",
")",
"if",
"not",
"next_line",
"[",
":",
"1",
"]",
".",
"strip",
"(",
")",
":",
"# blank or indented",
"return",
"1",
"result",
"=",
"self",
".",
"make_enumerator",
"(",
"ordinal",
"+",
"1",
",",
"sequence",
",",
"format",
")",
"if",
"result",
":",
"next_enumerator",
",",
"auto_enumerator",
"=",
"result",
"try",
":",
"if",
"(",
"next_line",
".",
"startswith",
"(",
"next_enumerator",
")",
"or",
"next_line",
".",
"startswith",
"(",
"auto_enumerator",
")",
")",
":",
"return",
"1",
"except",
"TypeError",
":",
"pass",
"return",
"None"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/parsers/rst/states.py#L1368-L1395 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/summary/summary.py | python | histogram | (name, values, collections=None, family=None) | return val | Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize your data's
distribution in TensorBoard. You can see a detailed explanation of the
TensorBoard histogram dashboard
[here](https://www.tensorflow.org/get_started/tensorboard_histograms).
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer. | Outputs a `Summary` protocol buffer with a histogram. | [
"Outputs",
"a",
"Summary",
"protocol",
"buffer",
"with",
"a",
"histogram",
"."
def histogram(name, values, collections=None, family=None):
  # pylint: disable=line-too-long
  """Outputs a `Summary` protocol buffer with a histogram.

  Adding a histogram summary lets TensorBoard visualize the distribution
  of `values`.  See the TensorBoard histogram dashboard documentation:
  https://www.tensorflow.org/get_started/tensorboard_histograms

  The generated
  [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  has a single summary value holding a histogram built from `values`.

  This op reports an `InvalidArgument` error if any value is not finite.

  Args:
    name: A name for the generated node; also the series name shown in
      TensorBoard.
    values: A real numeric `Tensor` of any shape; its entries populate the
      histogram.
    collections: Optional list of graph collections keys the new summary op
      is added to.  Defaults to `[GraphKeys.SUMMARIES]`.
    family: Optional; if provided, used as the prefix of the summary tag
      name, which controls the tab name used for display on Tensorboard.

  Returns:
    A scalar `Tensor` of type `string` containing the serialized `Summary`
    protocol buffer.
  """
  if _distribute_summary_op_util.skip_summary():
    return _constant_op.constant('')
  scope_mgr = _summary_op_util.summary_scope(
      name, family, values=[values], default_name='HistogramSummary')
  with scope_mgr as (tag, scope):
    summary = _gen_logging_ops.histogram_summary(
        tag=tag, values=values, name=scope)
    _summary_op_util.collect(summary, collections, [_ops.GraphKeys.SUMMARIES])
  return summary
"def",
"histogram",
"(",
"name",
",",
"values",
",",
"collections",
"=",
"None",
",",
"family",
"=",
"None",
")",
":",
"# pylint: disable=line-too-long",
"if",
"_distribute_summary_op_util",
".",
"skip_summary",
"(",
")",
":",
"return",
"_constant_op",
".",
"constant",
"(",
"''",
")",
"with",
"_summary_op_util",
".",
"summary_scope",
"(",
"name",
",",
"family",
",",
"values",
"=",
"[",
"values",
"]",
",",
"default_name",
"=",
"'HistogramSummary'",
")",
"as",
"(",
"tag",
",",
"scope",
")",
":",
"val",
"=",
"_gen_logging_ops",
".",
"histogram_summary",
"(",
"tag",
"=",
"tag",
",",
"values",
"=",
"values",
",",
"name",
"=",
"scope",
")",
"_summary_op_util",
".",
"collect",
"(",
"val",
",",
"collections",
",",
"[",
"_ops",
".",
"GraphKeys",
".",
"SUMMARIES",
"]",
")",
"return",
"val"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/summary/summary.py#L144-L181 | |
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/numpy/multiarray.py | python | _as_mx_np_array | (object, device=None, zero_copy=False) | Convert arrays or any array member of container to mxnet.numpy.ndarray on device. | Convert arrays or any array member of container to mxnet.numpy.ndarray on device. | [
"Convert",
"arrays",
"or",
"any",
"array",
"member",
"of",
"container",
"to",
"mxnet",
".",
"numpy",
".",
"ndarray",
"on",
"device",
"."
def _as_mx_np_array(object, device=None, zero_copy=False):
    """Convert arrays or any array member of container to mxnet.numpy.ndarray on device.

    Parameters
    ----------
    object : None, ndarray, numpy.ndarray, scalar, bool, list or tuple
        The value to convert.  Lists/tuples are converted element-wise,
        preserving the container class.
    device : Device, optional
        Target device for newly created arrays.
    zero_copy : bool, optional
        If True, share memory with a C-contiguous ``numpy.ndarray`` input
        instead of copying it.

    Returns
    -------
    The converted value (mx ndarrays pass through unchanged; scalars are
    returned as-is).

    Raises
    ------
    TypeError
        If the input type is not supported.
    """
    if object is None or isinstance(object, ndarray):
        return object
    elif isinstance(object, _np.ndarray):
        from_numpy = ndarray_from_numpy(ndarray, array)
        # Zero-copy only works for C-contiguous buffers.
        return from_numpy(object, zero_copy and object.flags['C_CONTIGUOUS'])
    elif isinstance(object, (integer_types, numeric_types)):
        return object
    elif isinstance(object, (_np.bool_, bool)):
        # BUGFIX: `_np.bool` was a deprecated alias of the builtin `bool`
        # and was removed in NumPy 1.24; referencing it raised
        # AttributeError on modern NumPy.  Use the builtin directly.
        return array(object, dtype=_np.bool_, device=device)
    elif isinstance(object, (list, tuple)):
        tmp = [_as_mx_np_array(arr, device=device, zero_copy=zero_copy) for arr in object]
        return object.__class__(tmp)
    else:
        raise TypeError('Does not support converting {} to mx.np.ndarray.'.format(str(type(object))))
"def",
"_as_mx_np_array",
"(",
"object",
",",
"device",
"=",
"None",
",",
"zero_copy",
"=",
"False",
")",
":",
"if",
"object",
"is",
"None",
"or",
"isinstance",
"(",
"object",
",",
"ndarray",
")",
":",
"return",
"object",
"elif",
"isinstance",
"(",
"object",
",",
"_np",
".",
"ndarray",
")",
":",
"from_numpy",
"=",
"ndarray_from_numpy",
"(",
"ndarray",
",",
"array",
")",
"return",
"from_numpy",
"(",
"object",
",",
"zero_copy",
"and",
"object",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
")",
"elif",
"isinstance",
"(",
"object",
",",
"(",
"integer_types",
",",
"numeric_types",
")",
")",
":",
"return",
"object",
"elif",
"isinstance",
"(",
"object",
",",
"(",
"_np",
".",
"bool_",
",",
"_np",
".",
"bool",
")",
")",
":",
"return",
"array",
"(",
"object",
",",
"dtype",
"=",
"_np",
".",
"bool_",
",",
"device",
"=",
"device",
")",
"elif",
"isinstance",
"(",
"object",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"tmp",
"=",
"[",
"_as_mx_np_array",
"(",
"arr",
",",
"device",
"=",
"device",
",",
"zero_copy",
"=",
"zero_copy",
")",
"for",
"arr",
"in",
"object",
"]",
"return",
"object",
".",
"__class__",
"(",
"tmp",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Does not support converting {} to mx.np.ndarray.'",
".",
"format",
"(",
"str",
"(",
"type",
"(",
"object",
")",
")",
")",
")"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/numpy/multiarray.py#L190-L205 | ||
rbgirshick/caffe-fast-rcnn | 28a579eaf0668850705598b3075b8969f22226d9 | scripts/cpp_lint.py | python | PrintUsage | (message) | Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message. | Prints a brief usage string and exits, optionally with an error message. | [
"Prints",
"a",
"brief",
"usage",
"string",
"and",
"exits",
"optionally",
"with",
"an",
"error",
"message",
"."
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  # Exit with the fatal-error text when a message was given, else status 1.
  sys.exit('\nFATAL ERROR: ' + message if message else 1)
"def",
"PrintUsage",
"(",
"message",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"_USAGE",
")",
"if",
"message",
":",
"sys",
".",
"exit",
"(",
"'\\nFATAL ERROR: '",
"+",
"message",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] | https://github.com/rbgirshick/caffe-fast-rcnn/blob/28a579eaf0668850705598b3075b8969f22226d9/scripts/cpp_lint.py#L4757-L4767 | ||
baidu-research/persistent-rnn | dcb55b7bc4669021a9da82a3e847c7fe1377ef87 | site_scons/site_tools/nvcc.py | python | generate | (env) | Add Builders and construction variables for CUDA compilers to an Environment. | Add Builders and construction variables for CUDA compilers to an Environment. | [
"Add",
"Builders",
"and",
"construction",
"variables",
"for",
"CUDA",
"compilers",
"to",
"an",
"Environment",
"."
def generate(env):
    """
    Add Builders and construction variables for CUDA compilers to an Environment.

    Registers a PTXFile builder, teaches the static/shared Object builders
    about CUDA source suffixes, sets NVCC command variables, and appends the
    CUDA toolkit's bin/lib/include paths to the environment.  If the CUDA
    toolkit cannot be found, installs dummy no-op builders instead.
    """
    if not cuda_exists(env):
        # Toolkit not found: register stub builders so the build still loads.
        print('Failed to build NVCC tool')
        generate_dummy(env)
        return

    # create a builder that makes PTX files from .cu files
    ptx_builder = SCons.Builder.Builder(action = '$NVCC -ptx $NVCCFLAGS $_NVCCWRAPCFLAGS $NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES -o $TARGET',
                                        emitter = {},
                                        suffix = '.ptx',
                                        src_suffix = CUDASuffixes)
    env['BUILDERS']['PTXFile'] = ptx_builder

    print('Building NVCC tool')

    # create builders that make static & shared objects from .cu files
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    for suffix in CUDASuffixes:
        # Add this suffix to the list of things buildable by Object
        static_obj.add_action(suffix, '$NVCCCOM')
        shared_obj.add_action(suffix, '$SHNVCCCOM')
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
        # NOTE(review): re-assigned on every iteration; the last suffix wins,
        # but shared_obj is the same builder object each time so this is
        # presumably harmless — confirm intended placement inside the loop.
        env['BUILDERS']['CUDASharedObject'] = shared_obj

        # Add this suffix to the list of things scannable
        SCons.Tool.SourceFileScanner.add_scanner(suffix, CUDAScanner)

    add_common_nvcc_variables(env)

    # set the "CUDA Compiler Command" environment variable
    env['NVCC'] = 'nvcc'
    env['SHNVCC'] = 'nvcc'

    # set the include path, and pass both c compiler flags and c++ compiler flags
    add_nvcc_flags(env)

    # 'NVCC Command'
    env['NVCCCOM'] = '$NVCC -o $TARGET -c $_NVCCWRAPCFLAGS $NVCCWRAPCCFLAGS $_NVCCCOMCOM $NVCCFLAGS $SOURCES'
    env['SHNVCCCOM'] = '$SHNVCC -o $TARGET -c $SHNVCCFLAGS $_NVCCWRAPSHCFLAGS $_NVCCWRAPSHCCFLAGS $_NVCCCOMCOM $NVCCFLAGS $SOURCES'

    # XXX add code to generate builders for other miscellaneous
    # CUDA files here, such as .gpu, etc.

    # Make the toolkit's binaries, libraries, and headers visible to builds.
    (bin_path,lib_path,inc_path) = get_cuda_paths(env['cuda_path'])

    env.Append(LIBPATH = [lib_path])
    env.Append(RPATH = [lib_path])
    env.Append(CPPPATH = [inc_path])
    env.PrependENVPath('PATH', bin_path)
"def",
"generate",
"(",
"env",
")",
":",
"if",
"not",
"cuda_exists",
"(",
"env",
")",
":",
"print",
"(",
"'Failed to build NVCC tool'",
")",
"generate_dummy",
"(",
"env",
")",
"return",
"# create a builder that makes PTX files from .cu files",
"ptx_builder",
"=",
"SCons",
".",
"Builder",
".",
"Builder",
"(",
"action",
"=",
"'$NVCC -ptx $NVCCFLAGS $_NVCCWRAPCFLAGS $NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES -o $TARGET'",
",",
"emitter",
"=",
"{",
"}",
",",
"suffix",
"=",
"'.ptx'",
",",
"src_suffix",
"=",
"CUDASuffixes",
")",
"env",
"[",
"'BUILDERS'",
"]",
"[",
"'PTXFile'",
"]",
"=",
"ptx_builder",
"print",
"(",
"'Building NVCC tool'",
")",
"# create builders that make static & shared objects from .cu files",
"static_obj",
",",
"shared_obj",
"=",
"SCons",
".",
"Tool",
".",
"createObjBuilders",
"(",
"env",
")",
"for",
"suffix",
"in",
"CUDASuffixes",
":",
"# Add this suffix to the list of things buildable by Object",
"static_obj",
".",
"add_action",
"(",
"suffix",
",",
"'$NVCCCOM'",
")",
"shared_obj",
".",
"add_action",
"(",
"suffix",
",",
"'$SHNVCCCOM'",
")",
"static_obj",
".",
"add_emitter",
"(",
"suffix",
",",
"SCons",
".",
"Defaults",
".",
"StaticObjectEmitter",
")",
"shared_obj",
".",
"add_emitter",
"(",
"suffix",
",",
"SCons",
".",
"Defaults",
".",
"SharedObjectEmitter",
")",
"env",
"[",
"'BUILDERS'",
"]",
"[",
"'CUDASharedObject'",
"]",
"=",
"shared_obj",
"# Add this suffix to the list of things scannable",
"SCons",
".",
"Tool",
".",
"SourceFileScanner",
".",
"add_scanner",
"(",
"suffix",
",",
"CUDAScanner",
")",
"add_common_nvcc_variables",
"(",
"env",
")",
"# set the \"CUDA Compiler Command\" environment variable",
"env",
"[",
"'NVCC'",
"]",
"=",
"'nvcc'",
"env",
"[",
"'SHNVCC'",
"]",
"=",
"'nvcc'",
"# set the include path, and pass both c compiler flags and c++ compiler flags",
"add_nvcc_flags",
"(",
"env",
")",
"# 'NVCC Command'",
"env",
"[",
"'NVCCCOM'",
"]",
"=",
"'$NVCC -o $TARGET -c $_NVCCWRAPCFLAGS $NVCCWRAPCCFLAGS $_NVCCCOMCOM $NVCCFLAGS $SOURCES'",
"env",
"[",
"'SHNVCCCOM'",
"]",
"=",
"'$SHNVCC -o $TARGET -c $SHNVCCFLAGS $_NVCCWRAPSHCFLAGS $_NVCCWRAPSHCCFLAGS $_NVCCCOMCOM $NVCCFLAGS $SOURCES'",
"# XXX add code to generate builders for other miscellaneous",
"# CUDA files here, such as .gpu, etc.",
"(",
"bin_path",
",",
"lib_path",
",",
"inc_path",
")",
"=",
"get_cuda_paths",
"(",
"env",
"[",
"'cuda_path'",
"]",
")",
"env",
".",
"Append",
"(",
"LIBPATH",
"=",
"[",
"lib_path",
"]",
")",
"env",
".",
"Append",
"(",
"RPATH",
"=",
"[",
"lib_path",
"]",
")",
"env",
".",
"Append",
"(",
"CPPPATH",
"=",
"[",
"inc_path",
"]",
")",
"env",
".",
"PrependENVPath",
"(",
"'PATH'",
",",
"bin_path",
")"
] | https://github.com/baidu-research/persistent-rnn/blob/dcb55b7bc4669021a9da82a3e847c7fe1377ef87/site_scons/site_tools/nvcc.py#L142-L195 | ||
indutny/candor | 48e7260618f5091c80a3416828e2808cad3ea22e | tools/gyp/pylib/gyp/easy_xml.py | python | WriteXmlIfChanged | (content, path, encoding='utf-8', pretty=False,
win32=False) | Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines. | Writes the XML content to disk, touching the file only if it has changed. | [
"Writes",
"the",
"XML",
"content",
"to",
"disk",
"touching",
"the",
"file",
"only",
"if",
"it",
"has",
"changed",
"."
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.

  Args:
    content: The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
    win32: True to force Windows (CRLF) line endings even when the host
        platform's os.linesep differs.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')

  # Get the old content.  A missing or unreadable file counts as "changed".
  # BUGFIX: the original bare `except:` swallowed every exception, including
  # KeyboardInterrupt/SystemExit; catch only I/O failures, and make sure the
  # file handles are closed even if read/write raises.
  try:
    f = open(path, 'r')
    try:
      existing = f.read()
    finally:
      f.close()
  except IOError:
    existing = None

  # It has changed, write it
  if existing != xml_string:
    f = open(path, 'w')
    try:
      f.write(xml_string)
    finally:
      f.close()
"def",
"WriteXmlIfChanged",
"(",
"content",
",",
"path",
",",
"encoding",
"=",
"'utf-8'",
",",
"pretty",
"=",
"False",
",",
"win32",
"=",
"False",
")",
":",
"xml_string",
"=",
"XmlToString",
"(",
"content",
",",
"encoding",
",",
"pretty",
")",
"if",
"win32",
"and",
"os",
".",
"linesep",
"!=",
"'\\r\\n'",
":",
"xml_string",
"=",
"xml_string",
".",
"replace",
"(",
"'\\n'",
",",
"'\\r\\n'",
")",
"# Get the old content",
"try",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'r'",
")",
"existing",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"except",
":",
"existing",
"=",
"None",
"# It has changed, write it",
"if",
"existing",
"!=",
"xml_string",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"xml_string",
")",
"f",
".",
"close",
"(",
")"
] | https://github.com/indutny/candor/blob/48e7260618f5091c80a3416828e2808cad3ea22e/tools/gyp/pylib/gyp/easy_xml.py#L105-L131 | ||
epam/Indigo | 30e40b4b1eb9bae0207435a26cfcb81ddcc42be1 | api/python/indigo/__init__.py | python | IndigoObject.countRGroups | (self) | return self.dispatcher._checkResult(
Indigo._lib.indigoCountRGroups(self.id)
) | Molecule method returns the number of r-groups
Returns:
int: number of r-groups | Molecule method returns the number of r-groups | [
"Molecule",
"method",
"returns",
"the",
"number",
"of",
"r",
"-",
"groups"
def countRGroups(self):
    """Molecule method returns the number of r-groups

    Returns:
        int: number of r-groups
    """
    self.dispatcher._setSessionId()
    raw_count = Indigo._lib.indigoCountRGroups(self.id)
    return self.dispatcher._checkResult(raw_count)
"def",
"countRGroups",
"(",
"self",
")",
":",
"self",
".",
"dispatcher",
".",
"_setSessionId",
"(",
")",
"return",
"self",
".",
"dispatcher",
".",
"_checkResult",
"(",
"Indigo",
".",
"_lib",
".",
"indigoCountRGroups",
"(",
"self",
".",
"id",
")",
")"
] | https://github.com/epam/Indigo/blob/30e40b4b1eb9bae0207435a26cfcb81ddcc42be1/api/python/indigo/__init__.py#L881-L890 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/layers/python/layers/feature_column.py | python | real_valued_column | (column_name,
dimension=1,
default_value=None,
dtype=dtypes.float32,
normalizer=None) | Creates a `_RealValuedColumn` for dense numeric data.
Args:
column_name: A string defining real valued column name.
dimension: An integer specifying dimension of the real valued column. The
default is 1.
default_value: A single value compatible with dtype or a list of values
compatible with dtype which the column takes on during tf.Example parsing
if data is missing. When dimension is not None, a default value of None
will cause tf.io.parse_example to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every dimension. If a list of values is provided,
the length of the list should be equal to the value of `dimension`. Only
scalar default value is supported in case dimension is not specified.
dtype: defines the type of values. Default value is tf.float32. Must be a
non-quantized, real integer or floating point type.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
variable length columns, the normalizer should expect an input_tensor of
type `SparseTensor`.
Returns:
A _RealValuedColumn.
Raises:
TypeError: if dimension is not an int
ValueError: if dimension is not a positive integer
TypeError: if default_value is a list but its length is not equal to the
value of `dimension`.
TypeError: if default_value is not compatible with dtype.
ValueError: if dtype is not convertible to tf.float32. | Creates a `_RealValuedColumn` for dense numeric data. | [
"Creates",
"a",
"_RealValuedColumn",
"for",
"dense",
"numeric",
"data",
"."
] | def real_valued_column(column_name,
dimension=1,
default_value=None,
dtype=dtypes.float32,
normalizer=None):
"""Creates a `_RealValuedColumn` for dense numeric data.
Args:
column_name: A string defining real valued column name.
dimension: An integer specifying dimension of the real valued column. The
default is 1.
default_value: A single value compatible with dtype or a list of values
compatible with dtype which the column takes on during tf.Example parsing
if data is missing. When dimension is not None, a default value of None
will cause tf.io.parse_example to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every dimension. If a list of values is provided,
the length of the list should be equal to the value of `dimension`. Only
scalar default value is supported in case dimension is not specified.
dtype: defines the type of values. Default value is tf.float32. Must be a
non-quantized, real integer or floating point type.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
variable length columns, the normalizer should expect an input_tensor of
type `SparseTensor`.
Returns:
A _RealValuedColumn.
Raises:
TypeError: if dimension is not an int
ValueError: if dimension is not a positive integer
TypeError: if default_value is a list but its length is not equal to the
value of `dimension`.
TypeError: if default_value is not compatible with dtype.
ValueError: if dtype is not convertible to tf.float32.
"""
if dimension is None:
raise TypeError("dimension must be an integer. Use the "
"_real_valued_var_len_column for variable length features."
"dimension: {}, column_name: {}".format(
dimension, column_name))
if not isinstance(dimension, int):
raise TypeError("dimension must be an integer. "
"dimension: {}, column_name: {}".format(
dimension, column_name))
if dimension < 1:
raise ValueError("dimension must be greater than 0. "
"dimension: {}, column_name: {}".format(
dimension, column_name))
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, int):
if dtype.is_integer:
default_value = ([default_value for _ in range(dimension)]
if dimension else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if dtype.is_floating:
default_value = float(default_value)
default_value = ([default_value for _ in range(dimension)]
if dimension else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, float):
if dtype.is_floating and (not dtype.is_integer):
default_value = ([default_value for _ in range(dimension)]
if dimension else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, list):
if len(default_value) != dimension:
raise ValueError(
"The length of default_value must be equal to dimension. "
"default_value: {}, dimension: {}, column_name: {}".format(
default_value, dimension, column_name))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = True
is_list_all_float = True
for v in default_value:
if not isinstance(v, int):
is_list_all_int = False
if not (isinstance(v, float) or isinstance(v, int)):
is_list_all_float = False
if is_list_all_int:
if dtype.is_integer:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
elif dtype.is_floating:
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if is_list_all_float:
if dtype.is_floating and (not dtype.is_integer):
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
raise TypeError("default_value must be compatible with dtype. "
"default_value: {}, dtype: {}, column_name: {}".format(
default_value, dtype, column_name)) | [
"def",
"real_valued_column",
"(",
"column_name",
",",
"dimension",
"=",
"1",
",",
"default_value",
"=",
"None",
",",
"dtype",
"=",
"dtypes",
".",
"float32",
",",
"normalizer",
"=",
"None",
")",
":",
"if",
"dimension",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"dimension must be an integer. Use the \"",
"\"_real_valued_var_len_column for variable length features.\"",
"\"dimension: {}, column_name: {}\"",
".",
"format",
"(",
"dimension",
",",
"column_name",
")",
")",
"if",
"not",
"isinstance",
"(",
"dimension",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"dimension must be an integer. \"",
"\"dimension: {}, column_name: {}\"",
".",
"format",
"(",
"dimension",
",",
"column_name",
")",
")",
"if",
"dimension",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"dimension must be greater than 0. \"",
"\"dimension: {}, column_name: {}\"",
".",
"format",
"(",
"dimension",
",",
"column_name",
")",
")",
"if",
"not",
"(",
"dtype",
".",
"is_integer",
"or",
"dtype",
".",
"is_floating",
")",
":",
"raise",
"ValueError",
"(",
"\"dtype must be convertible to float. \"",
"\"dtype: {}, column_name: {}\"",
".",
"format",
"(",
"dtype",
",",
"column_name",
")",
")",
"if",
"default_value",
"is",
"None",
":",
"return",
"_RealValuedColumn",
"(",
"column_name",
",",
"dimension",
",",
"default_value",
",",
"dtype",
",",
"normalizer",
")",
"if",
"isinstance",
"(",
"default_value",
",",
"int",
")",
":",
"if",
"dtype",
".",
"is_integer",
":",
"default_value",
"=",
"(",
"[",
"default_value",
"for",
"_",
"in",
"range",
"(",
"dimension",
")",
"]",
"if",
"dimension",
"else",
"[",
"default_value",
"]",
")",
"return",
"_RealValuedColumn",
"(",
"column_name",
",",
"dimension",
",",
"default_value",
",",
"dtype",
",",
"normalizer",
")",
"if",
"dtype",
".",
"is_floating",
":",
"default_value",
"=",
"float",
"(",
"default_value",
")",
"default_value",
"=",
"(",
"[",
"default_value",
"for",
"_",
"in",
"range",
"(",
"dimension",
")",
"]",
"if",
"dimension",
"else",
"[",
"default_value",
"]",
")",
"return",
"_RealValuedColumn",
"(",
"column_name",
",",
"dimension",
",",
"default_value",
",",
"dtype",
",",
"normalizer",
")",
"if",
"isinstance",
"(",
"default_value",
",",
"float",
")",
":",
"if",
"dtype",
".",
"is_floating",
"and",
"(",
"not",
"dtype",
".",
"is_integer",
")",
":",
"default_value",
"=",
"(",
"[",
"default_value",
"for",
"_",
"in",
"range",
"(",
"dimension",
")",
"]",
"if",
"dimension",
"else",
"[",
"default_value",
"]",
")",
"return",
"_RealValuedColumn",
"(",
"column_name",
",",
"dimension",
",",
"default_value",
",",
"dtype",
",",
"normalizer",
")",
"if",
"isinstance",
"(",
"default_value",
",",
"list",
")",
":",
"if",
"len",
"(",
"default_value",
")",
"!=",
"dimension",
":",
"raise",
"ValueError",
"(",
"\"The length of default_value must be equal to dimension. \"",
"\"default_value: {}, dimension: {}, column_name: {}\"",
".",
"format",
"(",
"default_value",
",",
"dimension",
",",
"column_name",
")",
")",
"# Check if the values in the list are all integers or are convertible to",
"# floats.",
"is_list_all_int",
"=",
"True",
"is_list_all_float",
"=",
"True",
"for",
"v",
"in",
"default_value",
":",
"if",
"not",
"isinstance",
"(",
"v",
",",
"int",
")",
":",
"is_list_all_int",
"=",
"False",
"if",
"not",
"(",
"isinstance",
"(",
"v",
",",
"float",
")",
"or",
"isinstance",
"(",
"v",
",",
"int",
")",
")",
":",
"is_list_all_float",
"=",
"False",
"if",
"is_list_all_int",
":",
"if",
"dtype",
".",
"is_integer",
":",
"return",
"_RealValuedColumn",
"(",
"column_name",
",",
"dimension",
",",
"default_value",
",",
"dtype",
",",
"normalizer",
")",
"elif",
"dtype",
".",
"is_floating",
":",
"default_value",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"default_value",
"]",
"return",
"_RealValuedColumn",
"(",
"column_name",
",",
"dimension",
",",
"default_value",
",",
"dtype",
",",
"normalizer",
")",
"if",
"is_list_all_float",
":",
"if",
"dtype",
".",
"is_floating",
"and",
"(",
"not",
"dtype",
".",
"is_integer",
")",
":",
"default_value",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"default_value",
"]",
"return",
"_RealValuedColumn",
"(",
"column_name",
",",
"dimension",
",",
"default_value",
",",
"dtype",
",",
"normalizer",
")",
"raise",
"TypeError",
"(",
"\"default_value must be compatible with dtype. \"",
"\"default_value: {}, dtype: {}, column_name: {}\"",
".",
"format",
"(",
"default_value",
",",
"dtype",
",",
"column_name",
")",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/layers/python/layers/feature_column.py#L1922-L2034 | ||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/examples/learn/text_classification.py | python | rnn_model | (features, labels, mode) | return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode) | RNN model to predict from sequence of words to a class. | RNN model to predict from sequence of words to a class. | [
"RNN",
"model",
"to",
"predict",
"from",
"sequence",
"of",
"words",
"to",
"a",
"class",
"."
] | def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for softmax
# classification over output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode) | [
"def",
"rnn_model",
"(",
"features",
",",
"labels",
",",
"mode",
")",
":",
"# Convert indexes of words into embeddings.",
"# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then",
"# maps word indexes of the sequence into [batch_size, sequence_length,",
"# EMBEDDING_SIZE].",
"word_vectors",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"embed_sequence",
"(",
"features",
"[",
"WORDS_FEATURE",
"]",
",",
"vocab_size",
"=",
"n_words",
",",
"embed_dim",
"=",
"EMBEDDING_SIZE",
")",
"# Split into list of embedding per word, while removing doc length dim.",
"# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].",
"word_list",
"=",
"tf",
".",
"unstack",
"(",
"word_vectors",
",",
"axis",
"=",
"1",
")",
"# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.",
"cell",
"=",
"tf",
".",
"contrib",
".",
"rnn",
".",
"GRUCell",
"(",
"EMBEDDING_SIZE",
")",
"# Create an unrolled Recurrent Neural Networks to length of",
"# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.",
"_",
",",
"encoding",
"=",
"tf",
".",
"contrib",
".",
"rnn",
".",
"static_rnn",
"(",
"cell",
",",
"word_list",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Given encoding of RNN, take encoding of last step (e.g hidden size of the",
"# neural network of last step) and pass it as features for softmax",
"# classification over output classes.",
"logits",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"encoding",
",",
"MAX_LABEL",
",",
"activation",
"=",
"None",
")",
"return",
"estimator_spec_for_softmax_classification",
"(",
"logits",
"=",
"logits",
",",
"labels",
"=",
"labels",
",",
"mode",
"=",
"mode",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/examples/learn/text_classification.py#L80-L105 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py2/pkg_resources/_vendor/packaging/specifiers.py | python | BaseSpecifier.__eq__ | (self, other) | Returns a boolean representing whether or not the two Specifier like
objects are equal. | Returns a boolean representing whether or not the two Specifier like
objects are equal. | [
"Returns",
"a",
"boolean",
"representing",
"whether",
"or",
"not",
"the",
"two",
"Specifier",
"like",
"objects",
"are",
"equal",
"."
] | def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
""" | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/pkg_resources/_vendor/packaging/specifiers.py#L37-L41 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/mapreduce/mapreduce/input_readers.py | python | _OldAbstractDatastoreInputReader._choose_split_points | (cls, sorted_keys, shard_count) | return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)] | Returns the best split points given a random set of db.Keys. | Returns the best split points given a random set of db.Keys. | [
"Returns",
"the",
"best",
"split",
"points",
"given",
"a",
"random",
"set",
"of",
"db",
".",
"Keys",
"."
] | def _choose_split_points(cls, sorted_keys, shard_count):
"""Returns the best split points given a random set of db.Keys."""
assert len(sorted_keys) >= shard_count
index_stride = len(sorted_keys) / float(shard_count)
return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)] | [
"def",
"_choose_split_points",
"(",
"cls",
",",
"sorted_keys",
",",
"shard_count",
")",
":",
"assert",
"len",
"(",
"sorted_keys",
")",
">=",
"shard_count",
"index_stride",
"=",
"len",
"(",
"sorted_keys",
")",
"/",
"float",
"(",
"shard_count",
")",
"return",
"[",
"sorted_keys",
"[",
"int",
"(",
"round",
"(",
"index_stride",
"*",
"i",
")",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"shard_count",
")",
"]"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/mapreduce/mapreduce/input_readers.py#L972-L977 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/io/json/_normalize.py | python | nested_to_record | (
ds,
prefix: str = "",
sep: str = ".",
level: int = 0,
max_level: int | None = None,
) | return new_ds | A simplified json_normalize
Converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
sep : str, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
level: int, optional, default: 0
The number of levels in the json string.
max_level: int, optional, default: None
The max depth to normalize.
.. versionadded:: 0.25.0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
>>> nested_to_record(
... dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))
... )
{\
'flat1': 1, \
'dict1.c': 1, \
'dict1.d': 2, \
'nested.e.c': 1, \
'nested.e.d': 2, \
'nested.d': 2\
} | A simplified json_normalize | [
"A",
"simplified",
"json_normalize"
] | def nested_to_record(
ds,
prefix: str = "",
sep: str = ".",
level: int = 0,
max_level: int | None = None,
):
"""
A simplified json_normalize
Converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
sep : str, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
level: int, optional, default: 0
The number of levels in the json string.
max_level: int, optional, default: None
The max depth to normalize.
.. versionadded:: 0.25.0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
>>> nested_to_record(
... dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))
... )
{\
'flat1': 1, \
'dict1.c': 1, \
'dict1.d': 2, \
'nested.e.c': 1, \
'nested.e.d': 2, \
'nested.d': 2\
}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, str):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
# flatten if type is dict and
# current dict level < maximum level provided and
# only dicts gets recurse-flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict) or (
max_level is not None and level >= max_level
):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds | [
"def",
"nested_to_record",
"(",
"ds",
",",
"prefix",
":",
"str",
"=",
"\"\"",
",",
"sep",
":",
"str",
"=",
"\".\"",
",",
"level",
":",
"int",
"=",
"0",
",",
"max_level",
":",
"int",
"|",
"None",
"=",
"None",
",",
")",
":",
"singleton",
"=",
"False",
"if",
"isinstance",
"(",
"ds",
",",
"dict",
")",
":",
"ds",
"=",
"[",
"ds",
"]",
"singleton",
"=",
"True",
"new_ds",
"=",
"[",
"]",
"for",
"d",
"in",
"ds",
":",
"new_d",
"=",
"copy",
".",
"deepcopy",
"(",
"d",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"# each key gets renamed with prefix",
"if",
"not",
"isinstance",
"(",
"k",
",",
"str",
")",
":",
"k",
"=",
"str",
"(",
"k",
")",
"if",
"level",
"==",
"0",
":",
"newkey",
"=",
"k",
"else",
":",
"newkey",
"=",
"prefix",
"+",
"sep",
"+",
"k",
"# flatten if type is dict and",
"# current dict level < maximum level provided and",
"# only dicts gets recurse-flattened",
"# only at level>1 do we rename the rest of the keys",
"if",
"not",
"isinstance",
"(",
"v",
",",
"dict",
")",
"or",
"(",
"max_level",
"is",
"not",
"None",
"and",
"level",
">=",
"max_level",
")",
":",
"if",
"level",
"!=",
"0",
":",
"# so we skip copying for top level, common case",
"v",
"=",
"new_d",
".",
"pop",
"(",
"k",
")",
"new_d",
"[",
"newkey",
"]",
"=",
"v",
"continue",
"else",
":",
"v",
"=",
"new_d",
".",
"pop",
"(",
"k",
")",
"new_d",
".",
"update",
"(",
"nested_to_record",
"(",
"v",
",",
"newkey",
",",
"sep",
",",
"level",
"+",
"1",
",",
"max_level",
")",
")",
"new_ds",
".",
"append",
"(",
"new_d",
")",
"if",
"singleton",
":",
"return",
"new_ds",
"[",
"0",
"]",
"return",
"new_ds"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/io/json/_normalize.py#L39-L119 | |
openthread/openthread | 9fcdbed9c526c70f1556d1ed84099c1535c7cd32 | tools/otci/otci/otci.py | python | OTCI.get_commissioner_session_id | (self) | return self.__parse_int(self.execute_command('commissioner sessionid')) | Get current commissioner session id. | Get current commissioner session id. | [
"Get",
"current",
"commissioner",
"session",
"id",
"."
] | def get_commissioner_session_id(self) -> int:
"""Get current commissioner session id."""
return self.__parse_int(self.execute_command('commissioner sessionid')) | [
"def",
"get_commissioner_session_id",
"(",
"self",
")",
"->",
"int",
":",
"return",
"self",
".",
"__parse_int",
"(",
"self",
".",
"execute_command",
"(",
"'commissioner sessionid'",
")",
")"
] | https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/tools/otci/otci/otci.py#L1439-L1441 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_core.py | python | CommandEvent.SetInt | (*args, **kwargs) | return _core_.CommandEvent_SetInt(*args, **kwargs) | SetInt(self, int i) | SetInt(self, int i) | [
"SetInt",
"(",
"self",
"int",
"i",
")"
] | def SetInt(*args, **kwargs):
"""SetInt(self, int i)"""
return _core_.CommandEvent_SetInt(*args, **kwargs) | [
"def",
"SetInt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"CommandEvent_SetInt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L5273-L5275 | |
PaddlePaddle/Anakin | 5fd68a6cc4c4620cd1a30794c1bf06eebd3f4730 | tools/external_converter_v2/parser/onnx/onnx_trans_utils.py | python | parse_ImageScaler | (onnx_node, weights, graph) | parse ImageScaler
:param onnx_node:
:param weights:
:param graph:
:return: | parse ImageScaler
:param onnx_node:
:param weights:
:param graph:
:return: | [
"parse",
"ImageScaler",
":",
"param",
"onnx_node",
":",
":",
"param",
"weights",
":",
":",
"param",
"graph",
":",
":",
"return",
":"
] | def parse_ImageScaler(onnx_node, weights, graph):
"""
parse ImageScaler
:param onnx_node:
:param weights:
:param graph:
:return:
"""
onnx_node['visited'] = True
onnx_node['ak_type'] = 'Scale'
ak_attr = onnx_node['ak_attr']
scale_val = onnx_node['onnx_attr']['scale']
shape = [1, 1, 1, 3]
scale_val = [1.0, 1.0, 1.0]
if 'scale' in onnx_node['onnx_attr']:
scale_val = onnx_node['onnx_attr']['scale']
if type(scale_val) is 'float':
scale_val =[ scale_val, scale_val, scale_val]
scale_np = np.full(shape, scale_val) #np.arange([scale_val])
weight_tensor = {}
weight_tensor['shape'] = shape
weight_tensor['data'] = scale_np
weight_tensor['dtype'] = 'float32'
ak_attr['weights'] = weight_tensor
bias_val = [1.0]
if 'bias' in onnx_node['onnx_attr']:
bias_val = onnx_node['onnx_attr']['bias']
# print 'bias: ', len(bias_val)
shape_b = [len(bias_val)]
# print 'shape_b: ', shape_b
bias_tensor = {}
bias_tensor['shape'] = shape_b
bias_tensor['data'] = bias_val
bias_tensor['dtype'] = 'float32'
ak_attr['bias'] = bias_tensor | [
"def",
"parse_ImageScaler",
"(",
"onnx_node",
",",
"weights",
",",
"graph",
")",
":",
"onnx_node",
"[",
"'visited'",
"]",
"=",
"True",
"onnx_node",
"[",
"'ak_type'",
"]",
"=",
"'Scale'",
"ak_attr",
"=",
"onnx_node",
"[",
"'ak_attr'",
"]",
"scale_val",
"=",
"onnx_node",
"[",
"'onnx_attr'",
"]",
"[",
"'scale'",
"]",
"shape",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"3",
"]",
"scale_val",
"=",
"[",
"1.0",
",",
"1.0",
",",
"1.0",
"]",
"if",
"'scale'",
"in",
"onnx_node",
"[",
"'onnx_attr'",
"]",
":",
"scale_val",
"=",
"onnx_node",
"[",
"'onnx_attr'",
"]",
"[",
"'scale'",
"]",
"if",
"type",
"(",
"scale_val",
")",
"is",
"'float'",
":",
"scale_val",
"=",
"[",
"scale_val",
",",
"scale_val",
",",
"scale_val",
"]",
"scale_np",
"=",
"np",
".",
"full",
"(",
"shape",
",",
"scale_val",
")",
"#np.arange([scale_val])",
"weight_tensor",
"=",
"{",
"}",
"weight_tensor",
"[",
"'shape'",
"]",
"=",
"shape",
"weight_tensor",
"[",
"'data'",
"]",
"=",
"scale_np",
"weight_tensor",
"[",
"'dtype'",
"]",
"=",
"'float32'",
"ak_attr",
"[",
"'weights'",
"]",
"=",
"weight_tensor",
"bias_val",
"=",
"[",
"1.0",
"]",
"if",
"'bias'",
"in",
"onnx_node",
"[",
"'onnx_attr'",
"]",
":",
"bias_val",
"=",
"onnx_node",
"[",
"'onnx_attr'",
"]",
"[",
"'bias'",
"]",
"# print 'bias: ', len(bias_val)",
"shape_b",
"=",
"[",
"len",
"(",
"bias_val",
")",
"]",
"# print 'shape_b: ', shape_b",
"bias_tensor",
"=",
"{",
"}",
"bias_tensor",
"[",
"'shape'",
"]",
"=",
"shape_b",
"bias_tensor",
"[",
"'data'",
"]",
"=",
"bias_val",
"bias_tensor",
"[",
"'dtype'",
"]",
"=",
"'float32'",
"ak_attr",
"[",
"'bias'",
"]",
"=",
"bias_tensor"
] | https://github.com/PaddlePaddle/Anakin/blob/5fd68a6cc4c4620cd1a30794c1bf06eebd3f4730/tools/external_converter_v2/parser/onnx/onnx_trans_utils.py#L1104-L1140 | ||
google/clif | cab24d6a105609a65c95a36a1712ae3c20c7b5df | clif/python/pyext.py | python | Module._WrapAllCallables | (self, c, cname, ln, class_ns, only_pyobjas) | Recursively process callable returns and params types of AST.Type c. | Recursively process callable returns and params types of AST.Type c. | [
"Recursively",
"process",
"callable",
"returns",
"and",
"params",
"types",
"of",
"AST",
".",
"Type",
"c",
"."
] | def _WrapAllCallables(self, c, cname, ln, class_ns, only_pyobjas):
"""Recursively process callable returns and params types of AST.Type c."""
for i, r in enumerate(c.returns):
if r.type.HasField('callable'):
for s in self.WrapOneCallable(
r.type, cname, 'ret%d' % i, ln, class_ns, only_pyobjas):
yield s
for i, p in enumerate(c.params):
if p.type.HasField('callable'):
for s in self.WrapOneCallable(
p.type, cname, 'arg%d' % i, ln, class_ns, only_pyobjas):
yield s | [
"def",
"_WrapAllCallables",
"(",
"self",
",",
"c",
",",
"cname",
",",
"ln",
",",
"class_ns",
",",
"only_pyobjas",
")",
":",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"c",
".",
"returns",
")",
":",
"if",
"r",
".",
"type",
".",
"HasField",
"(",
"'callable'",
")",
":",
"for",
"s",
"in",
"self",
".",
"WrapOneCallable",
"(",
"r",
".",
"type",
",",
"cname",
",",
"'ret%d'",
"%",
"i",
",",
"ln",
",",
"class_ns",
",",
"only_pyobjas",
")",
":",
"yield",
"s",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"c",
".",
"params",
")",
":",
"if",
"p",
".",
"type",
".",
"HasField",
"(",
"'callable'",
")",
":",
"for",
"s",
"in",
"self",
".",
"WrapOneCallable",
"(",
"p",
".",
"type",
",",
"cname",
",",
"'arg%d'",
"%",
"i",
",",
"ln",
",",
"class_ns",
",",
"only_pyobjas",
")",
":",
"yield",
"s"
] | https://github.com/google/clif/blob/cab24d6a105609a65c95a36a1712ae3c20c7b5df/clif/python/pyext.py#L294-L305 | ||
GJDuck/LowFat | ecf6a0f0fa1b73a27a626cf493cc39e477b6faea | llvm-4.0.0.src/tools/clang/bindings/python/clang/cindex.py | python | Cursor.enum_value | (self) | return self._enum_value | Return the value of an enum constant. | Return the value of an enum constant. | [
"Return",
"the",
"value",
"of",
"an",
"enum",
"constant",
"."
] | def enum_value(self):
"""Return the value of an enum constant."""
if not hasattr(self, '_enum_value'):
assert self.kind == CursorKind.ENUM_CONSTANT_DECL
# Figure out the underlying type of the enum to know if it
# is a signed or unsigned quantity.
underlying_type = self.type
if underlying_type.kind == TypeKind.ENUM:
underlying_type = underlying_type.get_declaration().enum_type
if underlying_type.kind in (TypeKind.CHAR_U,
TypeKind.UCHAR,
TypeKind.CHAR16,
TypeKind.CHAR32,
TypeKind.USHORT,
TypeKind.UINT,
TypeKind.ULONG,
TypeKind.ULONGLONG,
TypeKind.UINT128):
self._enum_value = \
conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
else:
self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
return self._enum_value | [
"def",
"enum_value",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_enum_value'",
")",
":",
"assert",
"self",
".",
"kind",
"==",
"CursorKind",
".",
"ENUM_CONSTANT_DECL",
"# Figure out the underlying type of the enum to know if it",
"# is a signed or unsigned quantity.",
"underlying_type",
"=",
"self",
".",
"type",
"if",
"underlying_type",
".",
"kind",
"==",
"TypeKind",
".",
"ENUM",
":",
"underlying_type",
"=",
"underlying_type",
".",
"get_declaration",
"(",
")",
".",
"enum_type",
"if",
"underlying_type",
".",
"kind",
"in",
"(",
"TypeKind",
".",
"CHAR_U",
",",
"TypeKind",
".",
"UCHAR",
",",
"TypeKind",
".",
"CHAR16",
",",
"TypeKind",
".",
"CHAR32",
",",
"TypeKind",
".",
"USHORT",
",",
"TypeKind",
".",
"UINT",
",",
"TypeKind",
".",
"ULONG",
",",
"TypeKind",
".",
"ULONGLONG",
",",
"TypeKind",
".",
"UINT128",
")",
":",
"self",
".",
"_enum_value",
"=",
"conf",
".",
"lib",
".",
"clang_getEnumConstantDeclUnsignedValue",
"(",
"self",
")",
"else",
":",
"self",
".",
"_enum_value",
"=",
"conf",
".",
"lib",
".",
"clang_getEnumConstantDeclValue",
"(",
"self",
")",
"return",
"self",
".",
"_enum_value"
] | https://github.com/GJDuck/LowFat/blob/ecf6a0f0fa1b73a27a626cf493cc39e477b6faea/llvm-4.0.0.src/tools/clang/bindings/python/clang/cindex.py#L1562-L1584 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/propgrid.py | python | PropertyGrid.Create | (*args, **kwargs) | return _propgrid.PropertyGrid_Create(*args, **kwargs) | Create(self, Window parent, int id=ID_ANY, Point pos=DefaultPosition,
Size size=DefaultSize, long style=(0),
String name=wxPropertyGridNameStr) -> bool | Create(self, Window parent, int id=ID_ANY, Point pos=DefaultPosition,
Size size=DefaultSize, long style=(0),
String name=wxPropertyGridNameStr) -> bool | [
"Create",
"(",
"self",
"Window",
"parent",
"int",
"id",
"=",
"ID_ANY",
"Point",
"pos",
"=",
"DefaultPosition",
"Size",
"size",
"=",
"DefaultSize",
"long",
"style",
"=",
"(",
"0",
")",
"String",
"name",
"=",
"wxPropertyGridNameStr",
")",
"-",
">",
"bool"
] | def Create(*args, **kwargs):
"""
Create(self, Window parent, int id=ID_ANY, Point pos=DefaultPosition,
Size size=DefaultSize, long style=(0),
String name=wxPropertyGridNameStr) -> bool
"""
return _propgrid.PropertyGrid_Create(*args, **kwargs) | [
"def",
"Create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGrid_Create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/propgrid.py#L2011-L2017 | |
SpenceKonde/megaTinyCore | 1c4a70b18a149fe6bcb551dfa6db11ca50b8997b | megaavr/tools/libs/pyedbglib/protocols/avr8protocol.py | python | Avr8Protocol.regfile_write | (self, data) | return self.memory_write(self.AVR8_MEMTYPE_REGFILE, 0, data) | Writes the AVR registe file (R0::R31)
:param data: register array
:return: | Writes the AVR registe file (R0::R31) | [
"Writes",
"the",
"AVR",
"registe",
"file",
"(",
"R0",
"::",
"R31",
")"
] | def regfile_write(self, data):
"""
Writes the AVR registe file (R0::R31)
:param data: register array
:return:
"""
if len(data) != 32:
raise ValueError("Invalid data length for regfile")
self.logger.debug("Writing register file")
return self.memory_write(self.AVR8_MEMTYPE_REGFILE, 0, data) | [
"def",
"regfile_write",
"(",
"self",
",",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"!=",
"32",
":",
"raise",
"ValueError",
"(",
"\"Invalid data length for regfile\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Writing register file\"",
")",
"return",
"self",
".",
"memory_write",
"(",
"self",
".",
"AVR8_MEMTYPE_REGFILE",
",",
"0",
",",
"data",
")"
] | https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pyedbglib/protocols/avr8protocol.py#L444-L454 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/subprocess.py | python | runner_with_spinner_message | (message) | return runner | Provide a subprocess_runner that shows a spinner message.
Intended for use with for pep517's Pep517HookCaller. Thus, the runner has
an API that matches what's expected by Pep517HookCaller.subprocess_runner. | Provide a subprocess_runner that shows a spinner message. | [
"Provide",
"a",
"subprocess_runner",
"that",
"shows",
"a",
"spinner",
"message",
"."
] | def runner_with_spinner_message(message):
# type: (str) -> Callable[..., None]
"""Provide a subprocess_runner that shows a spinner message.
Intended for use with for pep517's Pep517HookCaller. Thus, the runner has
an API that matches what's expected by Pep517HookCaller.subprocess_runner.
"""
def runner(
cmd, # type: List[str]
cwd=None, # type: Optional[str]
extra_environ=None # type: Optional[Mapping[str, Any]]
):
# type: (...) -> None
with open_spinner(message) as spinner:
call_subprocess(
cmd,
cwd=cwd,
extra_environ=extra_environ,
spinner=spinner,
)
return runner | [
"def",
"runner_with_spinner_message",
"(",
"message",
")",
":",
"# type: (str) -> Callable[..., None]",
"def",
"runner",
"(",
"cmd",
",",
"# type: List[str]",
"cwd",
"=",
"None",
",",
"# type: Optional[str]",
"extra_environ",
"=",
"None",
"# type: Optional[Mapping[str, Any]]",
")",
":",
"# type: (...) -> None",
"with",
"open_spinner",
"(",
"message",
")",
"as",
"spinner",
":",
"call_subprocess",
"(",
"cmd",
",",
"cwd",
"=",
"cwd",
",",
"extra_environ",
"=",
"extra_environ",
",",
"spinner",
"=",
"spinner",
",",
")",
"return",
"runner"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/subprocess.py#L547-L591 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.