nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/ndarray/numpy/_op.py | python | not_equal | (x1, x2, out=None) | return _api_internal.not_equal(x1, x2, out) | Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal(np.ones(2, 1)), np.zeros(1, 3))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False]) | Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal(np.ones(2, 1)), np.zeros(1, 3))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False]) | [
"Return",
"(",
"x1",
"!",
"=",
"x2",
")",
"element",
"-",
"wise",
".",
"Parameters",
"----------",
"x1",
"x2",
":",
"ndarrays",
"or",
"scalars",
"Input",
"arrays",
".",
"If",
"x1",
".",
"shape",
"!",
"=",
"x2",
".",
"shape",
"they",
"must",
"be",
"... | def not_equal(x1, x2, out=None):
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal(np.ones(2, 1)), np.zeros(1, 3))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False])
"""
if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
return _np.not_equal(x1, x2, out=out)
return _api_internal.not_equal(x1, x2, out) | [
"def",
"not_equal",
"(",
"x1",
",",
"x2",
",",
"out",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"x1",
",",
"numeric_types",
")",
"and",
"isinstance",
"(",
"x2",
",",
"numeric_types",
")",
":",
"return",
"_np",
".",
"not_equal",
"(",
"x1",
",",
... | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/ndarray/numpy/_op.py#L7388-L7418 | |
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/processor/conversion/aoc/pregen_processor.py | python | AoCPregenSubprocessor.generate_attributes | (full_data_set, pregen_converter_group) | Generate Attribute objects.
:param full_data_set: GenieObjectContainer instance that
contains all relevant data for the conversion
process.
:type full_data_set: ...dataformat.aoc.genie_object_container.GenieObjectContainer
:param pregen_converter_group: GenieObjectGroup instance that stores
pregenerated API objects for referencing with
ForwardRef
:type pregen_converter_group: ...dataformat.aoc.genie_object_container.GenieObjectGroup | Generate Attribute objects. | [
"Generate",
"Attribute",
"objects",
"."
] | def generate_attributes(full_data_set, pregen_converter_group):
"""
Generate Attribute objects.
:param full_data_set: GenieObjectContainer instance that
contains all relevant data for the conversion
process.
:type full_data_set: ...dataformat.aoc.genie_object_container.GenieObjectContainer
:param pregen_converter_group: GenieObjectGroup instance that stores
pregenerated API objects for referencing with
ForwardRef
:type pregen_converter_group: ...dataformat.aoc.genie_object_container.GenieObjectGroup
"""
pregen_nyan_objects = full_data_set.pregen_nyan_objects
api_objects = full_data_set.nyan_api_objects
# TODO: Fill translations
# =======================================================================
attribute_parent = "engine.util.attribute.Attribute"
attributes_location = "data/util/attribute/"
# =======================================================================
# HP
# =======================================================================
health_ref_in_modpack = "util.attribute.types.Health"
health_raw_api_object = RawAPIObject(health_ref_in_modpack,
"Health", api_objects,
attributes_location)
health_raw_api_object.set_filename("types")
health_raw_api_object.add_raw_parent(attribute_parent)
name_forward_ref = ForwardRef(pregen_converter_group,
"util.attribute.types.Health.HealthName")
health_raw_api_object.add_raw_member("name", name_forward_ref,
attribute_parent)
abbrv_forward_ref = ForwardRef(pregen_converter_group,
"util.attribute.types.Health.HealthAbbreviation")
health_raw_api_object.add_raw_member("abbreviation", abbrv_forward_ref,
attribute_parent)
pregen_converter_group.add_raw_api_object(health_raw_api_object)
pregen_nyan_objects.update({health_ref_in_modpack: health_raw_api_object})
name_value_parent = "engine.util.language.translated.type.TranslatedString"
health_name_ref_in_modpack = "util.attribute.types.Health.HealthName"
health_name_value = RawAPIObject(health_name_ref_in_modpack, "HealthName",
api_objects, attributes_location)
health_name_value.set_filename("types")
health_name_value.add_raw_parent(name_value_parent)
health_name_value.add_raw_member("translations", [], name_value_parent)
pregen_converter_group.add_raw_api_object(health_name_value)
pregen_nyan_objects.update({health_name_ref_in_modpack: health_name_value})
abbrv_value_parent = "engine.util.language.translated.type.TranslatedString"
health_abbrv_ref_in_modpack = "util.attribute.types.Health.HealthAbbreviation"
health_abbrv_value = RawAPIObject(health_abbrv_ref_in_modpack, "HealthAbbreviation",
api_objects, attributes_location)
health_abbrv_value.set_filename("types")
health_abbrv_value.add_raw_parent(abbrv_value_parent)
health_abbrv_value.add_raw_member("translations", [], abbrv_value_parent)
pregen_converter_group.add_raw_api_object(health_abbrv_value)
pregen_nyan_objects.update({health_abbrv_ref_in_modpack: health_abbrv_value})
# =======================================================================
# Faith
# =======================================================================
faith_ref_in_modpack = "util.attribute.types.Faith"
faith_raw_api_object = RawAPIObject(faith_ref_in_modpack,
"Faith", api_objects,
attributes_location)
faith_raw_api_object.set_filename("types")
faith_raw_api_object.add_raw_parent(attribute_parent)
name_forward_ref = ForwardRef(pregen_converter_group,
"util.attribute.types.Faith.FaithName")
faith_raw_api_object.add_raw_member("name", name_forward_ref,
attribute_parent)
abbrv_forward_ref = ForwardRef(pregen_converter_group,
"util.attribute.types.Faith.FaithAbbreviation")
faith_raw_api_object.add_raw_member("abbreviation", abbrv_forward_ref,
attribute_parent)
pregen_converter_group.add_raw_api_object(faith_raw_api_object)
pregen_nyan_objects.update({faith_ref_in_modpack: faith_raw_api_object})
name_value_parent = "engine.util.language.translated.type.TranslatedString"
faith_name_ref_in_modpack = "util.attribute.types.Faith.FaithName"
faith_name_value = RawAPIObject(faith_name_ref_in_modpack, "FaithName",
api_objects, attributes_location)
faith_name_value.set_filename("types")
faith_name_value.add_raw_parent(name_value_parent)
faith_name_value.add_raw_member("translations", [], name_value_parent)
pregen_converter_group.add_raw_api_object(faith_name_value)
pregen_nyan_objects.update({faith_name_ref_in_modpack: faith_name_value})
abbrv_value_parent = "engine.util.language.translated.type.TranslatedString"
faith_abbrv_ref_in_modpack = "util.attribute.types.Faith.FaithAbbreviation"
faith_abbrv_value = RawAPIObject(faith_abbrv_ref_in_modpack, "FaithAbbreviation",
api_objects, attributes_location)
faith_abbrv_value.set_filename("types")
faith_abbrv_value.add_raw_parent(abbrv_value_parent)
faith_abbrv_value.add_raw_member("translations", [], abbrv_value_parent)
pregen_converter_group.add_raw_api_object(faith_abbrv_value)
pregen_nyan_objects.update({faith_abbrv_ref_in_modpack: faith_abbrv_value}) | [
"def",
"generate_attributes",
"(",
"full_data_set",
",",
"pregen_converter_group",
")",
":",
"pregen_nyan_objects",
"=",
"full_data_set",
".",
"pregen_nyan_objects",
"api_objects",
"=",
"full_data_set",
".",
"nyan_api_objects",
"# TODO: Fill translations",
"# ==================... | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/processor/conversion/aoc/pregen_processor.py#L60-L167 | ||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/refineubfftsetup.py | python | RefineUBFFTSetupDialog.do_ok | (self) | return | accept the current set up and return
:return: | accept the current set up and return | [
"accept",
"the",
"current",
"set",
"up",
"and",
"return"
] | def do_ok(self):
"""accept the current set up and return
:return:
"""
try:
min_d = float(str(self.ui.lineEdit_minD.text()))
max_d = float(str(self.ui.lineEdit_maxD.text()))
tolerance = float(str(self.ui.lineEdit_tolerance.text()))
except ValueError:
# unable to parse the value right
self.ui.label_message.setText('Unable to set up MinD, MaxD or Tolerance due to value error.\n'
'Either enter correct value or press "Cancel".')
else:
# good to go?
if min_d >= max_d:
self.ui.label_message.setText('MinD cannot be equal or larger than MaxD.')
elif tolerance <= 0. or tolerance >= 1.:
self.ui.label_message.setText('Tolerance cannot be negative or larger than 1.')
else:
# finally good value
self._minD = min_d
self._maxD = max_d
self._tolerance = tolerance
# and close the window
self.close()
# END-IF-ELSE
# END-TRY-EXCEPTION
return | [
"def",
"do_ok",
"(",
"self",
")",
":",
"try",
":",
"min_d",
"=",
"float",
"(",
"str",
"(",
"self",
".",
"ui",
".",
"lineEdit_minD",
".",
"text",
"(",
")",
")",
")",
"max_d",
"=",
"float",
"(",
"str",
"(",
"self",
".",
"ui",
".",
"lineEdit_maxD",
... | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/refineubfftsetup.py#L47-L78 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cgutils.py | python | increment_index | (builder, val) | return builder.add(val, one, flags=['nsw']) | Increment an index *val*. | Increment an index *val*. | [
"Increment",
"an",
"index",
"*",
"val",
"*",
"."
] | def increment_index(builder, val):
"""
Increment an index *val*.
"""
one = val.type(1)
# We pass the "nsw" flag in the hope that LLVM understands the index
# never changes sign. Unfortunately this doesn't always work
# (e.g. ndindex()).
return builder.add(val, one, flags=['nsw']) | [
"def",
"increment_index",
"(",
"builder",
",",
"val",
")",
":",
"one",
"=",
"val",
".",
"type",
"(",
"1",
")",
"# We pass the \"nsw\" flag in the hope that LLVM understands the index",
"# never changes sign. Unfortunately this doesn't always work",
"# (e.g. ndindex()).",
"retu... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cgutils.py#L438-L446 | |
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/entity_object/conversion/aoc/genie_connection.py | python | GenieBuildingConnection.__init__ | (self, building_id, full_data_set, members=None) | Creates a new Genie building connection.
:param building_id: The id of the building from the .dat file.
:param full_data_set: GenieObjectContainer instance that
contains all relevant data for the conversion
process.
:param members: An already existing member dict. | Creates a new Genie building connection. | [
"Creates",
"a",
"new",
"Genie",
"building",
"connection",
"."
] | def __init__(self, building_id, full_data_set, members=None):
"""
Creates a new Genie building connection.
:param building_id: The id of the building from the .dat file.
:param full_data_set: GenieObjectContainer instance that
contains all relevant data for the conversion
process.
:param members: An already existing member dict.
"""
super().__init__(building_id, members=members)
self.data = full_data_set | [
"def",
"__init__",
"(",
"self",
",",
"building_id",
",",
"full_data_set",
",",
"members",
"=",
"None",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"building_id",
",",
"members",
"=",
"members",
")",
"self",
".",
"data",
"=",
"full_data_set"
] | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/entity_object/conversion/aoc/genie_connection.py#L44-L57 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | build/android/pylib/remote/device/remote_device_environment.py | python | RemoteDeviceEnvironment._RevokeAccessToken | (self) | Destroys access token for remote device service. | Destroys access token for remote device service. | [
"Destroys",
"access",
"token",
"for",
"remote",
"device",
"service",
"."
] | def _RevokeAccessToken(self):
"""Destroys access token for remote device service."""
logging.info('Revoking remote service access token')
with appurify_sanitized.SanitizeLogging(self._verbose_count,
logging.WARNING):
revoke_token_results = appurify_sanitized.api.access_token_revoke(
self._access_token)
remote_device_helper.TestHttpResponse(revoke_token_results,
'Unable to revoke access token.') | [
"def",
"_RevokeAccessToken",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Revoking remote service access token'",
")",
"with",
"appurify_sanitized",
".",
"SanitizeLogging",
"(",
"self",
".",
"_verbose_count",
",",
"logging",
".",
"WARNING",
")",
":",
"rev... | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/build/android/pylib/remote/device/remote_device_environment.py#L232-L240 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/imaplib.py | python | IMAP4.uid | (self, command, *args) | return self._untagged_response(typ, dat, name) | Execute "command arg ..." with messages identified by UID,
rather than message number.
(typ, [data]) = <instance>.uid(command, arg1, arg2, ...)
Returns response appropriate to 'command'. | Execute "command arg ..." with messages identified by UID,
rather than message number. | [
"Execute",
"command",
"arg",
"...",
"with",
"messages",
"identified",
"by",
"UID",
"rather",
"than",
"message",
"number",
"."
] | def uid(self, command, *args):
"""Execute "command arg ..." with messages identified by UID,
rather than message number.
(typ, [data]) = <instance>.uid(command, arg1, arg2, ...)
Returns response appropriate to 'command'.
"""
command = command.upper()
if not command in Commands:
raise self.error("Unknown IMAP4 UID command: %s" % command)
if self.state not in Commands[command]:
raise self.error("command %s illegal in state %s, "
"only allowed in states %s" %
(command, self.state,
', '.join(Commands[command])))
name = 'UID'
typ, dat = self._simple_command(name, command, *args)
if command in ('SEARCH', 'SORT', 'THREAD'):
name = command
else:
name = 'FETCH'
return self._untagged_response(typ, dat, name) | [
"def",
"uid",
"(",
"self",
",",
"command",
",",
"*",
"args",
")",
":",
"command",
"=",
"command",
".",
"upper",
"(",
")",
"if",
"not",
"command",
"in",
"Commands",
":",
"raise",
"self",
".",
"error",
"(",
"\"Unknown IMAP4 UID command: %s\"",
"%",
"comman... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/imaplib.py#L873-L895 | |
zhaoweicai/cascade-rcnn | 2252f46158ea6555868ca6fa5c221ea71d9b5e6c | scripts/cpp_lint.py | python | FileInfo.NoExtension | (self) | return '/'.join(self.Split()[0:2]) | File has no source file extension. | File has no source file extension. | [
"File",
"has",
"no",
"source",
"file",
"extension",
"."
] | def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2]) | [
"def",
"NoExtension",
"(",
"self",
")",
":",
"return",
"'/'",
".",
"join",
"(",
"self",
".",
"Split",
"(",
")",
"[",
"0",
":",
"2",
"]",
")"
] | https://github.com/zhaoweicai/cascade-rcnn/blob/2252f46158ea6555868ca6fa5c221ea71d9b5e6c/scripts/cpp_lint.py#L956-L958 | |
msitt/blpapi-python | bebcf43668c9e5f5467b1f685f9baebbfc45bc87 | src/blpapi/eventdispatcher.py | python | EventDispatcher.start | (self) | return internals.blpapi_EventDispatcher_start(self.__handle) | Start generating callbacks for events from sessions associated with
this :class:`EventDispatcher`. | Start generating callbacks for events from sessions associated with
this :class:`EventDispatcher`. | [
"Start",
"generating",
"callbacks",
"for",
"events",
"from",
"sessions",
"associated",
"with",
"this",
":",
"class",
":",
"EventDispatcher",
"."
] | def start(self):
"""Start generating callbacks for events from sessions associated with
this :class:`EventDispatcher`.
"""
return internals.blpapi_EventDispatcher_start(self.__handle) | [
"def",
"start",
"(",
"self",
")",
":",
"return",
"internals",
".",
"blpapi_EventDispatcher_start",
"(",
"self",
".",
"__handle",
")"
] | https://github.com/msitt/blpapi-python/blob/bebcf43668c9e5f5467b1f685f9baebbfc45bc87/src/blpapi/eventdispatcher.py#L44-L49 | |
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py | python | ParallelState.LoadTargetBuildFileCallback | (self, result) | Handle the results of running LoadTargetBuildFile in another process. | Handle the results of running LoadTargetBuildFile in another process. | [
"Handle",
"the",
"results",
"of",
"running",
"LoadTargetBuildFile",
"in",
"another",
"process",
"."
] | def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release() | [
"def",
"LoadTargetBuildFileCallback",
"(",
"self",
",",
"result",
")",
":",
"self",
".",
"condition",
".",
"acquire",
"(",
")",
"if",
"not",
"result",
":",
"self",
".",
"error",
"=",
"True",
"self",
".",
"condition",
".",
"notify",
"(",
")",
"self",
".... | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py#L552-L570 | ||
papyrussolution/OpenPapyrus | bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91 | Src/OSF/LIBXML/python/libxml.py | python | SAXCallback.startElement | (self, tag, attrs) | called at the start of every element, tag is the name of
the element, attrs is a dictionary of the element's attributes | called at the start of every element, tag is the name of
the element, attrs is a dictionary of the element's attributes | [
"called",
"at",
"the",
"start",
"of",
"every",
"element",
"tag",
"is",
"the",
"name",
"of",
"the",
"element",
"attrs",
"is",
"a",
"dictionary",
"of",
"the",
"element",
"s",
"attributes"
] | def startElement(self, tag, attrs):
"""called at the start of every element, tag is the name of
the element, attrs is a dictionary of the element's attributes"""
pass | [
"def",
"startElement",
"(",
"self",
",",
"tag",
",",
"attrs",
")",
":",
"pass"
] | https://github.com/papyrussolution/OpenPapyrus/blob/bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91/Src/OSF/LIBXML/python/libxml.py#L172-L175 | ||
naver/sling | 5671cd445a2caae0b4dd0332299e4cfede05062c | webkit/Source/JavaScriptCore/disassembler/udis86/ud_opcode.py | python | UdOpcodeTables.newTable | (self, typ) | return tbl | Create a new opcode table of a give type `typ`. | Create a new opcode table of a give type `typ`. | [
"Create",
"a",
"new",
"opcode",
"table",
"of",
"a",
"give",
"type",
"typ",
"."
] | def newTable(self, typ):
"""Create a new opcode table of a give type `typ`. """
tbl = UdOpcodeTable(typ)
self._tables.append(tbl)
return tbl | [
"def",
"newTable",
"(",
"self",
",",
"typ",
")",
":",
"tbl",
"=",
"UdOpcodeTable",
"(",
"typ",
")",
"self",
".",
"_tables",
".",
"append",
"(",
"tbl",
")",
"return",
"tbl"
] | https://github.com/naver/sling/blob/5671cd445a2caae0b4dd0332299e4cfede05062c/webkit/Source/JavaScriptCore/disassembler/udis86/ud_opcode.py#L262-L266 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/processpool.py | python | TransferMonitor.notify_exception | (self, transfer_id, exception) | Notify an exception was encountered for a transfer
:param transfer_id: Unique identifier for the transfer
:param exception: The exception encountered for that transfer | Notify an exception was encountered for a transfer | [
"Notify",
"an",
"exception",
"was",
"encountered",
"for",
"a",
"transfer"
] | def notify_exception(self, transfer_id, exception):
"""Notify an exception was encountered for a transfer
:param transfer_id: Unique identifier for the transfer
:param exception: The exception encountered for that transfer
"""
# TODO: Not all exceptions are pickleable so if we are running
# this in a multiprocessing.BaseManager we will want to
# make sure to update this signature to ensure pickleability of the
# arguments or have the ProxyObject do the serialization.
self._transfer_states[transfer_id].exception = exception | [
"def",
"notify_exception",
"(",
"self",
",",
"transfer_id",
",",
"exception",
")",
":",
"# TODO: Not all exceptions are pickleable so if we are running",
"# this in a multiprocessing.BaseManager we will want to",
"# make sure to update this signature to ensure pickleability of the",
"# arg... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/processpool.py#L620-L630 | ||
OPAE/opae-sdk | 221124343c8275243a249eb72d69e0ea2d568d1b | python/opae.admin/opae/admin/tools/fpgaotsu.py | python | otsu_updater.write | (self, obj, mtd_dev) | return True | Write the flash range described in obj.
Args:
obj: an object for one of the parsed "flash" sections
from the manifest.
mtd_dev: an mtd object for the open flash device. | Write the flash range described in obj. | [
"Write",
"the",
"flash",
"range",
"described",
"in",
"obj",
"."
] | def write(self, obj, mtd_dev):
"""Write the flash range described in obj.
Args:
obj: an object for one of the parsed "flash" sections
from the manifest.
mtd_dev: an mtd object for the open flash device.
"""
if obj.get('filename') is not None:
seek = 0 if obj.get('seek') is None else to_int(obj['seek'])[1]
start = to_int(obj['start'])[1]
end = to_int(obj['end'])[1]
filename = os.path.join(self._fw_dir, obj['filename'])
verify = obj.get('verify', False)
with open(filename, 'rb') as infile:
LOG.info('Writing %s@0x%x for %d bytes (%s)',
obj['type'], start, (end + 1) - start,
os.path.basename(filename))
infile.seek(seek)
if infile.tell() != seek:
raise IOError('failed to seek in input file %s: 0x%x' %
(filename, seek))
if min([l.level for l in LOG.handlers]) < logging.INFO:
prog = LOG.debug
else:
prog = sys.stdout
mtd_dev.copy_from(infile,
(end + 1) - start,
start,
progress=prog,
chunked=self._chunk_size)
if verify:
return self.verify(obj, mtd_dev, infile)
return True | [
"def",
"write",
"(",
"self",
",",
"obj",
",",
"mtd_dev",
")",
":",
"if",
"obj",
".",
"get",
"(",
"'filename'",
")",
"is",
"not",
"None",
":",
"seek",
"=",
"0",
"if",
"obj",
".",
"get",
"(",
"'seek'",
")",
"is",
"None",
"else",
"to_int",
"(",
"o... | https://github.com/OPAE/opae-sdk/blob/221124343c8275243a249eb72d69e0ea2d568d1b/python/opae.admin/opae/admin/tools/fpgaotsu.py#L364-L403 | |
NVIDIA/MDL-SDK | aa9642b2546ad7b6236b5627385d882c2ed83c5d | src/mdl/jit/llvm/dist/utils/lit/lit/ShUtil.py | python | ShLexer.maybe_eat | (self, c) | return False | maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. | maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. | [
"maybe_eat",
"(",
"c",
")",
"-",
"Consume",
"the",
"character",
"c",
"if",
"it",
"is",
"the",
"next",
"character",
"returning",
"True",
"if",
"a",
"character",
"was",
"consumed",
"."
] | def maybe_eat(self, c):
"""
maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. """
if self.data[self.pos] == c:
self.pos += 1
return True
return False | [
"def",
"maybe_eat",
"(",
"self",
",",
"c",
")",
":",
"if",
"self",
".",
"data",
"[",
"self",
".",
"pos",
"]",
"==",
"c",
":",
"self",
".",
"pos",
"+=",
"1",
"return",
"True",
"return",
"False"
] | https://github.com/NVIDIA/MDL-SDK/blob/aa9642b2546ad7b6236b5627385d882c2ed83c5d/src/mdl/jit/llvm/dist/utils/lit/lit/ShUtil.py#L22-L29 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/perf/metrics/rendering_stats.py | python | GetScrollInputLatencyEvents | (scroll_type, browser_process, timeline_range) | return scroll_events | Get scroll events' LatencyInfo from the browser process's trace buffer
that are within the timeline_range.
Scroll events (MouseWheel, GestureScrollUpdate or JS scroll on TouchMove)
dump their LatencyInfo into trace buffer as async trace event with name
"InputLatency". The trace event has a memeber 'step' containing its event
type and a memeber 'data' containing its latency history. | Get scroll events' LatencyInfo from the browser process's trace buffer
that are within the timeline_range. | [
"Get",
"scroll",
"events",
"LatencyInfo",
"from",
"the",
"browser",
"process",
"s",
"trace",
"buffer",
"that",
"are",
"within",
"the",
"timeline_range",
"."
] | def GetScrollInputLatencyEvents(scroll_type, browser_process, timeline_range):
"""Get scroll events' LatencyInfo from the browser process's trace buffer
that are within the timeline_range.
Scroll events (MouseWheel, GestureScrollUpdate or JS scroll on TouchMove)
dump their LatencyInfo into trace buffer as async trace event with name
"InputLatency". The trace event has a memeber 'step' containing its event
type and a memeber 'data' containing its latency history.
"""
scroll_events = []
if not browser_process:
return scroll_events
for event in browser_process.IterAllAsyncSlicesOfName("InputLatency"):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
for ss in event.sub_slices:
if 'step' not in ss.args:
continue
if 'data' not in ss.args:
continue
if ss.args['step'] == scroll_type:
scroll_events.append(ss)
return scroll_events | [
"def",
"GetScrollInputLatencyEvents",
"(",
"scroll_type",
",",
"browser_process",
",",
"timeline_range",
")",
":",
"scroll_events",
"=",
"[",
"]",
"if",
"not",
"browser_process",
":",
"return",
"scroll_events",
"for",
"event",
"in",
"browser_process",
".",
"IterAllA... | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/perf/metrics/rendering_stats.py#L18-L40 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/prompt-toolkit/py3/prompt_toolkit/renderer.py | python | Renderer.wait_for_cpr_responses | (self, timeout: int = 1) | Wait for a CPR response. | Wait for a CPR response. | [
"Wait",
"for",
"a",
"CPR",
"response",
"."
] | async def wait_for_cpr_responses(self, timeout: int = 1) -> None:
"""
Wait for a CPR response.
"""
cpr_futures = list(self._waiting_for_cpr_futures) # Make copy.
# When there are no CPRs in the queue. Don't do anything.
if not cpr_futures or self.cpr_support == CPR_Support.NOT_SUPPORTED:
return None
async def wait_for_responses() -> None:
for response_f in cpr_futures:
await response_f
async def wait_for_timeout() -> None:
await sleep(timeout)
# Got timeout, erase queue.
for response_f in cpr_futures:
response_f.cancel()
self._waiting_for_cpr_futures = deque()
tasks = {
ensure_future(wait_for_responses()),
ensure_future(wait_for_timeout()),
}
_, pending = await wait(tasks, return_when=FIRST_COMPLETED)
for task in pending:
task.cancel() | [
"async",
"def",
"wait_for_cpr_responses",
"(",
"self",
",",
"timeout",
":",
"int",
"=",
"1",
")",
"->",
"None",
":",
"cpr_futures",
"=",
"list",
"(",
"self",
".",
"_waiting_for_cpr_futures",
")",
"# Make copy.",
"# When there are no CPRs in the queue. Don't do anythin... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/renderer.py#L553-L581 | ||
dmlc/treelite | df56babb6a4a2d7c29d719c28ce53acfa7dbab3c | python/treelite/contrib/__init__.py | python | generate_cmakelists | (dirpath, options=None) | Generate a CMakeLists.txt for a given directory of headers and sources. The
resulting CMakeLists.txt will be stored in the directory. This function is useful
for deploying a model on a different machine.
Parameters
----------
dirpath : :py:class:`str <python:str>`
directory containing the header and source files previously generated
by :py:meth:`Model.compile`. The directory must contain recipe.json
which specifies build dependencies.
options : :py:class:`list <python:list>` of :py:class:`str <python:str>`, \
optional
Additional options to pass to toolchain | Generate a CMakeLists.txt for a given directory of headers and sources. The
resulting CMakeLists.txt will be stored in the directory. This function is useful
for deploying a model on a different machine. | [
"Generate",
"a",
"CMakeLists",
".",
"txt",
"for",
"a",
"given",
"directory",
"of",
"headers",
"and",
"sources",
".",
"The",
"resulting",
"CMakeLists",
".",
"txt",
"will",
"be",
"stored",
"in",
"the",
"directory",
".",
"This",
"function",
"is",
"useful",
"f... | def generate_cmakelists(dirpath, options=None):
"""
Generate a CMakeLists.txt for a given directory of headers and sources. The
resulting CMakeLists.txt will be stored in the directory. This function is useful
for deploying a model on a different machine.
Parameters
----------
dirpath : :py:class:`str <python:str>`
directory containing the header and source files previously generated
by :py:meth:`Model.compile`. The directory must contain recipe.json
which specifies build dependencies.
options : :py:class:`list <python:list>` of :py:class:`str <python:str>`, \
optional
Additional options to pass to toolchain
"""
if not os.path.isdir(dirpath):
raise TreeliteError(f'Directory {dirpath} does not exist')
try:
with open(os.path.join(dirpath, 'recipe.json'), 'r', encoding='UTF-8') as f:
recipe = json.load(f)
except IOError as e:
raise TreeliteError('Failed to open recipe.json') from e
if 'sources' not in recipe or 'target' not in recipe:
raise TreeliteError('Malformed recipe.json')
if options is not None:
try:
_ = iter(options)
options = [str(x) for x in options]
except TypeError as e:
raise TreeliteError('options must be a list of string') from e
else:
options = []
target = recipe['target']
sources = ' '.join([x['name'] + '.c' for x in recipe['sources']])
options = ' '.join(options)
with open(os.path.join(dirpath, 'CMakeLists.txt'), 'w', encoding='UTF-8') as f:
print('cmake_minimum_required(VERSION 3.13)', file=f)
print('project(mushroom LANGUAGES C)\n', file=f)
print(f'add_library({target} SHARED)', file=f)
print(f'target_sources({target} PRIVATE header.h {sources})', file=f)
print(f'target_compile_options({target} PRIVATE {options})', file=f)
print(f'target_include_directories({target} PRIVATE "${{PROJECT_BINARY_DIR}}")', file=f)
print(f'set_target_properties({target} PROPERTIES', file=f)
print('''POSITION_INDEPENDENT_CODE ON
C_STANDARD 99
C_STANDARD_REQUIRED ON
PREFIX ""
RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}"
RUNTIME_OUTPUT_DIRECTORY_DEBUG "${PROJECT_BINARY_DIR}"
RUNTIME_OUTPUT_DIRECTORY_RELEASE "${PROJECT_BINARY_DIR}"
RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO "${PROJECT_BINARY_DIR}"
RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL "${PROJECT_BINARY_DIR}"
LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}"
LIBRARY_OUTPUT_DIRECTORY_DEBUG "${PROJECT_BINARY_DIR}"
LIBRARY_OUTPUT_DIRECTORY_RELEASE "${PROJECT_BINARY_DIR}"
LIBRARY_OUTPUT_DIRECTORY_RELWITHDEBINFO "${PROJECT_BINARY_DIR}"
LIBRARY_OUTPUT_DIRECTORY_MINSIZEREL "${PROJECT_BINARY_DIR}")
''', file=f) | [
"def",
"generate_cmakelists",
"(",
"dirpath",
",",
"options",
"=",
"None",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dirpath",
")",
":",
"raise",
"TreeliteError",
"(",
"f'Directory {dirpath} does not exist'",
")",
"try",
":",
"with",
"ope... | https://github.com/dmlc/treelite/blob/df56babb6a4a2d7c29d719c28ce53acfa7dbab3c/python/treelite/contrib/__init__.py#L119-L179 | ||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/tpu/python/tpu/tpu_feed.py | python | InfeedQueue.tuple_shapes | (self) | return self._tuple_shapes | Returns the shapes of the InfeedQueue tuple elements. | Returns the shapes of the InfeedQueue tuple elements. | [
"Returns",
"the",
"shapes",
"of",
"the",
"InfeedQueue",
"tuple",
"elements",
"."
] | def tuple_shapes(self):
"""Returns the shapes of the InfeedQueue tuple elements."""
return self._tuple_shapes | [
"def",
"tuple_shapes",
"(",
"self",
")",
":",
"return",
"self",
".",
"_tuple_shapes"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/tpu/python/tpu/tpu_feed.py#L166-L168 | |
cocos-creator/engine-native | 984c4c9f5838253313b44ccd429bd8fac4ec8a6a | tools/bindings-generator/clang/cindex.py | python | CursorKind.is_translation_unit | (self) | return conf.lib.clang_isTranslationUnit(self) | Test if this is a translation unit kind. | Test if this is a translation unit kind. | [
"Test",
"if",
"this",
"is",
"a",
"translation",
"unit",
"kind",
"."
] | def is_translation_unit(self):
"""Test if this is a translation unit kind."""
return conf.lib.clang_isTranslationUnit(self) | [
"def",
"is_translation_unit",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_isTranslationUnit",
"(",
"self",
")"
] | https://github.com/cocos-creator/engine-native/blob/984c4c9f5838253313b44ccd429bd8fac4ec8a6a/tools/bindings-generator/clang/cindex.py#L695-L697 | |
forkineye/ESPixelStick | 22926f1c0d1131f1369fc7cad405689a095ae3cb | dist/bin/pyserial/serial/serialwin32.py | python | Serial._update_rts_state | (self) | Set terminal status line: Request To Send | Set terminal status line: Request To Send | [
"Set",
"terminal",
"status",
"line",
":",
"Request",
"To",
"Send"
] | def _update_rts_state(self):
"""Set terminal status line: Request To Send"""
if self._rts_state:
win32.EscapeCommFunction(self._port_handle, win32.SETRTS)
else:
win32.EscapeCommFunction(self._port_handle, win32.CLRRTS) | [
"def",
"_update_rts_state",
"(",
"self",
")",
":",
"if",
"self",
".",
"_rts_state",
":",
"win32",
".",
"EscapeCommFunction",
"(",
"self",
".",
"_port_handle",
",",
"win32",
".",
"SETRTS",
")",
"else",
":",
"win32",
".",
"EscapeCommFunction",
"(",
"self",
"... | https://github.com/forkineye/ESPixelStick/blob/22926f1c0d1131f1369fc7cad405689a095ae3cb/dist/bin/pyserial/serial/serialwin32.py#L373-L378 | ||
p4lang/p4c | 3272e79369f20813cc1a555a5eb26f44432f84a4 | tools/stf/stf_lexer.py | python | STFLexer.reset_lineno | (self) | Resets the internal line number counter of the lexer. | Resets the internal line number counter of the lexer. | [
"Resets",
"the",
"internal",
"line",
"number",
"counter",
"of",
"the",
"lexer",
"."
] | def reset_lineno(self):
""" Resets the internal line number counter of the lexer.
"""
self.lexer.lineno = 1
self.lexer.colno = 1 | [
"def",
"reset_lineno",
"(",
"self",
")",
":",
"self",
".",
"lexer",
".",
"lineno",
"=",
"1",
"self",
".",
"lexer",
".",
"colno",
"=",
"1"
] | https://github.com/p4lang/p4c/blob/3272e79369f20813cc1a555a5eb26f44432f84a4/tools/stf/stf_lexer.py#L32-L36 | ||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/basic_fitting_model.py | python | BasicFittingModel._get_new_exclude_start_and_end_x_for | (self, dataset_index: int) | return check_exclude_start_and_end_x_is_valid(start_x, end_x, new_exclude_start_x, new_exclude_end_x) | Gets the new exclude start and end X to use for a specific dataset. It tries to use the current data. | Gets the new exclude start and end X to use for a specific dataset. It tries to use the current data. | [
"Gets",
"the",
"new",
"exclude",
"start",
"and",
"end",
"X",
"to",
"use",
"for",
"a",
"specific",
"dataset",
".",
"It",
"tries",
"to",
"use",
"the",
"current",
"data",
"."
] | def _get_new_exclude_start_and_end_x_for(self, dataset_index: int) -> tuple:
"""Gets the new exclude start and end X to use for a specific dataset. It tries to use the current data."""
start_x, end_x = self.fitting_context.start_xs[dataset_index], self.fitting_context.end_xs[dataset_index]
exclude_start_xs, exclude_end_xs = self.fitting_context.exclude_start_xs, self.fitting_context.exclude_end_xs
new_exclude_start_x = exclude_start_xs[dataset_index] if dataset_index < len(exclude_start_xs) else end_x
new_exclude_end_x = exclude_end_xs[dataset_index] if dataset_index < len(exclude_end_xs) else end_x
return check_exclude_start_and_end_x_is_valid(start_x, end_x, new_exclude_start_x, new_exclude_end_x) | [
"def",
"_get_new_exclude_start_and_end_x_for",
"(",
"self",
",",
"dataset_index",
":",
"int",
")",
"->",
"tuple",
":",
"start_x",
",",
"end_x",
"=",
"self",
".",
"fitting_context",
".",
"start_xs",
"[",
"dataset_index",
"]",
",",
"self",
".",
"fitting_context",
... | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/basic_fitting_model.py#L589-L597 | |
bingwin/MicroChat | 81d9a71a212c1cbca5bba497ec42659a7d25dccf | mars/lint/cpplint.py | python | FindNextMultiLineCommentEnd | (lines, lineix) | return len(lines) | We are inside a comment, find the end marker. | We are inside a comment, find the end marker. | [
"We",
"are",
"inside",
"a",
"comment",
"find",
"the",
"end",
"marker",
"."
] | def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines) | [
"def",
"FindNextMultiLineCommentEnd",
"(",
"lines",
",",
"lineix",
")",
":",
"while",
"lineix",
"<",
"len",
"(",
"lines",
")",
":",
"if",
"lines",
"[",
"lineix",
"]",
".",
"strip",
"(",
")",
".",
"endswith",
"(",
"'*/'",
")",
":",
"return",
"lineix",
... | https://github.com/bingwin/MicroChat/blob/81d9a71a212c1cbca5bba497ec42659a7d25dccf/mars/lint/cpplint.py#L1246-L1252 | |
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/training/python/training/python_input.py | python | _process_yielded_dict | (feature_values, keys, features, dtypes, shapes) | return processed_values | Read feature_values from the generator and emit a proper output dict. | Read feature_values from the generator and emit a proper output dict. | [
"Read",
"feature_values",
"from",
"the",
"generator",
"and",
"emit",
"a",
"proper",
"output",
"dict",
"."
] | def _process_yielded_dict(feature_values, keys, features, dtypes, shapes):
"""Read feature_values from the generator and emit a proper output dict."""
if not isinstance(feature_values, dict):
raise TypeError("generator must return dict, saw: %s" % feature_values)
processed_values = {}
for pk in keys:
if feature_values.get(pk, None) is not None:
processed_values[pk] = np.asarray(
feature_values[pk], dtype=dtypes[pk].as_numpy_dtype)
check_shape = tensor_shape.TensorShape(processed_values[pk].shape)
if not shapes[pk].is_compatible_with(check_shape):
raise ValueError(
"Feature '%s' has shape %s that is incompatible with declared "
"shape: %s" % (pk, shapes[pk], check_shape))
continue
if isinstance(features[pk], parsing_ops.FixedLenFeature):
if features[pk].default_value is not None:
processed_values[pk] = np.asarray(
features[pk].default_value, dtype=dtypes[pk].as_numpy_dtype)
elif isinstance(features[pk], parsing_ops.FixedLenSequenceFeature):
processed_values[pk] = np.empty(
[0] + features[pk].shape.aslist(), dtype=dtypes[pk].as_numpy_dtype)
else:
raise ValueError(
"Expected generator to return key '%s' with non-empty value" % pk)
return processed_values | [
"def",
"_process_yielded_dict",
"(",
"feature_values",
",",
"keys",
",",
"features",
",",
"dtypes",
",",
"shapes",
")",
":",
"if",
"not",
"isinstance",
"(",
"feature_values",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"generator must return dict, saw: %s\... | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/training/python/training/python_input.py#L31-L58 | |
psi4/psi4 | be533f7f426b6ccc263904e55122899b16663395 | psi4/driver/qcdb/libmintspointgrp.py | python | SymmetryOperation.c2_y | (self) | Set equal to C2 about the y axis | Set equal to C2 about the y axis | [
"Set",
"equal",
"to",
"C2",
"about",
"the",
"y",
"axis"
] | def c2_y(self):
"""Set equal to C2 about the y axis"""
self.i()
self.d[1][1] = 1.0
self.bits = SymmOps['C2_y'] | [
"def",
"c2_y",
"(",
"self",
")",
":",
"self",
".",
"i",
"(",
")",
"self",
".",
"d",
"[",
"1",
"]",
"[",
"1",
"]",
"=",
"1.0",
"self",
".",
"bits",
"=",
"SymmOps",
"[",
"'C2_y'",
"]"
] | https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/qcdb/libmintspointgrp.py#L253-L257 | ||
synfig/synfig | a5ec91db5b751dc12e4400ccfb5c063fd6d2d928 | synfig-studio/plugins/lottie-exporter/common/Bline.py | python | Bline.get_layer | (self) | return self.parent | Recursively find the layer of this bline | Recursively find the layer of this bline | [
"Recursively",
"find",
"the",
"layer",
"of",
"this",
"bline"
] | def get_layer(self):
"""
Recursively find the layer of this bline
"""
if isinstance(self.parent, common.Layer.Layer):
return self.parent
return self.parent | [
"def",
"get_layer",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"parent",
",",
"common",
".",
"Layer",
".",
"Layer",
")",
":",
"return",
"self",
".",
"parent",
"return",
"self",
".",
"parent"
] | https://github.com/synfig/synfig/blob/a5ec91db5b751dc12e4400ccfb5c063fd6d2d928/synfig-studio/plugins/lottie-exporter/common/Bline.py#L60-L66 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/ultimatelistctrl.py | python | UltimateListMainWindow.HandleHyperLink | (self, item) | Handles the hyperlink items, sending the ``EVT_LIST_ITEM_HYPERLINK`` event.
:param `item`: an instance of :class:`UltimateListItem`. | Handles the hyperlink items, sending the ``EVT_LIST_ITEM_HYPERLINK`` event. | [
"Handles",
"the",
"hyperlink",
"items",
"sending",
"the",
"EVT_LIST_ITEM_HYPERLINK",
"event",
"."
] | def HandleHyperLink(self, item):
"""
Handles the hyperlink items, sending the ``EVT_LIST_ITEM_HYPERLINK`` event.
:param `item`: an instance of :class:`UltimateListItem`.
"""
if self.IsItemHyperText(item):
self.SendNotify(item._itemId, wxEVT_COMMAND_LIST_ITEM_HYPERLINK) | [
"def",
"HandleHyperLink",
"(",
"self",
",",
"item",
")",
":",
"if",
"self",
".",
"IsItemHyperText",
"(",
"item",
")",
":",
"self",
".",
"SendNotify",
"(",
"item",
".",
"_itemId",
",",
"wxEVT_COMMAND_LIST_ITEM_HYPERLINK",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ultimatelistctrl.py#L7840-L7848 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_core.py | python | ThreadEvent.GetInt | (*args, **kwargs) | return _core_.ThreadEvent_GetInt(*args, **kwargs) | GetInt(self) -> int | GetInt(self) -> int | [
"GetInt",
"(",
"self",
")",
"-",
">",
"int"
] | def GetInt(*args, **kwargs):
"""GetInt(self) -> int"""
return _core_.ThreadEvent_GetInt(*args, **kwargs) | [
"def",
"GetInt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"ThreadEvent_GetInt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L5386-L5388 | |
sdhash/sdhash | b9eff63e4e5867e910f41fd69032bbb1c94a2a5e | sdhash-ui/cherrypy/_cpreqbody.py | python | SizedReader.readlines | (self, sizehint=None) | return lines | Read lines from the request body and return them. | Read lines from the request body and return them. | [
"Read",
"lines",
"from",
"the",
"request",
"body",
"and",
"return",
"them",
"."
] | def readlines(self, sizehint=None):
"""Read lines from the request body and return them."""
if self.length is not None:
if sizehint is None:
sizehint = self.length - self.bytes_read
else:
sizehint = min(sizehint, self.length - self.bytes_read)
lines = []
seen = 0
while True:
line = self.readline()
if not line:
break
lines.append(line)
seen += len(line)
if seen >= sizehint:
break
return lines | [
"def",
"readlines",
"(",
"self",
",",
"sizehint",
"=",
"None",
")",
":",
"if",
"self",
".",
"length",
"is",
"not",
"None",
":",
"if",
"sizehint",
"is",
"None",
":",
"sizehint",
"=",
"self",
".",
"length",
"-",
"self",
".",
"bytes_read",
"else",
":",
... | https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/cherrypy/_cpreqbody.py#L838-L856 | |
IntelRealSense/librealsense | c94410a420b74e5fb6a414bd12215c05ddd82b69 | wrappers/python/examples/box_dimensioner_multicam/helper_functions.py | python | convert_depth_pixel_to_metric_coordinate | (depth, pixel_x, pixel_y, camera_intrinsics) | return X, Y, depth | Convert the depth and image point information to metric coordinates
Parameters:
-----------
depth : double
The depth value of the image point
pixel_x : double
The x value of the image coordinate
pixel_y : double
The y value of the image coordinate
camera_intrinsics : The intrinsic values of the imager in whose coordinate system the depth_frame is computed
Return:
----------
X : double
The x value in meters
Y : double
The y value in meters
Z : double
The z value in meters | Convert the depth and image point information to metric coordinates | [
"Convert",
"the",
"depth",
"and",
"image",
"point",
"information",
"to",
"metric",
"coordinates"
] | def convert_depth_pixel_to_metric_coordinate(depth, pixel_x, pixel_y, camera_intrinsics):
"""
Convert the depth and image point information to metric coordinates
Parameters:
-----------
depth : double
The depth value of the image point
pixel_x : double
The x value of the image coordinate
pixel_y : double
The y value of the image coordinate
camera_intrinsics : The intrinsic values of the imager in whose coordinate system the depth_frame is computed
Return:
----------
X : double
The x value in meters
Y : double
The y value in meters
Z : double
The z value in meters
"""
X = (pixel_x - camera_intrinsics.ppx)/camera_intrinsics.fx *depth
Y = (pixel_y - camera_intrinsics.ppy)/camera_intrinsics.fy *depth
return X, Y, depth | [
"def",
"convert_depth_pixel_to_metric_coordinate",
"(",
"depth",
",",
"pixel_x",
",",
"pixel_y",
",",
"camera_intrinsics",
")",
":",
"X",
"=",
"(",
"pixel_x",
"-",
"camera_intrinsics",
".",
"ppx",
")",
"/",
"camera_intrinsics",
".",
"fx",
"*",
"depth",
"Y",
"=... | https://github.com/IntelRealSense/librealsense/blob/c94410a420b74e5fb6a414bd12215c05ddd82b69/wrappers/python/examples/box_dimensioner_multicam/helper_functions.py#L121-L147 | |
google/shaka-packager | e1b0c7c45431327fd3ce193514a5407d07b39b22 | packager/third_party/protobuf/python/mox.py | python | UnorderedGroup.MethodCalled | (self, mock_method) | Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group. | Remove a method call from the group. | [
"Remove",
"a",
"method",
"call",
"from",
"the",
"group",
"."
] | def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self) | [
"def",
"MethodCalled",
"(",
"self",
",",
"mock_method",
")",
":",
"# Check to see if this method exists, and if so, remove it from the set",
"# and return it.",
"for",
"method",
"in",
"self",
".",
"_methods",
":",
"if",
"method",
"==",
"mock_method",
":",
"# Remove the ca... | https://github.com/google/shaka-packager/blob/e1b0c7c45431327fd3ce193514a5407d07b39b22/packager/third_party/protobuf/python/mox.py#L1223-L1255 | ||
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/_ctypes/ndarray.py | python | NDArrayBase.__init__ | (self, handle, writable=True) | initialize a new NDArray
Parameters
----------
handle : NDArrayHandle
NDArray handle of C API | initialize a new NDArray | [
"initialize",
"a",
"new",
"NDArray"
] | def __init__(self, handle, writable=True):
"""initialize a new NDArray
Parameters
----------
handle : NDArrayHandle
NDArray handle of C API
"""
if handle is not None:
assert isinstance(handle, NDArrayHandle)
self.handle = handle
self.writable = writable
self._alive = True | [
"def",
"__init__",
"(",
"self",
",",
"handle",
",",
"writable",
"=",
"True",
")",
":",
"if",
"handle",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"handle",
",",
"NDArrayHandle",
")",
"self",
".",
"handle",
"=",
"handle",
"self",
".",
"writa... | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/_ctypes/ndarray.py#L36-L48 | ||
codilime/veles | e65de5a7c268129acffcdb03034efd8d256d025c | python/veles/async_conn/conn.py | python | AsyncConnection.get_bindata | (self, node, key, start, end) | Fetches a range of bindata of a given node with the given key, returns
an awaitable of bytes. | Fetches a range of bindata of a given node with the given key, returns
an awaitable of bytes. | [
"Fetches",
"a",
"range",
"of",
"bindata",
"of",
"a",
"given",
"node",
"with",
"the",
"given",
"key",
"returns",
"an",
"awaitable",
"of",
"bytes",
"."
] | def get_bindata(self, node, key, start, end):
"""
Fetches a range of bindata of a given node with the given key, returns
an awaitable of bytes.
"""
raise NotImplementedError | [
"def",
"get_bindata",
"(",
"self",
",",
"node",
",",
"key",
",",
"start",
",",
"end",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/codilime/veles/blob/e65de5a7c268129acffcdb03034efd8d256d025c/python/veles/async_conn/conn.py#L75-L80 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py | python | AbstractFileSystem.find | (self, path, maxdepth=None, withdirs=False, **kwargs) | List all files below path.
Like posix ``find`` command without conditions
Parameters
----------
path : str
maxdepth: int or None
If not None, the maximum number of levels to descend
withdirs: bool
Whether to include directory paths in the output. This is True
when used by glob, but users usually only want files.
kwargs are passed to ``ls``. | List all files below path. | [
"List",
"all",
"files",
"below",
"path",
"."
] | def find(self, path, maxdepth=None, withdirs=False, **kwargs):
"""List all files below path.
Like posix ``find`` command without conditions
Parameters
----------
path : str
maxdepth: int or None
If not None, the maximum number of levels to descend
withdirs: bool
Whether to include directory paths in the output. This is True
when used by glob, but users usually only want files.
kwargs are passed to ``ls``.
"""
# TODO: allow equivalent of -name parameter
path = self._strip_protocol(path)
out = dict()
detail = kwargs.pop("detail", False)
for path, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs):
if withdirs:
files.update(dirs)
out.update({info["name"]: info for name, info in files.items()})
if self.isfile(path) and path not in out:
# walk works on directories, but find should also return [path]
# when path happens to be a file
out[path] = {}
names = sorted(out)
if not detail:
return names
else:
return {name: out[name] for name in names} | [
"def",
"find",
"(",
"self",
",",
"path",
",",
"maxdepth",
"=",
"None",
",",
"withdirs",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: allow equivalent of -name parameter",
"path",
"=",
"self",
".",
"_strip_protocol",
"(",
"path",
")",
"out",
"=... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py#L375-L406 | ||
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/utils/bundled_inputs.py | python | augment_many_model_functions_with_bundled_inputs | (
model: torch.jit.ScriptModule,
inputs: Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]],
_receive_inflate_expr: Optional[List[str]] = None, # For debugging.
info: Optional[Dict[Callable, List[str]]] = None, # Optional argument to provide info about the function or its inputs
skip_size_check=False,
) | Add bundled sample inputs to a model for an arbitrary list of public functions.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
Augmented models will support the following methods:
`get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
If forward has bundled inputs then these following functions are also defined:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_<function_name>`.
If the user chooses this method inputs[<function>] should map to None
- The `inputs` argument to this function can be a dictionary mapping functions to a
list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
list of inputs, the inner tuple is the list of args that together make up one input.
For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
is the actual data that makes up the args, e.g. a tensor.
Info is an optional parameter that maps functions to a list of strings providing extra information about that
function's bundled inputs. This could be descriptions, expected outputs, etc.
- Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
This function will attempt to optimize arguments so that (e.g.)
arguments like `torch.zeros(1000)` will be represented compactly.
Only top-level arguments will be optimized.
Tensors in lists or tuples will not. | Add bundled sample inputs to a model for an arbitrary list of public functions. | [
"Add",
"bundled",
"sample",
"inputs",
"to",
"a",
"model",
"for",
"an",
"arbitrary",
"list",
"of",
"public",
"functions",
"."
] | def augment_many_model_functions_with_bundled_inputs(
model: torch.jit.ScriptModule,
inputs: Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]],
_receive_inflate_expr: Optional[List[str]] = None, # For debugging.
info: Optional[Dict[Callable, List[str]]] = None, # Optional argument to provide info about the function or its inputs
skip_size_check=False,
) -> None:
"""Add bundled sample inputs to a model for an arbitrary list of public functions.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
Augmented models will support the following methods:
`get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
If forward has bundled inputs then these following functions are also defined:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_<function_name>`.
If the user chooses this method inputs[<function>] should map to None
- The `inputs` argument to this function can be a dictionary mapping functions to a
list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
list of inputs, the inner tuple is the list of args that together make up one input.
For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
is the actual data that makes up the args, e.g. a tensor.
Info is an optional parameter that maps functions to a list of strings providing extra information about that
function's bundled inputs. This could be descriptions, expected outputs, etc.
- Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
This function will attempt to optimize arguments so that (e.g.)
arguments like `torch.zeros(1000)` will be represented compactly.
Only top-level arguments will be optimized.
Tensors in lists or tuples will not.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.")
if not inputs:
raise Exception("Please provide inputs for at least 1 function")
if hasattr(model, "get_all_bundled_inputs") or hasattr(model, "get_bundled_inputs_functions_and_info"):
raise Exception(
"Models can only be augmented with bundled inputs once. "
"This Model seems to have already been augmented with "
"bundled inputs. Please start afresh with one that "
"doesn't have bundled inputs.",
)
get_bundled_inputs_functions_and_info_template = ""
for function, input_list in inputs.items():
if hasattr(function, "__name__"):
function_name = function.__name__
else:
if hasattr(function, "name"):
function_name = function.name # type: ignore[attr-defined]
else:
raise Exception(
'At least one of your functions has no attribute name please ensure all have one. m.foo.name = "foo"')
if input_list is not None and not isinstance(input_list, Sequence):
raise TypeError("Error inputs for function {0} is not a Sequence".format(function_name))
function_arg_types = [arg.type for arg in function.schema.arguments[1:]] # type: ignore[attr-defined]
deflated_inputs_type: ListType = ListType(TupleType(function_arg_types))
model._c._register_attribute("_bundled_inputs_deflated_{name}".format(name=function_name), deflated_inputs_type, [])
if hasattr(model, "_generate_bundled_inputs_for_" + function_name):
if input_list is not None:
raise Exception(
"inputs[{name}] is not None, but _generate_bundled_inputs_for_{name} is already defined".format(
name=function_name
)
)
# Model author already defined _generate_bundled_inputs_for_<function_name>.
elif input_list is None or len(input_list) == 0:
raise Exception(
"inputs for {name} must be specified if _generate_bundled_inputs_for_{name} is not already defined".format(
name=function_name,
)
)
else:
# Iterate over the inputs and args in each input.
# Accumulate `deflated_inputs` as (possibly) compressed values
# and `parts` to be joined into the expression that unpacks them.
deflated_inputs = []
parts = []
for inp_idx, args in enumerate(input_list):
if not isinstance(args, Tuple) and not isinstance(args, List): # type: ignore[arg-type]
raise TypeError(
"Error bundled input for function {0} idx: {1} is not a Tuple or a List".format(function_name, inp_idx)
)
deflated_args = []
parts.append("(")
for arg_idx, arg in enumerate(args):
inflate_helper_fn_name = _get_inflate_helper_fn_name(arg_idx, inp_idx, function_name)
deflated, inflater, helper_definition = _inflate_expr(
arg,
f"deflated[{inp_idx}][{arg_idx}]",
inflate_helper_fn_name,
skip_size_check=skip_size_check,
)
deflated_args.append(deflated)
parts.append(f" {inflater},")
if helper_definition:
model.define(textwrap.dedent(helper_definition))
deflated_inputs.append(tuple(deflated_args))
parts.append("),")
parts.append("")
expr = "\n".join(parts)
# Back-channel return this expr for debugging.
if _receive_inflate_expr is not None:
_receive_inflate_expr.append(expr)
setattr(model, "_bundled_inputs_deflated_{name}".format(name=function_name), deflated_inputs)
definition = textwrap.dedent("""
def _generate_bundled_inputs_for_{name}(self):
deflated = self._bundled_inputs_deflated_{name}
return [
{expr}
]
""").format(expr=expr, name=function_name)
model.define(definition)
# Define get_all_bundled_inputs_for_<function_name> that caches the generated inputs.
model.define(textwrap.dedent("""
def get_all_bundled_inputs_for_{name}(self):
all_inputs = self._generate_bundled_inputs_for_{name}()
assert all_inputs is not None
return all_inputs
""").format(name=function_name))
# Add to the high level helper methods
inputs_info = repr(info[function]) if info and function in info else '[]'
get_bundled_inputs_functions_and_info_template += """
temp_dict : Dict[str,List[str]] = {{}}
info: List[str] = {info}
temp_dict['info'] = info
temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{name}']
all_inputs['{name}'] = temp_dict
""".format(
name=function_name,
info=inputs_info,
)
# To ensure backwards compatibility and a streamlined api for forward these wrappers are provided
if function_name == 'forward':
model.define(textwrap.dedent("""
def get_all_bundled_inputs(self):
return self.get_all_bundled_inputs_for_forward()
"""))
model.define(textwrap.dedent("""
def get_num_bundled_inputs(self):
return len(self.get_all_bundled_inputs_for_forward())
"""))
# Define some high level helper methods that act on all bundled inputs
model.define(textwrap.dedent("""
def get_bundled_inputs_functions_and_info(self):
all_inputs : Dict[str, Dict[str,List[str]]] = {{}}
{template}
return all_inputs
""".format(template=get_bundled_inputs_functions_and_info_template))) | [
"def",
"augment_many_model_functions_with_bundled_inputs",
"(",
"model",
":",
"torch",
".",
"jit",
".",
"ScriptModule",
",",
"inputs",
":",
"Dict",
"[",
"Callable",
",",
"Optional",
"[",
"Sequence",
"[",
"Tuple",
"[",
"Any",
",",
"...",
"]",
"]",
"]",
"]",
... | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/utils/bundled_inputs.py#L180-L366 | ||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/training/input.py | python | maybe_batch_join | (tensors_list, keep_input, batch_size, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None,
name=None) | return _batch_join(
tensors_list,
batch_size,
keep_input,
capacity=capacity,
enqueue_many=enqueue_many,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name) | Runs a list of tensors to conditionally fill a queue to create batches.
See docstring in `batch_join` for more details.
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
keep_input: A `bool` Tensor. This tensor controls whether the input is
added to the queue or not. If it is a scalar and evaluates `True`, then
`tensors` are all added to the queue. If it is a vector and `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially acts
as a filtering mechanism.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`. | Runs a list of tensors to conditionally fill a queue to create batches. | [
"Runs",
"a",
"list",
"of",
"tensors",
"to",
"conditionally",
"fill",
"a",
"queue",
"to",
"create",
"batches",
"."
] | def maybe_batch_join(tensors_list, keep_input, batch_size, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Runs a list of tensors to conditionally fill a queue to create batches.
See docstring in `batch_join` for more details.
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
keep_input: A `bool` Tensor. This tensor controls whether the input is
added to the queue or not. If it is a scalar and evaluates `True`, then
`tensors` are all added to the queue. If it is a vector and `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially acts
as a filtering mechanism.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
return _batch_join(
tensors_list,
batch_size,
keep_input,
capacity=capacity,
enqueue_many=enqueue_many,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name) | [
"def",
"maybe_batch_join",
"(",
"tensors_list",
",",
"keep_input",
",",
"batch_size",
",",
"capacity",
"=",
"32",
",",
"enqueue_many",
"=",
"False",
",",
"shapes",
"=",
"None",
",",
"dynamic_pad",
"=",
"False",
",",
"allow_smaller_final_batch",
"=",
"False",
"... | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/training/input.py#L1077-L1126 | |
espressomd/espresso | 7e29f9052e710fe1ebf0f5d2a8076b32921fbc6a | samples/gibbs_ensemble/gibbs_ensemble.py | python | Client.revert_remove_particle | (self) | Revert last particle remove | Revert last particle remove | [
"Revert",
"last",
"particle",
"remove"
] | def revert_remove_particle(self):
""" Revert last particle remove """
self.system.part.add(self.old_particle)
self.old_particle = None | [
"def",
"revert_remove_particle",
"(",
"self",
")",
":",
"self",
".",
"system",
".",
"part",
".",
"add",
"(",
"self",
".",
"old_particle",
")",
"self",
".",
"old_particle",
"=",
"None"
] | https://github.com/espressomd/espresso/blob/7e29f9052e710fe1ebf0f5d2a8076b32921fbc6a/samples/gibbs_ensemble/gibbs_ensemble.py#L115-L118 | ||
rsummers11/CADLab | 976ed959a0b5208bb4173127a7ef732ac73a9b6f | MULAN_universal_lesion_analysis/maskrcnn/engine/trainer.py | python | reduce_loss_dict | (loss_dict) | return reduced_losses | Reduce the loss dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
loss_dict, after reduction. | Reduce the loss dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
loss_dict, after reduction. | [
"Reduce",
"the",
"loss",
"dictionary",
"from",
"all",
"processes",
"so",
"that",
"process",
"with",
"rank",
"0",
"has",
"the",
"averaged",
"results",
".",
"Returns",
"a",
"dict",
"with",
"the",
"same",
"fields",
"as",
"loss_dict",
"after",
"reduction",
"."
] | def reduce_loss_dict(loss_dict):
"""
Reduce the loss dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
loss_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
loss_names = []
all_losses = []
for k, v in loss_dict.items():
loss_names.append(k)
all_losses.append(v)
all_losses = torch.stack(all_losses, dim=0)
dist.reduce(all_losses, dst=0)
if dist.get_rank() == 0:
# only main process gets accumulated, so only divide by
# world_size in this case
all_losses /= world_size
reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
return reduced_losses | [
"def",
"reduce_loss_dict",
"(",
"loss_dict",
")",
":",
"world_size",
"=",
"get_world_size",
"(",
")",
"if",
"world_size",
"<",
"2",
":",
"return",
"loss_dict",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"loss_names",
"=",
"[",
"]",
"all_losses",
"=",
... | https://github.com/rsummers11/CADLab/blob/976ed959a0b5208bb4173127a7ef732ac73a9b6f/MULAN_universal_lesion_analysis/maskrcnn/engine/trainer.py#L22-L44 | |
Ardour/ardour | a63a18a3387b90c0920d9b1668d2a50bd6302b83 | tools/cstyle.py | python | CStyleChecker.line_checks | (self, line) | return | Run the style checker on provided line of text, but within the context
of how the line fits within the file. | Run the style checker on provided line of text, but within the context
of how the line fits within the file. | [
"Run",
"the",
"style",
"checker",
"on",
"provided",
"line",
"of",
"text",
"but",
"within",
"the",
"context",
"of",
"how",
"the",
"line",
"fits",
"within",
"the",
"file",
"."
] | def line_checks (self, line):
"""
Run the style checker on provided line of text, but within the context
of how the line fits within the file.
"""
indent = len (self.indent_re.search (line).group ())
if re.search ("^\s+}", line):
if not self.last_line_indent_curly and indent != self.last_line_indent:
None # self.error ("bad indent on close curly brace")
self.last_line_indent_curly = True
else:
self.last_line_indent_curly = False
# Now all the stylistic warnings regex checks.
for (check_re, msg) in self.warning_checks:
if check_re.search (line):
self.warning (msg)
# Now all the stylistic error regex checks.
for (check_re, msg) in self.error_checks:
if check_re.search (line):
self.error (msg)
if re.search ("[a-zA-Z0-9_][<>!=^/&\|]{1,2}[a-zA-Z0-9_]", line):
# ignore #include <foo.h> and C++ templates with indirection/pointer/reference operators
if not re.search (".*#include.*[a-zA-Z0-9]/[a-zA-Z]", line) and not re.search ("[a-zA-Z0-9_]>[&\*]*\s", line):
self.error ("missing space around operator")
self.last_line_indent = indent
return | [
"def",
"line_checks",
"(",
"self",
",",
"line",
")",
":",
"indent",
"=",
"len",
"(",
"self",
".",
"indent_re",
".",
"search",
"(",
"line",
")",
".",
"group",
"(",
")",
")",
"if",
"re",
".",
"search",
"(",
"\"^\\s+}\"",
",",
"line",
")",
":",
"if"... | https://github.com/Ardour/ardour/blob/a63a18a3387b90c0920d9b1668d2a50bd6302b83/tools/cstyle.py#L196-L227 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/zipfile.py | python | ZipFile.read | (self, name, pwd=None) | Return file bytes for name. | Return file bytes for name. | [
"Return",
"file",
"bytes",
"for",
"name",
"."
] | def read(self, name, pwd=None):
"""Return file bytes for name."""
with self.open(name, "r", pwd) as fp:
return fp.read() | [
"def",
"read",
"(",
"self",
",",
"name",
",",
"pwd",
"=",
"None",
")",
":",
"with",
"self",
".",
"open",
"(",
"name",
",",
"\"r\"",
",",
"pwd",
")",
"as",
"fp",
":",
"return",
"fp",
".",
"read",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/zipfile.py#L1462-L1465 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/build/waf-1.7.13/lmbrwaflib/cryengine_modules.py | python | LyLauncherApplication | (ctx, *k, **kw) | return RunTaskGenerator(ctx, *k, **kw) | Module to build a custom launcher that will build monolithically when needed | Module to build a custom launcher that will build monolithically when needed | [
"Module",
"to",
"build",
"a",
"custom",
"launcher",
"that",
"will",
"build",
"monolithically",
"when",
"needed"
] | def LyLauncherApplication(ctx, *k, **kw):
"""
Module to build a custom launcher that will build monolithically when needed
"""
if ctx.is_module_exempt(kw.get('target', '')):
return
# Initialize the Task Generator
if not InitializeTaskGenerator(ctx, kw):
return
# Append common modules
AppendCommonModules(ctx, kw)
# Setup TaskGenerator specific settings
apply_cryengine_module_defines(ctx, kw)
SetupRunTimeLibraries(ctx, kw)
append_kw_entry(kw, 'win_linkflags', ['/SUBSYSTEM:CONSOLE'])
# Default clang behavior is to disable exceptions. For console apps we want to enable them
if 'CXXFLAGS' in list(ctx.env.keys()) and 'darwin' in ctx.get_current_platform_list(ctx.env['PLATFORM']):
if '-fno-exceptions' in ctx.env['CXXFLAGS']:
ctx.env['CXXFLAGS'].remove("-fno-exceptions")
LoadSharedSettings(ctx, k, kw)
ConfigureTaskGenerator(ctx, kw)
if not BuildTaskGenerator(ctx, kw):
return None
append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
if ctx.is_build_monolithic():
# Apply the monolithic logic to the application in the same way launchers are done
append_kw_entry(kw, 'defines', [ '_LIB', 'AZ_MONOLITHIC_BUILD' ])
append_kw_entry(kw, 'features', [ 'apply_monolithic_build_settings'])
kw['is_launcher'] = True
if 'gem_spec' in kw:
# Specified both a project and an override gem spec
gem_spec = kw['gem_spec']
static_modules = kw.get('static_modules', [])
codegen_static_modules_cpp_for_application(ctx, static_modules, gem_spec, k, kw)
ctx.apply_gem_spec_to_context(gem_spec, kw)
elif 'project' in kw:
# Specified a game project
project = kw['project']
codegen_static_modules_cpp_for_launcher(ctx, project, k, kw)
ctx.apply_gems_to_context(project, k, kw)
ctx.apply_required_gems_to_context(kw['target'], kw)
else:
# If no gem spec is specified, then this console app needs to be treated the same way as a launcher: Append the game name in front
# of the target, in order to handle multiple enabled games
active_projects = ctx.get_enabled_game_project_list()
for project in active_projects:
kw_per_console_app = copy.deepcopy(kw)
kw_per_console_app['target'] = project + kw['target'] # rename the target!
ctx.apply_gems_to_context(project, k, kw_per_console_app)
RunTaskGenerator(ctx, *k, **kw_per_console_app)
return None
return RunTaskGenerator(ctx, *k, **kw) | [
"def",
"LyLauncherApplication",
"(",
"ctx",
",",
"*",
"k",
",",
"*",
"*",
"kw",
")",
":",
"if",
"ctx",
".",
"is_module_exempt",
"(",
"kw",
".",
"get",
"(",
"'target'",
",",
"''",
")",
")",
":",
"return",
"# Initialize the Task Generator",
"if",
"not",
... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/cryengine_modules.py#L1708-L1774 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/estimators/estimator.py | python | BaseEstimator.get_variable_names | (self) | return [name for name, _ in list_variables(self.model_dir)] | Returns list of all variable names in this model.
Returns:
List of names. | Returns list of all variable names in this model. | [
"Returns",
"list",
"of",
"all",
"variable",
"names",
"in",
"this",
"model",
"."
] | def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)] | [
"def",
"get_variable_names",
"(",
"self",
")",
":",
"return",
"[",
"name",
"for",
"name",
",",
"_",
"in",
"list_variables",
"(",
"self",
".",
"model_dir",
")",
"]"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/estimators/estimator.py#L683-L689 | |
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/robotsim.py | python | RigidObjectModel.setContactParameters | (self, params) | return _robotsim.RigidObjectModel_setContactParameters(self, params) | setContactParameters(RigidObjectModel self, ContactParameters params) | setContactParameters(RigidObjectModel self, ContactParameters params) | [
"setContactParameters",
"(",
"RigidObjectModel",
"self",
"ContactParameters",
"params",
")"
] | def setContactParameters(self, params):
"""
setContactParameters(RigidObjectModel self, ContactParameters params)
"""
return _robotsim.RigidObjectModel_setContactParameters(self, params) | [
"def",
"setContactParameters",
"(",
"self",
",",
"params",
")",
":",
"return",
"_robotsim",
".",
"RigidObjectModel_setContactParameters",
"(",
"self",
",",
"params",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/robotsim.py#L5446-L5453 | |
twhui/LiteFlowNet | 00925aebf2db9ac50f4b1666f718688b10dd10d1 | scripts/cpp_lint.py | python | RemoveMultiLineCommentsFromRange | (lines, begin, end) | Clears a range of lines for multi-line comments. | Clears a range of lines for multi-line comments. | [
"Clears",
"a",
"range",
"of",
"lines",
"for",
"multi",
"-",
"line",
"comments",
"."
] | def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy' | [
"def",
"RemoveMultiLineCommentsFromRange",
"(",
"lines",
",",
"begin",
",",
"end",
")",
":",
"# Having // dummy comments makes the lines non-empty, so we will not get",
"# unnecessary blank line warnings later in the code.",
"for",
"i",
"in",
"range",
"(",
"begin",
",",
"end",
... | https://github.com/twhui/LiteFlowNet/blob/00925aebf2db9ac50f4b1666f718688b10dd10d1/scripts/cpp_lint.py#L1143-L1148 | ||
rdkit/rdkit | ede860ae316d12d8568daf5ee800921c3389c84e | rdkit/sping/pid.py | python | Canvas.drawRect | (self, x1, y1, x2, y2, edgeColor=None, edgeWidth=None, fillColor=None, dash=None,
**kwargs) | Draw the rectangle between x1,y1, and x2,y2. \
These should have x1<x2 and y1<y2. | Draw the rectangle between x1,y1, and x2,y2. \
These should have x1<x2 and y1<y2. | [
"Draw",
"the",
"rectangle",
"between",
"x1",
"y1",
"and",
"x2",
"y2",
".",
"\\",
"These",
"should",
"have",
"x1<x2",
"and",
"y1<y2",
"."
] | def drawRect(self, x1, y1, x2, y2, edgeColor=None, edgeWidth=None, fillColor=None, dash=None,
**kwargs):
"Draw the rectangle between x1,y1, and x2,y2. \
These should have x1<x2 and y1<y2."
pointList = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
self.drawPolygon(pointList, edgeColor, edgeWidth, fillColor, closed=1, dash=dash, **kwargs) | [
"def",
"drawRect",
"(",
"self",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"edgeColor",
"=",
"None",
",",
"edgeWidth",
"=",
"None",
",",
"fillColor",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"pointList",
"="... | https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/sping/pid.py#L445-L451 | ||
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | URI.authority | (self) | return ret | Get the authority part from an URI | Get the authority part from an URI | [
"Get",
"the",
"authority",
"part",
"from",
"an",
"URI"
] | def authority(self):
"""Get the authority part from an URI """
ret = libxml2mod.xmlURIGetAuthority(self._o)
return ret | [
"def",
"authority",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlURIGetAuthority",
"(",
"self",
".",
"_o",
")",
"return",
"ret"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L6969-L6972 | |
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/image/image.py | python | imread | (filename, *args, **kwargs) | return read_fn(filename, *args, **kwargs) | Read and decode an image to an NDArray.
.. note:: `imread` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
filename : str
Name of the image file to be loaded.
flag : {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
to_rgb : bool, default True
True for RGB formatted output (MXNet default).
False for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> mx.img.imread("flower.jpg")
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> mx.img.imread("flower.jpg", flag=0)
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> mx.img.imread("flower.jpg", to_rgb=0)
<NDArray 224x224x3 @cpu(0)> | Read and decode an image to an NDArray. | [
"Read",
"and",
"decode",
"an",
"image",
"to",
"an",
"NDArray",
"."
] | def imread(filename, *args, **kwargs):
"""Read and decode an image to an NDArray.
.. note:: `imread` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
filename : str
Name of the image file to be loaded.
flag : {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
to_rgb : bool, default True
True for RGB formatted output (MXNet default).
False for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> mx.img.imread("flower.jpg")
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> mx.img.imread("flower.jpg", flag=0)
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> mx.img.imread("flower.jpg", to_rgb=0)
<NDArray 224x224x3 @cpu(0)>
"""
if is_np_array():
read_fn = _npi.cvimread
else:
read_fn = _internal._cvimread
return read_fn(filename, *args, **kwargs) | [
"def",
"imread",
"(",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"is_np_array",
"(",
")",
":",
"read_fn",
"=",
"_npi",
".",
"cvimread",
"else",
":",
"read_fn",
"=",
"_internal",
".",
"_cvimread",
"return",
"read_fn",
"(",
... | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/image/image.py#L51-L93 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | Image.GetOrFindMaskColour | (*args, **kwargs) | return _core_.Image_GetOrFindMaskColour(*args, **kwargs) | GetOrFindMaskColour() -> (r,g,b)
Get the current mask colour or find a suitable colour. | GetOrFindMaskColour() -> (r,g,b) | [
"GetOrFindMaskColour",
"()",
"-",
">",
"(",
"r",
"g",
"b",
")"
] | def GetOrFindMaskColour(*args, **kwargs):
"""
GetOrFindMaskColour() -> (r,g,b)
Get the current mask colour or find a suitable colour.
"""
return _core_.Image_GetOrFindMaskColour(*args, **kwargs) | [
"def",
"GetOrFindMaskColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Image_GetOrFindMaskColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L3442-L3448 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/traitlets/py2/traitlets/traitlets.py | python | Enum.info | (self) | return result | Returns a description of the trait. | Returns a description of the trait. | [
"Returns",
"a",
"description",
"of",
"the",
"trait",
"."
] | def info(self):
""" Returns a description of the trait."""
result = 'any of ' + repr(self.values)
if self.allow_none:
return result + ' or None'
return result | [
"def",
"info",
"(",
"self",
")",
":",
"result",
"=",
"'any of '",
"+",
"repr",
"(",
"self",
".",
"values",
")",
"if",
"self",
".",
"allow_none",
":",
"return",
"result",
"+",
"' or None'",
"return",
"result"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/traitlets/py2/traitlets/traitlets.py#L2140-L2145 | |
koth/kcws | 88efbd36a7022de4e6e90f5a1fb880cf87cfae9f | third_party/setuptools/pkg_resources.py | python | Environment.remove | (self, dist) | Remove `dist` from the environment | Remove `dist` from the environment | [
"Remove",
"dist",
"from",
"the",
"environment"
] | def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist) | [
"def",
"remove",
"(",
"self",
",",
"dist",
")",
":",
"self",
".",
"_distmap",
"[",
"dist",
".",
"key",
"]",
".",
"remove",
"(",
"dist",
")"
] | https://github.com/koth/kcws/blob/88efbd36a7022de4e6e90f5a1fb880cf87cfae9f/third_party/setuptools/pkg_resources.py#L812-L814 | ||
Komnomnomnom/swigibpy | cfd307fdbfaffabc69a2dc037538d7e34a8b8daf | swigibpy.py | python | TagValueList.pop | (self) | return _swigibpy.TagValueList_pop(self) | pop(TagValueList self) -> std::vector< shared_ptr< TagValue > >::value_type | pop(TagValueList self) -> std::vector< shared_ptr< TagValue > >::value_type | [
"pop",
"(",
"TagValueList",
"self",
")",
"-",
">",
"std",
"::",
"vector<",
"shared_ptr<",
"TagValue",
">",
">",
"::",
"value_type"
] | def pop(self):
"""pop(TagValueList self) -> std::vector< shared_ptr< TagValue > >::value_type"""
return _swigibpy.TagValueList_pop(self) | [
"def",
"pop",
"(",
"self",
")",
":",
"return",
"_swigibpy",
".",
"TagValueList_pop",
"(",
"self",
")"
] | https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L714-L716 | |
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | chrome/tools/build/win/create_installer_archive.py | python | MakeStagingDirectories | (staging_dir) | return (file_path, temp_file_path) | Creates a staging path for installer archive. If directory exists already,
deletes the existing directory. | Creates a staging path for installer archive. If directory exists already,
deletes the existing directory. | [
"Creates",
"a",
"staging",
"path",
"for",
"installer",
"archive",
".",
"If",
"directory",
"exists",
"already",
"deletes",
"the",
"existing",
"directory",
"."
] | def MakeStagingDirectories(staging_dir):
"""Creates a staging path for installer archive. If directory exists already,
deletes the existing directory.
"""
file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path)
temp_file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(temp_file_path):
shutil.rmtree(temp_file_path)
os.makedirs(temp_file_path)
return (file_path, temp_file_path) | [
"def",
"MakeStagingDirectories",
"(",
"staging_dir",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"staging_dir",
",",
"TEMP_ARCHIVE_DIR",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"shutil",
".",
"rmtree",
... | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/chrome/tools/build/win/create_installer_archive.py#L157-L170 | |
weichengkuo/DeepBox | c4f8c065b6a51cf296540cc453a44f0519aaacc9 | caffe-fast-rcnn/scripts/cpp_lint.py | python | _IsTestFilename | (filename) | Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise. | Determines if the given filename has a suffix that identifies it as a test. | [
"Determines",
"if",
"the",
"given",
"filename",
"has",
"a",
"suffix",
"that",
"identifies",
"it",
"as",
"a",
"test",
"."
] | def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False | [
"def",
"_IsTestFilename",
"(",
"filename",
")",
":",
"if",
"(",
"filename",
".",
"endswith",
"(",
"'_test.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_unittest.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_regtest.cc'",
")",
")",
":",
"ret... | https://github.com/weichengkuo/DeepBox/blob/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/scripts/cpp_lint.py#L3603-L3617 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/utils/virtualenv.py | python | running_under_virtualenv | () | return _running_under_venv() or _running_under_regular_virtualenv() | Return True if we're running inside a virtualenv, False otherwise. | Return True if we're running inside a virtualenv, False otherwise. | [
"Return",
"True",
"if",
"we",
"re",
"running",
"inside",
"a",
"virtualenv",
"False",
"otherwise",
"."
] | def running_under_virtualenv():
# type: () -> bool
"""Return True if we're running inside a virtualenv, False otherwise.
"""
return _running_under_venv() or _running_under_regular_virtualenv() | [
"def",
"running_under_virtualenv",
"(",
")",
":",
"# type: () -> bool",
"return",
"_running_under_venv",
"(",
")",
"or",
"_running_under_regular_virtualenv",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/utils/virtualenv.py#L37-L41 | |
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/abins/abinsalgorithm.py | python | AbinsAlgorithm.get_cross_section | (scattering: str = 'Total',
nucleons_number: Optional[int] = None,
*,
protons_number: int) | return atom.neutron()[scattering_keys[scattering]] | Calculates cross section for the given element.
:param scattering: Type of cross-section: 'Incoherent', 'Coherent' or 'Total'
:param protons_number: number of protons in the given type fo atom
:param nucleons_number: number of nucleons in the given type of atom
:returns: cross section for that element | Calculates cross section for the given element.
:param scattering: Type of cross-section: 'Incoherent', 'Coherent' or 'Total'
:param protons_number: number of protons in the given type fo atom
:param nucleons_number: number of nucleons in the given type of atom
:returns: cross section for that element | [
"Calculates",
"cross",
"section",
"for",
"the",
"given",
"element",
".",
":",
"param",
"scattering",
":",
"Type",
"of",
"cross",
"-",
"section",
":",
"Incoherent",
"Coherent",
"or",
"Total",
":",
"param",
"protons_number",
":",
"number",
"of",
"protons",
"in... | def get_cross_section(scattering: str = 'Total',
nucleons_number: Optional[int] = None,
*,
protons_number: int) -> float:
"""
Calculates cross section for the given element.
:param scattering: Type of cross-section: 'Incoherent', 'Coherent' or 'Total'
:param protons_number: number of protons in the given type fo atom
:param nucleons_number: number of nucleons in the given type of atom
:returns: cross section for that element
"""
if nucleons_number is not None:
try:
atom = Atom(a_number=nucleons_number, z_number=protons_number)
# isotopes are not implemented for all elements so use different constructor in that cases
except RuntimeError:
logger.warning(f"Could not find data for isotope {nucleons_number}, "
f"using default values for {protons_number} protons.")
atom = Atom(z_number=protons_number)
else:
atom = Atom(z_number=protons_number)
scattering_keys = {'Incoherent': 'inc_scatt_xs',
'Coherent': 'coh_scatt_xs',
'Total': 'tot_scatt_xs'}
return atom.neutron()[scattering_keys[scattering]] | [
"def",
"get_cross_section",
"(",
"scattering",
":",
"str",
"=",
"'Total'",
",",
"nucleons_number",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"*",
",",
"protons_number",
":",
"int",
")",
"->",
"float",
":",
"if",
"nucleons_number",
"is",
"not",
... | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/abins/abinsalgorithm.py#L556-L581 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/json_schema_compiler/cc_generator.py | python | _Generator._GenerateEventNameConstant | (self, event) | return c | Generates a constant string array for the event name. | Generates a constant string array for the event name. | [
"Generates",
"a",
"constant",
"string",
"array",
"for",
"the",
"event",
"name",
"."
] | def _GenerateEventNameConstant(self, event):
"""Generates a constant string array for the event name.
"""
c = Code()
c.Append('const char kEventName[] = "%s.%s";' % (
self._namespace.name, event.name))
return c | [
"def",
"_GenerateEventNameConstant",
"(",
"self",
",",
"event",
")",
":",
"c",
"=",
"Code",
"(",
")",
"c",
".",
"Append",
"(",
"'const char kEventName[] = \"%s.%s\";'",
"%",
"(",
"self",
".",
"_namespace",
".",
"name",
",",
"event",
".",
"name",
")",
")",
... | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/json_schema_compiler/cc_generator.py#L1105-L1111 | |
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/python/ops/nn_grad.py | python | _LogSoftmaxGrad | (op, grad) | return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax | The gradient for log_softmax.
log_softmax = input - log(sum(exp(input))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input. | The gradient for log_softmax. | [
"The",
"gradient",
"for",
"log_softmax",
"."
] | def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
log_softmax = input - log(sum(exp(input))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax | [
"def",
"_LogSoftmaxGrad",
"(",
"op",
",",
"grad",
")",
":",
"softmax",
"=",
"math_ops",
".",
"exp",
"(",
"op",
".",
"outputs",
"[",
"0",
"]",
")",
"return",
"grad",
"-",
"math_ops",
".",
"reduce_sum",
"(",
"grad",
",",
"1",
",",
"keep_dims",
"=",
"... | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/nn_grad.py#L163-L177 | |
klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/build/android/gyp/util/build_utils.py | python | AtomicOutput | (path, only_if_changed=True, mode='w+b') | Helper to prevent half-written outputs.
Args:
path: Path to the final output file, which will be written atomically.
only_if_changed: If True (the default), do not touch the filesystem
if the content has not changed.
mode: The mode to open the file in (str).
Returns:
A Python context manager that yields a NamedTemporaryFile instance
that must be used by clients to write the data to. On exit, the
manager will try to replace the final output file with the
temporary one if necessary. The temporary file is always destroyed
on exit.
Example:
with build_utils.AtomicOutput(output_path) as tmp_file:
subprocess.check_call(['prog', '--output', tmp_file.name]) | Helper to prevent half-written outputs. | [
"Helper",
"to",
"prevent",
"half",
"-",
"written",
"outputs",
"."
] | def AtomicOutput(path, only_if_changed=True, mode='w+b'):
"""Helper to prevent half-written outputs.
Args:
path: Path to the final output file, which will be written atomically.
only_if_changed: If True (the default), do not touch the filesystem
if the content has not changed.
mode: The mode to open the file in (str).
Returns:
A Python context manager that yields a NamedTemporaryFile instance
that must be used by clients to write the data to. On exit, the
manager will try to replace the final output file with the
temporary one if necessary. The temporary file is always destroyed
on exit.
Example:
with build_utils.AtomicOutput(output_path) as tmp_file:
subprocess.check_call(['prog', '--output', tmp_file.name])
"""
# Create in same directory to ensure same filesystem when moving.
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
MakeDirectory(dirname)
with tempfile.NamedTemporaryFile(
mode, suffix=os.path.basename(path), dir=dirname, delete=False) as f:
try:
yield f
# file should be closed before comparison/move.
f.close()
if not (only_if_changed and os.path.exists(path) and
filecmp.cmp(f.name, path)):
shutil.move(f.name, path)
finally:
if os.path.exists(f.name):
os.unlink(f.name) | [
"def",
"AtomicOutput",
"(",
"path",
",",
"only_if_changed",
"=",
"True",
",",
"mode",
"=",
"'w+b'",
")",
":",
"# Create in same directory to ensure same filesystem when moving.",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"... | https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/android/gyp/util/build_utils.py#L151-L185 | ||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/turtle.py | python | TurtleScreenBase._iscolorstring | (self, color) | return ok | Check if the string color is a legal Tkinter color string. | Check if the string color is a legal Tkinter color string. | [
"Check",
"if",
"the",
"string",
"color",
"is",
"a",
"legal",
"Tkinter",
"color",
"string",
"."
] | def _iscolorstring(self, color):
"""Check if the string color is a legal Tkinter color string.
"""
try:
rgb = self.cv.winfo_rgb(color)
ok = True
except TK.TclError:
ok = False
return ok | [
"def",
"_iscolorstring",
"(",
"self",
",",
"color",
")",
":",
"try",
":",
"rgb",
"=",
"self",
".",
"cv",
".",
"winfo_rgb",
"(",
"color",
")",
"ok",
"=",
"True",
"except",
"TK",
".",
"TclError",
":",
"ok",
"=",
"False",
"return",
"ok"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/turtle.py#L592-L600 | |
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/warnings.py | python | simplefilter | (action, category=Warning, lineno=0, append=0) | Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters | Insert a simple entry into the list of warnings filters (at the front). | [
"Insert",
"a",
"simple",
"entry",
"into",
"the",
"list",
"of",
"warnings",
"filters",
"(",
"at",
"the",
"front",
")",
"."
] | def simplefilter(action, category=Warning, lineno=0, append=0):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item) | [
"def",
"simplefilter",
"(",
"action",
",",
"category",
"=",
"Warning",
",",
"lineno",
"=",
"0",
",",
"append",
"=",
"0",
")",
":",
"assert",
"action",
"in",
"(",
"\"error\"",
",",
"\"ignore\"",
",",
"\"always\"",
",",
"\"default\"",
",",
"\"module\"",
",... | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/warnings.py#L74-L92 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/stats/mstats_basic.py | python | argstoarray | (*args) | return output | Constructs a 2D array from a group of sequences.
Sequences are filled with missing values to match the length of the longest
sequence.
Parameters
----------
args : sequences
Group of sequences.
Returns
-------
argstoarray : MaskedArray
A ( `m` x `n` ) masked array, where `m` is the number of arguments and
`n` the length of the longest argument.
Notes
-----
`numpy.ma.row_stack` has identical behavior, but is called with a sequence
of sequences. | Constructs a 2D array from a group of sequences. | [
"Constructs",
"a",
"2D",
"array",
"from",
"a",
"group",
"of",
"sequences",
"."
] | def argstoarray(*args):
"""
Constructs a 2D array from a group of sequences.
Sequences are filled with missing values to match the length of the longest
sequence.
Parameters
----------
args : sequences
Group of sequences.
Returns
-------
argstoarray : MaskedArray
A ( `m` x `n` ) masked array, where `m` is the number of arguments and
`n` the length of the longest argument.
Notes
-----
`numpy.ma.row_stack` has identical behavior, but is called with a sequence
of sequences.
"""
if len(args) == 1 and not isinstance(args[0], ndarray):
output = ma.asarray(args[0])
if output.ndim != 2:
raise ValueError("The input should be 2D")
else:
n = len(args)
m = max([len(k) for k in args])
output = ma.array(np.empty((n,m), dtype=float), mask=True)
for (k,v) in enumerate(args):
output[k,:len(v)] = v
output[np.logical_not(np.isfinite(output._data))] = masked
return output | [
"def",
"argstoarray",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"not",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"ndarray",
")",
":",
"output",
"=",
"ma",
".",
"asarray",
"(",
"args",
"[",
"0",
"]",
")",
... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/stats/mstats_basic.py#L101-L137 | |
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/distribute/failure_handling/failure_handling.py | python | CoordinatedCheckpointManager._wait_for_signal | (self) | Watch out for peer preemption signal and step-to-save and acknowledge. | Watch out for peer preemption signal and step-to-save and acknowledge. | [
"Watch",
"out",
"for",
"peer",
"preemption",
"signal",
"and",
"step",
"-",
"to",
"-",
"save",
"and",
"acknowledge",
"."
] | def _wait_for_signal(self):
"""Watch out for peer preemption signal and step-to-save and acknowledge."""
context.context().get_config_key_value(_RUN_COUNT_KEY)
# This must be set before we set the ack key below, otherwise its value in
# _checkpoint_if_preempted may be outdated.
self._received_sigterm_and_step.set()
ack_key = f'{_ACKNOWLEDGE_KEY}_{self._id_in_cluster}'
context.context().set_config_key_value(ack_key, '1')
logging.info('CoordinatedCheckpointManager._wait_for_signal: %s set, '
'preemption awareness acknowledged', ack_key) | [
"def",
"_wait_for_signal",
"(",
"self",
")",
":",
"context",
".",
"context",
"(",
")",
".",
"get_config_key_value",
"(",
"_RUN_COUNT_KEY",
")",
"# This must be set before we set the ack key below, otherwise its value in",
"# _checkpoint_if_preempted may be outdated.",
"self",
"... | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/distribute/failure_handling/failure_handling.py#L369-L381 | ||
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/numpy/utils.py | python | _broadcast_to_shape | (x, shape) | return _broadcast_to(x, F.shape(x), shape, ndim_to) | Broadcasts x from current shape to shape | Broadcasts x from current shape to shape | [
"Broadcasts",
"x",
"from",
"current",
"shape",
"to",
"shape"
] | def _broadcast_to_shape(x, shape):
"""Broadcasts x from current shape to shape"""
ndim_to = len(shape)
x = _expand(x, ndim_to)
return _broadcast_to(x, F.shape(x), shape, ndim_to) | [
"def",
"_broadcast_to_shape",
"(",
"x",
",",
"shape",
")",
":",
"ndim_to",
"=",
"len",
"(",
"shape",
")",
"x",
"=",
"_expand",
"(",
"x",
",",
"ndim_to",
")",
"return",
"_broadcast_to",
"(",
"x",
",",
"F",
".",
"shape",
"(",
"x",
")",
",",
"shape",
... | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/numpy/utils.py#L90-L94 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py3/sklearn/pipeline.py | python | FeatureUnion.fit_transform | (self, X, y=None, **fit_params) | return Xs | Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. | Fit all transformers, transform the data and concatenate results. | [
"Fit",
"all",
"transformers",
"transform",
"the",
"data",
"and",
"concatenate",
"results",
"."
] | def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
results = self._parallel_func(X, y, fit_params, _fit_transform_one)
if not results:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*results)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs | [
"def",
"fit_transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"*",
"*",
"fit_params",
")",
":",
"results",
"=",
"self",
".",
"_parallel_func",
"(",
"X",
",",
"y",
",",
"fit_params",
",",
"_fit_transform_one",
")",
"if",
"not",
"results",
... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/pipeline.py#L919-L948 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | third_party/jinja2/nodes.py | python | Node.set_environment | (self, environment) | return self | Set the environment for all nodes. | Set the environment for all nodes. | [
"Set",
"the",
"environment",
"for",
"all",
"nodes",
"."
] | def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self | [
"def",
"set_environment",
"(",
"self",
",",
"environment",
")",
":",
"todo",
"=",
"deque",
"(",
"[",
"self",
"]",
")",
"while",
"todo",
":",
"node",
"=",
"todo",
".",
"popleft",
"(",
")",
"node",
".",
"environment",
"=",
"environment",
"todo",
".",
"... | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/jinja2/nodes.py#L219-L226 | |
trailofbits/llvm-sanitizer-tutorial | d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99 | llvm/tools/clang/tools/scan-build-py/libear/__init__.py | python | Toolset.set_language_standard | (self, standard) | part of public interface | part of public interface | [
"part",
"of",
"public",
"interface"
] | def set_language_standard(self, standard):
""" part of public interface """
self.c_flags.append('-std=' + standard) | [
"def",
"set_language_standard",
"(",
"self",
",",
"standard",
")",
":",
"self",
".",
"c_flags",
".",
"append",
"(",
"'-std='",
"+",
"standard",
")"
] | https://github.com/trailofbits/llvm-sanitizer-tutorial/blob/d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99/llvm/tools/clang/tools/scan-build-py/libear/__init__.py#L91-L93 | ||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/training/monitored_session.py | python | _HookedSession._check_stop | (self) | return self._should_stop | See base class. | See base class. | [
"See",
"base",
"class",
"."
] | def _check_stop(self):
"""See base class."""
return self._should_stop | [
"def",
"_check_stop",
"(",
"self",
")",
":",
"return",
"self",
".",
"_should_stop"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/training/monitored_session.py#L947-L949 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/plat-mac/lib-scriptpackages/Netscape/WorldWideWeb_suite.py | python | WorldWideWeb_suite_Events.parse_anchor | (self, _object, _attributes={}, **_arguments) | parse anchor: Resolves the relative URL
Required argument: Main URL
Keyword argument relative_to: Relative URL
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: Parsed URL | parse anchor: Resolves the relative URL
Required argument: Main URL
Keyword argument relative_to: Relative URL
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: Parsed URL | [
"parse",
"anchor",
":",
"Resolves",
"the",
"relative",
"URL",
"Required",
"argument",
":",
"Main",
"URL",
"Keyword",
"argument",
"relative_to",
":",
"Relative",
"URL",
"Keyword",
"argument",
"_attributes",
":",
"AppleEvent",
"attribute",
"dictionary",
"Returns",
"... | def parse_anchor(self, _object, _attributes={}, **_arguments):
"""parse anchor: Resolves the relative URL
Required argument: Main URL
Keyword argument relative_to: Relative URL
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: Parsed URL
"""
_code = 'WWW!'
_subcode = 'PRSA'
aetools.keysubst(_arguments, self._argmap_parse_anchor)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----'] | [
"def",
"parse_anchor",
"(",
"self",
",",
"_object",
",",
"_attributes",
"=",
"{",
"}",
",",
"*",
"*",
"_arguments",
")",
":",
"_code",
"=",
"'WWW!'",
"_subcode",
"=",
"'PRSA'",
"aetools",
".",
"keysubst",
"(",
"_arguments",
",",
"self",
".",
"_argmap_par... | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/plat-mac/lib-scriptpackages/Netscape/WorldWideWeb_suite.py#L172-L192 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/py_vulcanize/third_party/rjsmin/_setup/py3/commands.py | python | BuildExt.initialize_options | (self) | Prepare for new options | Prepare for new options | [
"Prepare",
"for",
"new",
"options"
] | def initialize_options(self):
""" Prepare for new options """
_build_ext.build_ext.initialize_options(self)
if 'build_ext' in _option_defaults:
for opt_name, default in _option_defaults['build_ext']:
setattr(self, opt_name, default) | [
"def",
"initialize_options",
"(",
"self",
")",
":",
"_build_ext",
".",
"build_ext",
".",
"initialize_options",
"(",
"self",
")",
"if",
"'build_ext'",
"in",
"_option_defaults",
":",
"for",
"opt_name",
",",
"default",
"in",
"_option_defaults",
"[",
"'build_ext'",
... | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/py_vulcanize/third_party/rjsmin/_setup/py3/commands.py#L181-L186 | ||
rsocket/rsocket-cpp | 45ed594ebd6701f40795c31ec922d784ec7fc921 | build/fbcode_builder/fbcode_builder.py | python | FBCodeBuilder.render | (self, steps) | return res | Converts nested actions to your builder's expected output format.
Typically takes the output of build(). | [] | def render(self, steps):
"""
Converts nested actions to your builder's expected output format.
Typically takes the output of build().
"""
res = self._render_impl(steps) # Implementation-dependent
# Now that the output is rendered, we expect all options to have
# been used.
unused_options = set(self._options_do_not_access)
unused_options -= self.options_used
if unused_options:
raise RuntimeError(
"Unused options: {0} -- please check if you made a typo "
"in any of them. Those that are truly not useful should "
"be not be set so that this typo detection can be useful.".format(
unused_options
)
)
return res | [
"def",
"render",
"(",
"self",
",",
"steps",
")",
":",
"res",
"=",
"self",
".",
"_render_impl",
"(",
"steps",
")",
"# Implementation-dependent",
"# Now that the output is rendered, we expect all options to have",
"# been used.",
"unused_options",
"=",
"set",
"(",
"self",... | https://github.com/rsocket/rsocket-cpp/blob/45ed594ebd6701f40795c31ec922d784ec7fc921/build/fbcode_builder/fbcode_builder.py#L124-L144 | ||
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/client/timeline.py | python | _ChromeTraceFormatter.emit_flow_start | (self, name, timestamp, pid, tid, flow_id) | Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer. | Adds a flow start event to the trace. | [
"Adds",
"a",
"flow",
"start",
"event",
"to",
"the",
"trace",
"."
] | def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event) | [
"def",
"emit_flow_start",
"(",
"self",
",",
"name",
",",
"timestamp",
",",
"pid",
",",
"tid",
",",
"flow_id",
")",
":",
"event",
"=",
"self",
".",
"_create_event",
"(",
"'s'",
",",
"'DataFlow'",
",",
"name",
",",
"pid",
",",
"tid",
",",
"timestamp",
... | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/client/timeline.py#L184-L199 | ||
rdiankov/openrave | d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7 | docs/breathe/parser/doxygen/index.py | python | DoxygenTypeSub.find_compounds_and_members | (self, details) | return results | Returns a list of all compounds and their members which match details | Returns a list of all compounds and their members which match details | [
"Returns",
"a",
"list",
"of",
"all",
"compounds",
"and",
"their",
"members",
"which",
"match",
"details"
] | def find_compounds_and_members(self, details):
"""
Returns a list of all compounds and their members which match details
"""
results = []
for compound in self.compound:
members = compound.find_members(details)
if members:
results.append([compound, members])
else:
if details.match(compound):
results.append([compound, []])
return results | [
"def",
"find_compounds_and_members",
"(",
"self",
",",
"details",
")",
":",
"results",
"=",
"[",
"]",
"for",
"compound",
"in",
"self",
".",
"compound",
":",
"members",
"=",
"compound",
".",
"find_members",
"(",
"details",
")",
"if",
"members",
":",
"result... | https://github.com/rdiankov/openrave/blob/d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7/docs/breathe/parser/doxygen/index.py#L20-L34 | |
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/ctc_ops.py | python | ctc_loss_dense | (labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=0,
name=None) | Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006),
using the batched forward backward algorithm described in (Sim et al., 2017).
Notes:
Significant differences from tf.compat.v1.nn.ctc_loss:
Supports GPU and TPU (tf.compat.v1.nn.ctc_loss supports CPU only):
For batched operations, GPU and TPU are significantly faster than using
ctc_loss on CPU.
This implementation runs on CPU, but significantly slower than ctc_loss.
Blank label is 0 rather num_classes - 1, unless overridden by blank_index.
Logits and labels are dense arrays with padding rather than SparseTensor.
The only mode supported is the same as:
preprocess_collapse_repeated=False, ctc_merge_repeated=True
To collapse labels, the caller can preprocess label sequence first.
The dense implementation supports both CPU, GPU and TPU. A fast path is
provided that significantly improves memory use for large vocabulary if the
caller preprocesses label sequences to get unique label indices on the CPU
(eg. in the data input pipeline) using ctc_ops.unique and simplifies this in
the optional "unique" kwarg. This is especially useful for TPU and GPU but
also works if used on CPU.
Args:
labels: tensor of shape [batch_size, max_label_seq_length]
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size] Length of reference label
sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by unique(labels). If
supplied, enable a faster, memory efficient implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
Improving the efficiency of forward-backward algorithm using batched
computation in TensorFlow:
[Sim et al., 2017](https://ieeexplore.ieee.org/document/8268944)
([pdf](http://bacchiani.net/resume/papers/ASRU2017.pdf)) | Computes CTC (Connectionist Temporal Classification) loss. | [
"Computes",
"CTC",
"(",
"Connectionist",
"Temporal",
"Classification",
")",
"loss",
"."
] | def ctc_loss_dense(labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=0,
name=None):
"""Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006),
using the batched forward backward algorithm described in (Sim et al., 2017).
Notes:
Significant differences from tf.compat.v1.nn.ctc_loss:
Supports GPU and TPU (tf.compat.v1.nn.ctc_loss supports CPU only):
For batched operations, GPU and TPU are significantly faster than using
ctc_loss on CPU.
This implementation runs on CPU, but significantly slower than ctc_loss.
Blank label is 0 rather than num_classes - 1, unless overridden by blank_index.
Logits and labels are dense arrays with padding rather than SparseTensor.
The only mode supported is the same as:
preprocess_collapse_repeated=False, ctc_merge_repeated=True
To collapse labels, the caller can preprocess label sequence first.
The dense implementation supports both CPU, GPU and TPU. A fast path is
provided that significantly improves memory use for large vocabulary if the
caller preprocesses label sequences to get unique label indices on the CPU
(eg. in the data input pipeline) using ctc_ops.unique and simplifies this in
the optional "unique" kwarg. This is especially useful for TPU and GPU but
also works if used on CPU.
Args:
labels: tensor of shape [batch_size, max_label_seq_length]
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size] Length of reference label
sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by unique(labels). If
supplied, enable a faster, memory efficient implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
Improving the efficiency of forward-backward algorithm using batched
computation in TensorFlow:
[Sim et al., 2017](https://ieeexplore.ieee.org/document/8268944)
([pdf](http://bacchiani.net/resume/papers/ASRU2017.pdf))
"""
with ops.name_scope(name, "ctc_loss_dense",
[logits, labels, label_length, logit_length]):
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
label_length = ops.convert_to_tensor(label_length, name="label_length")
logit_length = ops.convert_to_tensor(logit_length, name="logit_length")
orig_dtype = logits.dtype
if orig_dtype in (dtypes.float16, dtypes.bfloat16):
logits = math_ops.cast(logits, dtypes.float32)
if not logits_time_major:
logits = array_ops.transpose(logits, perm=[1, 0, 2])
if blank_index != 0:
if blank_index < 0:
blank_index += _get_dim(logits, 2)
logits = array_ops.concat([
logits[:, :, blank_index:blank_index + 1],
logits[:, :, :blank_index],
logits[:, :, blank_index + 1:],
],
axis=2)
labels = array_ops.where(labels < blank_index, labels + 1, labels)
args = [logits, labels, label_length, logit_length]
if unique:
unique_y, unique_idx = unique
if blank_index != 0:
unique_y = array_ops.where(unique_y < blank_index, unique_y + 1,
unique_y)
label_mask_len = math_ops.reduce_max(unique_idx, axis=1) + 1
max_label_length = _get_dim(unique_y, 1)
label_mask = array_ops.sequence_mask(label_mask_len, max_label_length)
unique_y = array_ops.where(label_mask, unique_y,
array_ops.zeros_like(unique_y))
args.extend([unique_y, unique_idx])
@custom_gradient.custom_gradient
def compute_ctc_loss(logits_t, labels_t, label_length_t, logit_length_t,
*unique_t):
"""Compute CTC loss."""
logits_t.set_shape(logits.shape)
labels_t.set_shape(labels.shape)
label_length_t.set_shape(label_length.shape)
logit_length_t.set_shape(logit_length.shape)
kwargs = dict(
logits=logits_t,
labels=labels_t,
label_length=label_length_t,
logit_length=logit_length_t)
if unique_t:
kwargs["unique"] = unique_t
result = ctc_loss_and_grad(**kwargs)
def grad(grad_loss):
grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * result[1]]
grad += [None] * (len(args) - len(grad))
return grad
return result[0], grad
loss = compute_ctc_loss(*args)
if orig_dtype in (dtypes.float16, dtypes.bfloat16):
loss = math_ops.cast(loss, orig_dtype)
return loss | [
"def",
"ctc_loss_dense",
"(",
"labels",
",",
"logits",
",",
"label_length",
",",
"logit_length",
",",
"logits_time_major",
"=",
"True",
",",
"unique",
"=",
"None",
",",
"blank_index",
"=",
"0",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"nam... | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/ctc_ops.py#L984-L1114 | ||
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Code/Tools/waf-1.7.13/waflib/Context.py | python | Context.cmd_and_log | (self, cmd, **kw) | return out | Execute a command and return stdout if the execution is successful.
An exception is thrown when the exit status is non-0. In that case, both stderr and stdout
will be bound to the WafError object::
def configure(conf):
out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH)
(out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH)
try:
conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH)
except Exception as e:
print(e.stdout, e.stderr)
:param cmd: args for subprocess.Popen
:param kw: keyword arguments for subprocess.Popen | Execute a command and return stdout if the execution is successful.
An exception is thrown when the exit status is non-0. In that case, both stderr and stdout
will be bound to the WafError object:: | [
"Execute",
"a",
"command",
"and",
"return",
"stdout",
"if",
"the",
"execution",
"is",
"successful",
".",
"An",
"exception",
"is",
"thrown",
"when",
"the",
"exit",
"status",
"is",
"non",
"-",
"0",
".",
"In",
"that",
"case",
"both",
"stderr",
"and",
"stdou... | def cmd_and_log(self, cmd, **kw):
"""
Execute a command and return stdout if the execution is successful.
An exception is thrown when the exit status is non-0. In that case, both stderr and stdout
will be bound to the WafError object::
def configure(conf):
out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH)
(out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH)
try:
conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH)
except Exception as e:
print(e.stdout, e.stderr)
:param cmd: args for subprocess.Popen
:param kw: keyword arguments for subprocess.Popen
"""
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
Logs.debug('runner: %r' % cmd)
if 'quiet' in kw:
quiet = kw['quiet']
del kw['quiet']
else:
quiet = None
if 'output' in kw:
to_ret = kw['output']
del kw['output']
else:
to_ret = STDOUT
kw['stdout'] = kw['stderr'] = subprocess.PIPE
if quiet is None:
self.to_log(cmd)
if hasattr(self, 'cmd_coordinator'):
(ret, out, err) = self.cmd_coordinator.execute_command(cmd, **kw)
else:
try:
p = subprocess.Popen(cmd, **kw)
(out, err) = p.communicate()
ret = p.returncode
except Exception as e:
raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
if not isinstance(out, str):
out = out.decode(sys.stdout.encoding or 'iso8859-1')
if not isinstance(err, str):
err = err.decode(sys.stdout.encoding or 'iso8859-1')
if out and quiet != STDOUT and quiet != BOTH:
self.to_log('out: %s' % out)
if err and quiet != STDERR and quiet != BOTH:
self.to_log('err: %s' % err)
if ret:
e = Errors.WafError('Command %r returned %r' % (cmd, ret))
e.returncode =ret
e.stderr = err
e.stdout = out
raise e
if to_ret == BOTH:
return (out, err)
elif to_ret == STDERR:
return err
return out | [
"def",
"cmd_and_log",
"(",
"self",
",",
"cmd",
",",
"*",
"*",
"kw",
")",
":",
"subprocess",
"=",
"Utils",
".",
"subprocess",
"kw",
"[",
"'shell'",
"]",
"=",
"isinstance",
"(",
"cmd",
",",
"str",
")",
"Logs",
".",
"debug",
"(",
"'runner: %r'",
"%",
... | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Context.py#L416-L485 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/network/auth.py | python | MultiDomainBasicAuth.warn_on_401 | (self, resp, **kwargs) | Response callback to warn about incorrect credentials. | Response callback to warn about incorrect credentials. | [
"Response",
"callback",
"to",
"warn",
"about",
"incorrect",
"credentials",
"."
] | def warn_on_401(self, resp, **kwargs):
# type: (Response, **Any) -> None
"""Response callback to warn about incorrect credentials."""
if resp.status_code == 401:
logger.warning(
'401 Error, Credentials not correct for %s', resp.request.url,
) | [
"def",
"warn_on_401",
"(",
"self",
",",
"resp",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Response, **Any) -> None",
"if",
"resp",
".",
"status_code",
"==",
"401",
":",
"logger",
".",
"warning",
"(",
"'401 Error, Credentials not correct for %s'",
",",
"resp",
... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/network/auth.py#L575-L587 | ||
LLNL/lbann | 26083e6c86050302ce33148aea70f62e61cacb92 | applications/ATOM/eval_atom_wae.py | python | construct_data_reader | (run_args) | return message | Construct Protobuf message for Python data reader.
The Python data reader will import this Python file to access the
sample access functions. | Construct Protobuf message for Python data reader. | [
"Construct",
"Protobuf",
"message",
"for",
"Python",
"data",
"reader",
"."
] | def construct_data_reader(run_args):
"""
Construct Protobuf message for Python data reader.
The Python data reader will import this Python file to access the
sample access functions.
"""
module_file = os.path.abspath(run_args.data_module_file)
os.environ["DATA_CONFIG"] = os.path.abspath(run_args.data_config)
module_name = os.path.splitext(os.path.basename(module_file))[0]
module_dir = os.path.dirname(module_file)
print("module_name: {}\tmodule_dir: {}".format(module_name, module_dir))
# Base data reader message
message = lbann.reader_pb2.DataReader()
# Training set data reader
data_reader = message.reader.add()
data_reader.name = "python"
data_reader.role = "train"
data_reader.shuffle = True
data_reader.percent_of_data_to_use = 1.0
data_reader.validation_percent = 0.1
data_reader.tournament_percent = 0.1
data_reader.python.module = module_name
data_reader.python.module_dir = module_dir
data_reader.python.sample_function = "get_sample"
data_reader.python.num_samples_function = "num_samples"
data_reader.python.sample_dims_function = "sample_dims"
return message | [
"def",
"construct_data_reader",
"(",
"run_args",
")",
":",
"module_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"run_args",
".",
"data_module_file",
")",
"os",
".",
"environ",
"[",
"\"DATA_CONFIG\"",
"]",
"=",
"os",
".",
"path",
".",
"abspath",
"(",... | https://github.com/LLNL/lbann/blob/26083e6c86050302ce33148aea70f62e61cacb92/applications/ATOM/eval_atom_wae.py#L194-L228 | |
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/pickletools.py | python | read_floatnl | (f) | return float(s) | r"""
>>> import StringIO
>>> read_floatnl(StringIO.StringIO("-1.25\n6"))
-1.25 | r"""
>>> import StringIO
>>> read_floatnl(StringIO.StringIO("-1.25\n6"))
-1.25 | [
"r",
">>>",
"import",
"StringIO",
">>>",
"read_floatnl",
"(",
"StringIO",
".",
"StringIO",
"(",
"-",
"1",
".",
"25",
"\\",
"n6",
"))",
"-",
"1",
".",
"25"
] | def read_floatnl(f):
r"""
>>> import StringIO
>>> read_floatnl(StringIO.StringIO("-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s) | [
"def",
"read_floatnl",
"(",
"f",
")",
":",
"s",
"=",
"read_stringnl",
"(",
"f",
",",
"decode",
"=",
"False",
",",
"stripquotes",
"=",
"False",
")",
"return",
"float",
"(",
"s",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/pickletools.py#L562-L569 | |
gem5/gem5 | 141cc37c2d4b93959d4c249b8f7e6a8b2ef75338 | src/python/gem5/components/processors/random_generator_core.py | python | RandomGeneratorCore.__init__ | (
self,
duration: str,
rate: str,
block_size: int,
min_addr: int,
max_addr: int,
rd_perc: int,
data_limit: int,
) | The random generator core interface.
This class defines the interface for a generator core that will create
a random traffic specific to the parameters below. This core uses
PyTrafficGen to create and inject the synthetic traffic.
:param duration: The number of ticks for the generator core to generate
traffic.
:param rate: The rate at which the synthetic data is read/written.
:param block_size: The number of bytes to be read/written with each
request.
:param min_addr: The lower bound of the address range the generator
will read/write from/to.
:param max_addr: The upper bound of the address range the generator
will read/write from/to.
:param rd_perc: The percentage of read requests among all the generated
requests. The write percentage would be equal to 100 - rd_perc.
:param data_limit: The amount of data in bytes to read/write by the
generator before stopping generation. | The random generator core interface. | [
"The",
"random",
"generator",
"core",
"interface",
"."
] | def __init__(
self,
duration: str,
rate: str,
block_size: int,
min_addr: int,
max_addr: int,
rd_perc: int,
data_limit: int,
) -> None:
super().__init__()
""" The random generator core interface.
This class defines the interface for a generator core that will create
a random traffic specific to the parameters below. This core uses
PyTrafficGen to create and inject the synthetic traffic.
:param duration: The number of ticks for the generator core to generate
traffic.
:param rate: The rate at which the synthetic data is read/written.
:param block_size: The number of bytes to be read/written with each
request.
:param min_addr: The lower bound of the address range the generator
will read/write from/to.
:param max_addr: The upper bound of the address range the generator
will read/write from/to.
:param rd_perc: The percentage of read requests among all the generated
requests. The write percentage would be equal to 100 - rd_perc.
:param data_limit: The amount of data in bytes to read/write by the
generator before stopping generation.
"""
self.generator = PyTrafficGen()
self._duration = duration
self._rate = rate
self._block_size = block_size
self._min_addr = min_addr
self._max_addr = max_addr
self._rd_perc = rd_perc
self._data_limit = data_limit | [
"def",
"__init__",
"(",
"self",
",",
"duration",
":",
"str",
",",
"rate",
":",
"str",
",",
"block_size",
":",
"int",
",",
"min_addr",
":",
"int",
",",
"max_addr",
":",
"int",
",",
"rd_perc",
":",
"int",
",",
"data_limit",
":",
"int",
",",
")",
"->"... | https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/python/gem5/components/processors/random_generator_core.py#L40-L78 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/arrayobj.py | python | record_setattr | (context, builder, sig, args, attr) | Generic setattr() implementation for records: set the given
record member, i.e. a scalar. | Generic setattr() implementation for records: set the given
record member, i.e. a scalar. | [
"Generic",
"setattr",
"()",
"implementation",
"for",
"records",
":",
"set",
"the",
"given",
"record",
"member",
"i",
".",
"e",
".",
"a",
"scalar",
"."
] | def record_setattr(context, builder, sig, args, attr):
"""
Generic setattr() implementation for records: set the given
record member, i.e. a scalar.
"""
typ, valty = sig.args
target, val = args
context.sentry_record_alignment(typ, attr)
offset = typ.offset(attr)
elemty = typ.typeof(attr)
dptr = cgutils.get_record_member(builder, target, offset,
context.get_data_type(elemty))
val = context.cast(builder, val, valty, elemty)
align = None if typ.aligned else 1
context.pack_value(builder, elemty, val, dptr, align=align) | [
"def",
"record_setattr",
"(",
"context",
",",
"builder",
",",
"sig",
",",
"args",
",",
"attr",
")",
":",
"typ",
",",
"valty",
"=",
"sig",
".",
"args",
"target",
",",
"val",
"=",
"args",
"context",
".",
"sentry_record_alignment",
"(",
"typ",
",",
"attr"... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/arrayobj.py#L2387-L2403 | ||
crosslife/OpenBird | 9e0198a1a2295f03fa1e8676e216e22c9c7d380b | cocos2d/tools/localvartoauto/LocalVarToAuto.py | python | change_local_classvarname_to_auto | (filename, rep, change) | change all local class variable name to auto | change all local class variable name to auto | [
"change",
"all",
"local",
"class",
"variable",
"name",
"to",
"auto"
] | def change_local_classvarname_to_auto(filename, rep, change):
"change all local class variable name to auto"
f = open(filename)
content = None
changed = False
# read the file, change it, and save it to content
try:
content = cStringIO.StringIO()
changed = False
for line in f:
i = 0
#start to replace
while True:
result = rep.match(line, i)
# founded
if result:
changed = True
#find the matched string where to start
startIndex = line.index(result.group(0))
#replace the change part
line = line.replace(result.group(change), "auto ", startIndex)
i += 1
else:
break
#write the result to content
content.write(line)
finally:
f.close()
if changed:
f = open(filename, "w")
f.write(content.getvalue())
f.close()
content.close() | [
"def",
"change_local_classvarname_to_auto",
"(",
"filename",
",",
"rep",
",",
"change",
")",
":",
"f",
"=",
"open",
"(",
"filename",
")",
"content",
"=",
"None",
"changed",
"=",
"False",
"# read the file, change it, and save it to content",
"try",
":",
"content",
... | https://github.com/crosslife/OpenBird/blob/9e0198a1a2295f03fa1e8676e216e22c9c7d380b/cocos2d/tools/localvartoauto/LocalVarToAuto.py#L92-L126 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | EvtHandler.ProcessEvent | (*args, **kwargs) | return _core_.EvtHandler_ProcessEvent(*args, **kwargs) | ProcessEvent(self, Event event) -> bool | ProcessEvent(self, Event event) -> bool | [
"ProcessEvent",
"(",
"self",
"Event",
"event",
")",
"-",
">",
"bool"
] | def ProcessEvent(*args, **kwargs):
"""ProcessEvent(self, Event event) -> bool"""
return _core_.EvtHandler_ProcessEvent(*args, **kwargs) | [
"def",
"ProcessEvent",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"EvtHandler_ProcessEvent",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L4152-L4154 | |
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/data/experimental/ops/grouping.py | python | group_by_reducer | (key_func, reducer) | return _apply_fn | A transformation that groups elements and performs a reduction.
This transformation maps element of a dataset to a key using `key_func` and
groups the elements by key. The `reducer` is used to process each group; its
`init_func` is used to initialize state for each group when it is created, the
`reduce_func` is used to update the state every time an element is mapped to
the matching group, and the `finalize_func` is used to map the final state to
an output value.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`. | A transformation that groups elements and performs a reduction. | [
"A",
"transformation",
"that",
"groups",
"elements",
"and",
"performs",
"a",
"reduction",
"."
] | def group_by_reducer(key_func, reducer):
"""A transformation that groups elements and performs a reduction.
This transformation maps element of a dataset to a key using `key_func` and
groups the elements by key. The `reducer` is used to process each group; its
`init_func` is used to initialize state for each group when it is created, the
`reduce_func` is used to update the state every time an element is mapped to
the matching group, and the `finalize_func` is used to map the final state to
an output value.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByReducerDataset(dataset, key_func, reducer)
return _apply_fn | [
"def",
"group_by_reducer",
"(",
"key_func",
",",
"reducer",
")",
":",
"def",
"_apply_fn",
"(",
"dataset",
")",
":",
"\"\"\"Function from `Dataset` to `Dataset` that applies the transformation.\"\"\"",
"return",
"_GroupByReducerDataset",
"(",
"dataset",
",",
"key_func",
",",... | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/data/experimental/ops/grouping.py#L29-L55 | |
etotheipi/BitcoinArmory | 2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98 | armoryengine/PyBtcWallet.py | python | PyBtcWallet.hasScrAddr | (self, scrAddr) | return self.hasAddr(scrAddr[1:]) | Wallets currently only hold P2PKH scraddrs, so if it's not that, False | Wallets currently only hold P2PKH scraddrs, so if it's not that, False | [
"Wallets",
"currently",
"only",
"hold",
"P2PKH",
"scraddrs",
"so",
"if",
"it",
"s",
"not",
"that",
"False"
] | def hasScrAddr(self, scrAddr):
"""
Wallets currently only hold P2PKH scraddrs, so if it's not that, False
"""
if not scrAddr[0] == SCRADDR_P2PKH_BYTE or not len(scrAddr)==21:
return False
# For P2PKH scraddrs, the first byte is prefix, next 20 bytes is addr160
return self.hasAddr(scrAddr[1:]) | [
"def",
"hasScrAddr",
"(",
"self",
",",
"scrAddr",
")",
":",
"if",
"not",
"scrAddr",
"[",
"0",
"]",
"==",
"SCRADDR_P2PKH_BYTE",
"or",
"not",
"len",
"(",
"scrAddr",
")",
"==",
"21",
":",
"return",
"False",
"# For P2PKH scraddrs, the first byte is prefix, next 20 b... | https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/armoryengine/PyBtcWallet.py#L496-L504 | |
forkineye/ESPixelStick | 22926f1c0d1131f1369fc7cad405689a095ae3cb | dist/bin/pyserial/serial/serialposix.py | python | VTIMESerial.read | (self, size=1) | return bytes(read) | \
Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read. | \
Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read. | [
"\\",
"Read",
"size",
"bytes",
"from",
"the",
"serial",
"port",
".",
"If",
"a",
"timeout",
"is",
"set",
"it",
"may",
"return",
"less",
"characters",
"as",
"requested",
".",
"With",
"no",
"timeout",
"it",
"will",
"block",
"until",
"the",
"requested",
"num... | def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
read = bytearray()
while len(read) < size:
buf = os.read(self.fd, size - len(read))
if not buf:
break
read.extend(buf)
return bytes(read) | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"1",
")",
":",
"if",
"not",
"self",
".",
"is_open",
":",
"raise",
"portNotOpenError",
"read",
"=",
"bytearray",
"(",
")",
"while",
"len",
"(",
"read",
")",
"<",
"size",
":",
"buf",
"=",
"os",
".",
"re... | https://github.com/forkineye/ESPixelStick/blob/22926f1c0d1131f1369fc7cad405689a095ae3cb/dist/bin/pyserial/serial/serialposix.py#L794-L808 | |
yue/yue | 619d62c191b13c51c01be451dc48917c34a5aefc | building/tools/cpplint.py | python | ShouldCheckNamespaceIndentation | (nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum) | return IsBlockInNameSpace(nesting_state, is_forward_declaration) | This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace. | This method determines if we should apply our namespace indentation check. | [
"This",
"method",
"determines",
"if",
"we",
"should",
"apply",
"our",
"namespace",
"indentation",
"check",
"."
] | def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration) | [
"def",
"ShouldCheckNamespaceIndentation",
"(",
"nesting_state",
",",
"is_namespace_indent_item",
",",
"raw_lines_no_comments",
",",
"linenum",
")",
":",
"is_forward_declaration",
"=",
"IsForwardClassDeclaration",
"(",
"raw_lines_no_comments",
",",
"linenum",
")",
"if",
"not... | https://github.com/yue/yue/blob/619d62c191b13c51c01be451dc48917c34a5aefc/building/tools/cpplint.py#L5723-L5750 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/apiclient/googleapiclient/discovery.py | python | _urljoin | (base, url) | return new_base + new_url | Custom urljoin replacement supporting : before / in url. | Custom urljoin replacement supporting : before / in url. | [
"Custom",
"urljoin",
"replacement",
"supporting",
":",
"before",
"/",
"in",
"url",
"."
] | def _urljoin(base, url):
"""Custom urljoin replacement supporting : before / in url."""
# In general, it's unsafe to simply join base and url. However, for
# the case of discovery documents, we know:
# * base will never contain params, query, or fragment
# * url will never contain a scheme or net_loc.
# In general, this means we can safely join on /; we just need to
# ensure we end up with precisely one / joining base and url. The
# exception here is the case of media uploads, where url will be an
# absolute url.
if url.startswith('http://') or url.startswith('https://'):
return urljoin(base, url)
new_base = base if base.endswith('/') else base + '/'
new_url = url[1:] if url.startswith('/') else url
return new_base + new_url | [
"def",
"_urljoin",
"(",
"base",
",",
"url",
")",
":",
"# In general, it's unsafe to simply join base and url. However, for",
"# the case of discovery documents, we know:",
"# * base will never contain params, query, or fragment",
"# * url will never contain a scheme or net_loc.",
"# In gen... | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/apiclient/googleapiclient/discovery.py#L497-L511 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/xmlrpc/server.py | python | SimpleXMLRPCDispatcher.system_methodSignature | (self, method_name) | return 'signatures not supported' | system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature. | system.methodSignature('add') => [double, int, int] | [
"system",
".",
"methodSignature",
"(",
"add",
")",
"=",
">",
"[",
"double",
"int",
"int",
"]"
] | def system_methodSignature(self, method_name):
"""system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature."""
# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
return 'signatures not supported' | [
"def",
"system_methodSignature",
"(",
"self",
",",
"method_name",
")",
":",
"# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html",
"return",
"'signatures not supported'"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/xmlrpc/server.py#L303-L314 | |
facebookincubator/BOLT | 88c70afe9d388ad430cc150cc158641701397f70 | lldb/third_party/Python/module/pexpect-4.6/pexpect/screen.py | python | screen.cursor_save | (self) | Save current cursor position. | Save current cursor position. | [
"Save",
"current",
"cursor",
"position",
"."
] | def cursor_save (self): # <ESC>[s
'''Save current cursor position.'''
self.cursor_save_attrs() | [
"def",
"cursor_save",
"(",
"self",
")",
":",
"# <ESC>[s",
"self",
".",
"cursor_save_attrs",
"(",
")"
] | https://github.com/facebookincubator/BOLT/blob/88c70afe9d388ad430cc150cc158641701397f70/lldb/third_party/Python/module/pexpect-4.6/pexpect/screen.py#L318-L321 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/email/mime/image.py | python | MIMEImage.__init__ | (self, _imagedata, _subtype=None,
_encoder=encoders.encode_base64, **_params) | Create an image/* type MIME document.
_imagedata is a string containing the raw image data. If this data
can be decoded by the standard Python `imghdr' module, then the
subtype will be automatically included in the Content-Type header.
Otherwise, you can specify the specific image subtype via the _subtype
parameter.
_encoder is a function which will perform the actual encoding for
transport of the image data. It takes one argument, which is this
Image instance. It should use get_payload() and set_payload() to
change the payload to the encoded form. It should also add any
Content-Transfer-Encoding or other headers to the message as
necessary. The default encoding is Base64.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header. | Create an image/* type MIME document. | [
"Create",
"an",
"image",
"/",
"*",
"type",
"MIME",
"document",
"."
] | def __init__(self, _imagedata, _subtype=None,
_encoder=encoders.encode_base64, **_params):
"""Create an image/* type MIME document.
_imagedata is a string containing the raw image data. If this data
can be decoded by the standard Python `imghdr' module, then the
subtype will be automatically included in the Content-Type header.
Otherwise, you can specify the specific image subtype via the _subtype
parameter.
_encoder is a function which will perform the actual encoding for
transport of the image data. It takes one argument, which is this
Image instance. It should use get_payload() and set_payload() to
change the payload to the encoded form. It should also add any
Content-Transfer-Encoding or other headers to the message as
necessary. The default encoding is Base64.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
_subtype = imghdr.what(None, _imagedata)
if _subtype is None:
raise TypeError('Could not guess image MIME subtype')
MIMENonMultipart.__init__(self, 'image', _subtype, **_params)
self.set_payload(_imagedata)
_encoder(self) | [
"def",
"__init__",
"(",
"self",
",",
"_imagedata",
",",
"_subtype",
"=",
"None",
",",
"_encoder",
"=",
"encoders",
".",
"encode_base64",
",",
"*",
"*",
"_params",
")",
":",
"if",
"_subtype",
"is",
"None",
":",
"_subtype",
"=",
"imghdr",
".",
"what",
"(... | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/email/mime/image.py#L19-L46 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/stc.py | python | StyledTextCtrl.SetHotspotActiveBackground | (*args, **kwargs) | return _stc.StyledTextCtrl_SetHotspotActiveBackground(*args, **kwargs) | SetHotspotActiveBackground(self, bool useSetting, Colour back)
Set a back colour for active hotspots. | SetHotspotActiveBackground(self, bool useSetting, Colour back) | [
"SetHotspotActiveBackground",
"(",
"self",
"bool",
"useSetting",
"Colour",
"back",
")"
] | def SetHotspotActiveBackground(*args, **kwargs):
"""
SetHotspotActiveBackground(self, bool useSetting, Colour back)
Set a back colour for active hotspots.
"""
return _stc.StyledTextCtrl_SetHotspotActiveBackground(*args, **kwargs) | [
"def",
"SetHotspotActiveBackground",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_SetHotspotActiveBackground",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L5232-L5238 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/dis.py | python | findlabels | (code) | return labels | Detect all offsets in a byte code which are jump targets.
Return the list of offsets. | Detect all offsets in a byte code which are jump targets. | [
"Detect",
"all",
"offsets",
"in",
"a",
"byte",
"code",
"which",
"are",
"jump",
"targets",
"."
] | def findlabels(code):
"""Detect all offsets in a byte code which are jump targets.
Return the list of offsets.
"""
labels = []
for offset, op, arg in _unpack_opargs(code):
if arg is not None:
if op in hasjrel:
label = offset + 2 + arg
elif op in hasjabs:
label = arg
else:
continue
if label not in labels:
labels.append(label)
return labels | [
"def",
"findlabels",
"(",
"code",
")",
":",
"labels",
"=",
"[",
"]",
"for",
"offset",
",",
"op",
",",
"arg",
"in",
"_unpack_opargs",
"(",
"code",
")",
":",
"if",
"arg",
"is",
"not",
"None",
":",
"if",
"op",
"in",
"hasjrel",
":",
"label",
"=",
"of... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/dis.py#L430-L447 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/feature_column/feature_column.py | python | _BucketizedColumn._get_sparse_tensors | (self, inputs, weight_collections=None,
trainable=None) | return _CategoricalColumn.IdWeightPair(sparse_tensor, None) | Converts dense inputs to SparseTensor so downstream code can use it. | Converts dense inputs to SparseTensor so downstream code can use it. | [
"Converts",
"dense",
"inputs",
"to",
"SparseTensor",
"so",
"downstream",
"code",
"can",
"use",
"it",
"."
] | def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = inputs.get(self)
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return _CategoricalColumn.IdWeightPair(sparse_tensor, None) | [
"def",
"_get_sparse_tensors",
"(",
"self",
",",
"inputs",
",",
"weight_collections",
"=",
"None",
",",
"trainable",
"=",
"None",
")",
":",
"input_tensor",
"=",
"inputs",
".",
"get",
"(",
"self",
")",
"batch_size",
"=",
"array_ops",
".",
"shape",
"(",
"inpu... | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/feature_column/feature_column.py#L2411-L2439 | |
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/losses/losses_impl.py | python | sigmoid_cross_entropy | (
multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS) | Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
multi_class_labels: `[batch_size, num_classes]` target integer labels in
`{0, 1}`.
logits: Float `[batch_size, num_classes]` logits outputs of the network.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`multi_class_labels`, and must be broadcastable to `multi_class_labels`
(i.e., all dimensions must be either `1`, or the same as the
corresponding `losses` dimension).
label_smoothing: If greater than `0` then smooth the labels.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has the same shape as `logits`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None. Also if `multi_class_labels` or `logits` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility | Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits. | [
"Creates",
"a",
"cross",
"-",
"entropy",
"loss",
"using",
"tf",
".",
"nn",
".",
"sigmoid_cross_entropy_with_logits",
"."
] | def sigmoid_cross_entropy(
multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
multi_class_labels: `[batch_size, num_classes]` target integer labels in
`{0, 1}`.
logits: Float `[batch_size, num_classes]` logits outputs of the network.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`multi_class_labels`, and must be broadcastable to `multi_class_labels`
(i.e., all dimensions must be either `1`, or the same as the
corresponding `losses` dimension).
label_smoothing: If greater than `0` then smooth the labels.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has the same shape as `logits`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None. Also if `multi_class_labels` or `logits` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if multi_class_labels is None:
raise ValueError("Argument `multi_class_labels` must not be None.")
if logits is None:
raise ValueError("Argument `logits` must not be None.")
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
(logits, multi_class_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction) | [
"def",
"sigmoid_cross_entropy",
"(",
"multi_class_labels",
",",
"logits",
",",
"weights",
"=",
"1.0",
",",
"label_smoothing",
"=",
"0",
",",
"scope",
"=",
"None",
",",
"loss_collection",
"=",
"ops",
".",
"GraphKeys",
".",
"LOSSES",
",",
"reduction",
"=",
"Re... | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/losses/losses_impl.py#L772-L833 | ||
arangodb/arangodb | 0d658689c7d1b721b314fa3ca27d38303e1570c8 | 3rdParty/boost/1.78.0/libs/metaparse/tools/benchmark/benchmark.py | python | plot_diagram | (config, results, images_dir, out_filename) | Plot one diagram | Plot one diagram | [
"Plot",
"one",
"diagram"
] | def plot_diagram(config, results, images_dir, out_filename):
"""Plot one diagram"""
img_files = plot_temp_diagrams(config, results, images_dir)
join_images(img_files, out_filename)
for img_file in img_files:
os.remove(img_file) | [
"def",
"plot_diagram",
"(",
"config",
",",
"results",
",",
"images_dir",
",",
"out_filename",
")",
":",
"img_files",
"=",
"plot_temp_diagrams",
"(",
"config",
",",
"results",
",",
"images_dir",
")",
"join_images",
"(",
"img_files",
",",
"out_filename",
")",
"f... | https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/boost/1.78.0/libs/metaparse/tools/benchmark/benchmark.py#L260-L265 | ||
daijifeng001/caffe-rfcn | 543f8f6a4b7c88256ea1445ae951a12d1ad9cffd | scripts/cpp_lint.py | python | _IncludeState.CanonicalizeAlphabeticalOrder | (self, header_path) | return header_path.replace('-inl.h', '.h').replace('-', '_').lower() | Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path. | Returns a path canonicalized for alphabetical comparison. | [
"Returns",
"a",
"path",
"canonicalized",
"for",
"alphabetical",
"comparison",
"."
] | def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower() | [
"def",
"CanonicalizeAlphabeticalOrder",
"(",
"self",
",",
"header_path",
")",
":",
"return",
"header_path",
".",
"replace",
"(",
"'-inl.h'",
",",
"'.h'",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
".",
"lower",
"(",
")"
] | https://github.com/daijifeng001/caffe-rfcn/blob/543f8f6a4b7c88256ea1445ae951a12d1ad9cffd/scripts/cpp_lint.py#L597-L610 | |
jeog/TDAmeritradeAPI | 91c738afd7d57b54f6231170bd64c2550fafd34d | python/tdma_api/get.py | python | OptionChainAnalyticalGetter.get_days_to_exp | (self) | return clib.get_val(self._abi('GetDaysToExp'), c_uint, self._obj) | Returns days until expiration(for calculations) being used. | Returns days until expiration(for calculations) being used. | [
"Returns",
"days",
"until",
"expiration",
"(",
"for",
"calculations",
")",
"being",
"used",
"."
] | def get_days_to_exp(self):
"""Returns days until expiration(for calculations) being used."""
return clib.get_val(self._abi('GetDaysToExp'), c_uint, self._obj) | [
"def",
"get_days_to_exp",
"(",
"self",
")",
":",
"return",
"clib",
".",
"get_val",
"(",
"self",
".",
"_abi",
"(",
"'GetDaysToExp'",
")",
",",
"c_uint",
",",
"self",
".",
"_obj",
")"
] | https://github.com/jeog/TDAmeritradeAPI/blob/91c738afd7d57b54f6231170bd64c2550fafd34d/python/tdma_api/get.py#L1051-L1053 | |
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/entity_object/conversion/aoc/genie_unit.py | python | GenieGameEntityGroup.is_unique | (self) | return enabling_civ_id > -1 | Groups are unique if they belong to a specific civ.
:returns: True if the group is tied to one specific civ. | Groups are unique if they belong to a specific civ. | [
"Groups",
"are",
"unique",
"if",
"they",
"belong",
"to",
"a",
"specific",
"civ",
"."
] | def is_unique(self):
"""
Groups are unique if they belong to a specific civ.
:returns: True if the group is tied to one specific civ.
"""
# Get the enabling research obj_id for the first unit in the line
head_unit = self.get_head_unit()
head_unit_id = head_unit["id0"].get_value()
if isinstance(self, GenieUnitLineGroup):
if head_unit_id in self.data.unit_connections.keys():
head_unit_connection = self.data.unit_connections[head_unit_id]
else:
# Animals or AoE1
return False
elif isinstance(self, GenieBuildingLineGroup):
if head_unit_id in self.data.building_connections.keys():
head_unit_connection = self.data.building_connections[head_unit_id]
else:
# AoE1
return False
enabling_research_id = head_unit_connection["enabling_research"].get_value()
# does not need to be enabled -> not unique
if enabling_research_id == -1:
return False
# Get enabling civ
enabling_research = self.data.genie_techs[enabling_research_id]
enabling_civ_id = enabling_research["civilization_id"].get_value()
# Enabling tech has no specific civ -> not unique
return enabling_civ_id > -1 | [
"def",
"is_unique",
"(",
"self",
")",
":",
"# Get the enabling research obj_id for the first unit in the line",
"head_unit",
"=",
"self",
".",
"get_head_unit",
"(",
")",
"head_unit_id",
"=",
"head_unit",
"[",
"\"id0\"",
"]",
".",
"get_value",
"(",
")",
"if",
"isinst... | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/entity_object/conversion/aoc/genie_unit.py#L424-L461 | |
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/plan/kinetrajopt/utils.py | python | CostInterface.compute | (self, x, grad_level=0) | Evaluates the cost and possibly (determining on grad_level) the
derivative and/or Hessian.
Args:
x (ndarray): The evaluation point
grad_level (int, optional): Which order of derivatives are
computed. Defaults to 0, which only computes cost.
Returns:
tuple: the cost and optional derivatives, structured as follows:
- if grad_level == 0, it returns (cost,)
- if grad_level == 1, it returns (cost,gradient)
- if grad_level == 2, it returns (cost,gradient,hessian) | Evaluates the cost and possibly (determining on grad_level) the
derivative and/or Hessian. | [
"Evaluates",
"the",
"cost",
"and",
"possibly",
"(",
"determining",
"on",
"grad_level",
")",
"the",
"derivative",
"and",
"/",
"or",
"Hessian",
"."
] | def compute(self, x, grad_level=0):
"""Evaluates the cost and possibly (determining on grad_level) the
derivative and/or Hessian.
Args:
x (ndarray): The evaluation point
grad_level (int, optional): Which order of derivatives are
computed. Defaults to 0, which only computes cost.
Returns:
tuple: the cost and optional derivatives, structured as follows:
- if grad_level == 0, it returns (cost,)
- if grad_level == 1, it returns (cost,gradient)
- if grad_level == 2, it returns (cost,gradient,hessian)
"""
raise NotImplementedError("Sub-class has to implement function compute") | [
"def",
"compute",
"(",
"self",
",",
"x",
",",
"grad_level",
"=",
"0",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Sub-class has to implement function compute\"",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/plan/kinetrajopt/utils.py#L7-L23 | ||
H-uru/Plasma | c2140ea046e82e9c199e257a7f2e7edb42602871 | Scripts/Python/plasma/Plasma.py | python | PtConsole | (command) | This will execute 'command' as if it were typed into the Plasma console. | This will execute 'command' as if it were typed into the Plasma console. | [
"This",
"will",
"execute",
"command",
"as",
"if",
"it",
"were",
"typed",
"into",
"the",
"Plasma",
"console",
"."
] | def PtConsole(command):
"""This will execute 'command' as if it were typed into the Plasma console."""
pass | [
"def",
"PtConsole",
"(",
"command",
")",
":",
"pass"
] | https://github.com/H-uru/Plasma/blob/c2140ea046e82e9c199e257a7f2e7edb42602871/Scripts/Python/plasma/Plasma.py#L149-L151 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.