nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/platform.py | python | _syscmd_uname | (option,default='') | Interface to the system's uname command. | Interface to the system's uname command. | [
"Interface",
"to",
"the",
"system",
"s",
"uname",
"command",
"."
] | def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError,os.error):
return default
output = string.strip(f.read())
rc = f.close()
if not output or rc:
return default
else:
return output | [
"def",
"_syscmd_uname",
"(",
"option",
",",
"default",
"=",
"''",
")",
":",
"if",
"sys",
".",
"platform",
"in",
"(",
"'dos'",
",",
"'win32'",
",",
"'win16'",
",",
"'os2'",
")",
":",
"# XXX Others too ?",
"return",
"default",
"try",
":",
"f",
"=",
"os",
".",
"popen",
"(",
"'uname %s 2> %s'",
"%",
"(",
"option",
",",
"DEV_NULL",
")",
")",
"except",
"(",
"AttributeError",
",",
"os",
".",
"error",
")",
":",
"return",
"default",
"output",
"=",
"string",
".",
"strip",
"(",
"f",
".",
"read",
"(",
")",
")",
"rc",
"=",
"f",
".",
"close",
"(",
")",
"if",
"not",
"output",
"or",
"rc",
":",
"return",
"default",
"else",
":",
"return",
"output"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/platform.py#L1000-L1016 | ||
v8/v8 | fee3bf095260bf657a3eea4d3d41f90c42c6c857 | tools/stats-viewer.py | python | CounterCollection.CounterSize | (self) | return 4 + self.max_name_size | Return the size of a single counter. | Return the size of a single counter. | [
"Return",
"the",
"size",
"of",
"a",
"single",
"counter",
"."
] | def CounterSize(self):
"""Return the size of a single counter."""
return 4 + self.max_name_size | [
"def",
"CounterSize",
"(",
"self",
")",
":",
"return",
"4",
"+",
"self",
".",
"max_name_size"
] | https://github.com/v8/v8/blob/fee3bf095260bf657a3eea4d3d41f90c42c6c857/tools/stats-viewer.py#L381-L383 | |
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Arch/ArchAxisSystem.py | python | _AxisSystem.getPoints | (self,obj) | return pts | returns the gridpoints of linked axes | returns the gridpoints of linked axes | [
"returns",
"the",
"gridpoints",
"of",
"linked",
"axes"
] | def getPoints(self,obj):
"returns the gridpoints of linked axes"
pts = []
if len(obj.Axes) == 1:
for e in obj.Axes[0].Shape.Edges:
pts.append(e.Vertexes[0].Point)
elif len(obj.Axes) == 2:
set1 = obj.Axes[0].Shape.Edges # X
set2 = obj.Axes[1].Shape.Edges # Y
for e1 in set1:
for e2 in set2:
pts.extend(DraftGeomUtils.findIntersection(e1,e2))
elif len(obj.Axes) == 3:
set1 = obj.Axes[0].Shape.Edges # X
set2 = obj.Axes[1].Shape.Edges # Y
set3 = obj.Axes[2].Shape.Edges # Z
bset = []
cv = None
for e1 in set1:
for e2 in set2:
bset.extend(DraftGeomUtils.findIntersection(e1,e2))
for e3 in set3:
if not cv:
cv = e3.Vertexes[0].Point
pts.extend(bset)
else:
v = e3.Vertexes[0].Point.sub(cv)
pts.extend([p.add(v) for p in bset])
return pts | [
"def",
"getPoints",
"(",
"self",
",",
"obj",
")",
":",
"pts",
"=",
"[",
"]",
"if",
"len",
"(",
"obj",
".",
"Axes",
")",
"==",
"1",
":",
"for",
"e",
"in",
"obj",
".",
"Axes",
"[",
"0",
"]",
".",
"Shape",
".",
"Edges",
":",
"pts",
".",
"append",
"(",
"e",
".",
"Vertexes",
"[",
"0",
"]",
".",
"Point",
")",
"elif",
"len",
"(",
"obj",
".",
"Axes",
")",
"==",
"2",
":",
"set1",
"=",
"obj",
".",
"Axes",
"[",
"0",
"]",
".",
"Shape",
".",
"Edges",
"# X",
"set2",
"=",
"obj",
".",
"Axes",
"[",
"1",
"]",
".",
"Shape",
".",
"Edges",
"# Y",
"for",
"e1",
"in",
"set1",
":",
"for",
"e2",
"in",
"set2",
":",
"pts",
".",
"extend",
"(",
"DraftGeomUtils",
".",
"findIntersection",
"(",
"e1",
",",
"e2",
")",
")",
"elif",
"len",
"(",
"obj",
".",
"Axes",
")",
"==",
"3",
":",
"set1",
"=",
"obj",
".",
"Axes",
"[",
"0",
"]",
".",
"Shape",
".",
"Edges",
"# X",
"set2",
"=",
"obj",
".",
"Axes",
"[",
"1",
"]",
".",
"Shape",
".",
"Edges",
"# Y",
"set3",
"=",
"obj",
".",
"Axes",
"[",
"2",
"]",
".",
"Shape",
".",
"Edges",
"# Z",
"bset",
"=",
"[",
"]",
"cv",
"=",
"None",
"for",
"e1",
"in",
"set1",
":",
"for",
"e2",
"in",
"set2",
":",
"bset",
".",
"extend",
"(",
"DraftGeomUtils",
".",
"findIntersection",
"(",
"e1",
",",
"e2",
")",
")",
"for",
"e3",
"in",
"set3",
":",
"if",
"not",
"cv",
":",
"cv",
"=",
"e3",
".",
"Vertexes",
"[",
"0",
"]",
".",
"Point",
"pts",
".",
"extend",
"(",
"bset",
")",
"else",
":",
"v",
"=",
"e3",
".",
"Vertexes",
"[",
"0",
"]",
".",
"Point",
".",
"sub",
"(",
"cv",
")",
"pts",
".",
"extend",
"(",
"[",
"p",
".",
"add",
"(",
"v",
")",
"for",
"p",
"in",
"bset",
"]",
")",
"return",
"pts"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Arch/ArchAxisSystem.py#L145-L175 | |
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | python/mxnet/io/io.py | python | DataIter.getlabel | (self) | Get label of the current batch.
Returns
-------
list of NDArray
The label of the current batch. | Get label of the current batch. | [
"Get",
"label",
"of",
"the",
"current",
"batch",
"."
] | def getlabel(self):
"""Get label of the current batch.
Returns
-------
list of NDArray
The label of the current batch.
"""
pass | [
"def",
"getlabel",
"(",
"self",
")",
":",
"pass"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/io/io.py#L252-L260 | ||
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/resmokelib/core/jasper_process.py | python | Process.stop | (self, mode=None) | Terminate the process. | Terminate the process. | [
"Terminate",
"the",
"process",
"."
] | def stop(self, mode=None):
"""Terminate the process."""
if mode is None:
mode = fixture_interface.TeardownMode.TERMINATE
if mode == fixture_interface.TeardownMode.KILL:
signal = self.pb.Signals.Value("KILL")
elif mode == fixture_interface.TeardownMode.TERMINATE:
signal = self.pb.Signals.Value("TERMINATE")
elif mode == fixture_interface.TeardownMode.ABORT:
signal = self.pb.Signals.Value("ABRT")
else:
raise errors.ProcessError("Process wrapper given unrecognized teardown mode: " +
mode.value)
signal_process = self.pb.SignalProcess(ProcessID=self._id, signal=signal)
try:
val = self._stub.Signal(signal_process)
except grpc.RpcError as err:
err.details = err.details()
if "cannot signal a process that has terminated" not in err.details \
and "os: process already finished" not in err.details:
raise
else:
if not val.success:
raise OSError("Failed to signal Jasper process with pid {}: {}".format(
self.pid, val.text))
finally:
JASPER_PIDS.discard(self.pid) | [
"def",
"stop",
"(",
"self",
",",
"mode",
"=",
"None",
")",
":",
"if",
"mode",
"is",
"None",
":",
"mode",
"=",
"fixture_interface",
".",
"TeardownMode",
".",
"TERMINATE",
"if",
"mode",
"==",
"fixture_interface",
".",
"TeardownMode",
".",
"KILL",
":",
"signal",
"=",
"self",
".",
"pb",
".",
"Signals",
".",
"Value",
"(",
"\"KILL\"",
")",
"elif",
"mode",
"==",
"fixture_interface",
".",
"TeardownMode",
".",
"TERMINATE",
":",
"signal",
"=",
"self",
".",
"pb",
".",
"Signals",
".",
"Value",
"(",
"\"TERMINATE\"",
")",
"elif",
"mode",
"==",
"fixture_interface",
".",
"TeardownMode",
".",
"ABORT",
":",
"signal",
"=",
"self",
".",
"pb",
".",
"Signals",
".",
"Value",
"(",
"\"ABRT\"",
")",
"else",
":",
"raise",
"errors",
".",
"ProcessError",
"(",
"\"Process wrapper given unrecognized teardown mode: \"",
"+",
"mode",
".",
"value",
")",
"signal_process",
"=",
"self",
".",
"pb",
".",
"SignalProcess",
"(",
"ProcessID",
"=",
"self",
".",
"_id",
",",
"signal",
"=",
"signal",
")",
"try",
":",
"val",
"=",
"self",
".",
"_stub",
".",
"Signal",
"(",
"signal_process",
")",
"except",
"grpc",
".",
"RpcError",
"as",
"err",
":",
"err",
".",
"details",
"=",
"err",
".",
"details",
"(",
")",
"if",
"\"cannot signal a process that has terminated\"",
"not",
"in",
"err",
".",
"details",
"and",
"\"os: process already finished\"",
"not",
"in",
"err",
".",
"details",
":",
"raise",
"else",
":",
"if",
"not",
"val",
".",
"success",
":",
"raise",
"OSError",
"(",
"\"Failed to signal Jasper process with pid {}: {}\"",
".",
"format",
"(",
"self",
".",
"pid",
",",
"val",
".",
"text",
")",
")",
"finally",
":",
"JASPER_PIDS",
".",
"discard",
"(",
"self",
".",
"pid",
")"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/core/jasper_process.py#L62-L90 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/propgrid.py | python | PropertyGridPopulator.SetState | (*args, **kwargs) | return _propgrid.PropertyGridPopulator_SetState(*args, **kwargs) | SetState(self, state) | SetState(self, state) | [
"SetState",
"(",
"self",
"state",
")"
] | def SetState(*args, **kwargs):
"""SetState(self, state)"""
return _propgrid.PropertyGridPopulator_SetState(*args, **kwargs) | [
"def",
"SetState",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGridPopulator_SetState",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L2578-L2580 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/asyncio/unix_events.py | python | _UnixSelectorEventLoop._handle_signal | (self, sig) | Internal helper that is the actual signal handler. | Internal helper that is the actual signal handler. | [
"Internal",
"helper",
"that",
"is",
"the",
"actual",
"signal",
"handler",
"."
] | def _handle_signal(self, sig):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
return # Assume it's some race condition.
if handle._cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self._add_callback_signalsafe(handle) | [
"def",
"_handle_signal",
"(",
"self",
",",
"sig",
")",
":",
"handle",
"=",
"self",
".",
"_signal_handlers",
".",
"get",
"(",
"sig",
")",
"if",
"handle",
"is",
"None",
":",
"return",
"# Assume it's some race condition.",
"if",
"handle",
".",
"_cancelled",
":",
"self",
".",
"remove_signal_handler",
"(",
"sig",
")",
"# Remove it properly.",
"else",
":",
"self",
".",
"_add_callback_signalsafe",
"(",
"handle",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/asyncio/unix_events.py#L123-L131 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_core.py | python | Image.AdjustChannels | (*args, **kwargs) | return _core_.Image_AdjustChannels(*args, **kwargs) | AdjustChannels(self, double factor_red, double factor_green, double factor_blue,
double factor_alpha=1.0) -> Image
This function muliplies all 4 channels (red, green, blue, alpha) with
a factor (around 1.0). Useful for gamma correction, colour correction
and to add a certain amount of transparency to a image (fade in fade
out effects). If factor_alpha is given but the original image has no
alpha channel then a alpha channel will be added. | AdjustChannels(self, double factor_red, double factor_green, double factor_blue,
double factor_alpha=1.0) -> Image | [
"AdjustChannels",
"(",
"self",
"double",
"factor_red",
"double",
"factor_green",
"double",
"factor_blue",
"double",
"factor_alpha",
"=",
"1",
".",
"0",
")",
"-",
">",
"Image"
] | def AdjustChannels(*args, **kwargs):
"""
AdjustChannels(self, double factor_red, double factor_green, double factor_blue,
double factor_alpha=1.0) -> Image
This function muliplies all 4 channels (red, green, blue, alpha) with
a factor (around 1.0). Useful for gamma correction, colour correction
and to add a certain amount of transparency to a image (fade in fade
out effects). If factor_alpha is given but the original image has no
alpha channel then a alpha channel will be added.
"""
return _core_.Image_AdjustChannels(*args, **kwargs) | [
"def",
"AdjustChannels",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Image_AdjustChannels",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L3680-L3691 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/build/waf-1.7.13/lmbrwaflib/msvs.py | python | msvs_generator.add_aliases | (self) | Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7
We also add an alias for "make install" (disabled by default) | Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7
We also add an alias for "make install" (disabled by default) | [
"Add",
"a",
"specific",
"target",
"that",
"emulates",
"the",
"make",
"all",
"necessary",
"for",
"Visual",
"studio",
"when",
"pressing",
"F7",
"We",
"also",
"add",
"an",
"alias",
"for",
"make",
"install",
"(",
"disabled",
"by",
"default",
")"
] | def add_aliases(self):
"""
Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7
We also add an alias for "make install" (disabled by default)
"""
base = getattr(self, 'projects_dir', None) or self.tg.path
node_project = base.make_node('_WAF_' + self.project_extension) # Node.
p_build = self.vsnode_build_all(self, node_project)
p_build.collect_properties()
p_build.vs_globals = self.vs_globals
self.all_projects.append(p_build)
self.waf_project = p_build | [
"def",
"add_aliases",
"(",
"self",
")",
":",
"base",
"=",
"getattr",
"(",
"self",
",",
"'projects_dir'",
",",
"None",
")",
"or",
"self",
".",
"tg",
".",
"path",
"node_project",
"=",
"base",
".",
"make_node",
"(",
"'_WAF_'",
"+",
"self",
".",
"project_extension",
")",
"# Node.",
"p_build",
"=",
"self",
".",
"vsnode_build_all",
"(",
"self",
",",
"node_project",
")",
"p_build",
".",
"collect_properties",
"(",
")",
"p_build",
".",
"vs_globals",
"=",
"self",
".",
"vs_globals",
"self",
".",
"all_projects",
".",
"append",
"(",
"p_build",
")",
"self",
".",
"waf_project",
"=",
"p_build"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/msvs.py#L2141-L2153 | ||
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/processor/conversion/aoc/nyan_subprocessor.py | python | AoCNyanSubprocessor.ambient_group_to_game_entity | (ambient_group) | Creates raw API objects for an ambient group.
:param ambient_group: Unit line that gets converted to a game entity.
:type ambient_group: ..dataformat.converter_object.ConverterObjectGroup | Creates raw API objects for an ambient group. | [
"Creates",
"raw",
"API",
"objects",
"for",
"an",
"ambient",
"group",
"."
] | def ambient_group_to_game_entity(ambient_group):
"""
Creates raw API objects for an ambient group.
:param ambient_group: Unit line that gets converted to a game entity.
:type ambient_group: ..dataformat.converter_object.ConverterObjectGroup
"""
ambient_unit = ambient_group.get_head_unit()
ambient_id = ambient_group.get_head_unit_id()
dataset = ambient_group.data
name_lookup_dict = internal_name_lookups.get_entity_lookups(dataset.game_version)
class_lookup_dict = internal_name_lookups.get_class_lookups(dataset.game_version)
# Start with the generic GameEntity
game_entity_name = name_lookup_dict[ambient_id][0]
obj_location = f"data/game_entity/generic/{name_lookup_dict[ambient_id][1]}/"
raw_api_object = RawAPIObject(game_entity_name, game_entity_name,
dataset.nyan_api_objects)
raw_api_object.add_raw_parent("engine.util.game_entity.GameEntity")
raw_api_object.set_location(obj_location)
raw_api_object.set_filename(name_lookup_dict[ambient_id][1])
ambient_group.add_raw_api_object(raw_api_object)
# =======================================================================
# Game Entity Types
# =======================================================================
# we give an ambient the types
# - util.game_entity_type.types.Ambient
# =======================================================================
# Create or use existing auxiliary types
types_set = []
type_obj = dataset.pregen_nyan_objects["util.game_entity_type.types.Ambient"].get_nyan_object()
types_set.append(type_obj)
unit_class = ambient_unit["unit_class"].get_value()
class_name = class_lookup_dict[unit_class]
class_obj_name = f"util.game_entity_type.types.{class_name}"
type_obj = dataset.pregen_nyan_objects[class_obj_name].get_nyan_object()
types_set.append(type_obj)
raw_api_object.add_raw_member("types", types_set, "engine.util.game_entity.GameEntity")
# =======================================================================
# Abilities
# =======================================================================
abilities_set = []
interaction_mode = ambient_unit["interaction_mode"].get_value()
if interaction_mode >= 0:
abilities_set.append(AoCAbilitySubprocessor.death_ability(ambient_group))
abilities_set.append(AoCAbilitySubprocessor.hitbox_ability(ambient_group))
abilities_set.append(AoCAbilitySubprocessor.idle_ability(ambient_group))
abilities_set.append(AoCAbilitySubprocessor.live_ability(ambient_group))
abilities_set.append(AoCAbilitySubprocessor.named_ability(ambient_group))
abilities_set.append(AoCAbilitySubprocessor.resistance_ability(ambient_group))
abilities_set.append(AoCAbilitySubprocessor.terrain_requirement_ability(ambient_group))
abilities_set.append(AoCAbilitySubprocessor.visibility_ability(ambient_group))
if interaction_mode >= 2:
abilities_set.extend(AoCAbilitySubprocessor.selectable_ability(ambient_group))
if ambient_group.is_passable():
abilities_set.append(AoCAbilitySubprocessor.passable_ability(ambient_group))
if ambient_group.is_harvestable():
abilities_set.append(AoCAbilitySubprocessor.harvestable_ability(ambient_group))
# =======================================================================
# Abilities
# =======================================================================
raw_api_object.add_raw_member("abilities", abilities_set,
"engine.util.game_entity.GameEntity")
# =======================================================================
# Modifiers
# =======================================================================
modifiers_set = []
raw_api_object.add_raw_member("modifiers", modifiers_set,
"engine.util.game_entity.GameEntity")
# =======================================================================
# TODO: Variants
# =======================================================================
raw_api_object.add_raw_member("variants", [], "engine.util.game_entity.GameEntity") | [
"def",
"ambient_group_to_game_entity",
"(",
"ambient_group",
")",
":",
"ambient_unit",
"=",
"ambient_group",
".",
"get_head_unit",
"(",
")",
"ambient_id",
"=",
"ambient_group",
".",
"get_head_unit_id",
"(",
")",
"dataset",
"=",
"ambient_group",
".",
"data",
"name_lookup_dict",
"=",
"internal_name_lookups",
".",
"get_entity_lookups",
"(",
"dataset",
".",
"game_version",
")",
"class_lookup_dict",
"=",
"internal_name_lookups",
".",
"get_class_lookups",
"(",
"dataset",
".",
"game_version",
")",
"# Start with the generic GameEntity",
"game_entity_name",
"=",
"name_lookup_dict",
"[",
"ambient_id",
"]",
"[",
"0",
"]",
"obj_location",
"=",
"f\"data/game_entity/generic/{name_lookup_dict[ambient_id][1]}/\"",
"raw_api_object",
"=",
"RawAPIObject",
"(",
"game_entity_name",
",",
"game_entity_name",
",",
"dataset",
".",
"nyan_api_objects",
")",
"raw_api_object",
".",
"add_raw_parent",
"(",
"\"engine.util.game_entity.GameEntity\"",
")",
"raw_api_object",
".",
"set_location",
"(",
"obj_location",
")",
"raw_api_object",
".",
"set_filename",
"(",
"name_lookup_dict",
"[",
"ambient_id",
"]",
"[",
"1",
"]",
")",
"ambient_group",
".",
"add_raw_api_object",
"(",
"raw_api_object",
")",
"# =======================================================================",
"# Game Entity Types",
"# =======================================================================",
"# we give an ambient the types",
"# - util.game_entity_type.types.Ambient",
"# =======================================================================",
"# Create or use existing auxiliary types",
"types_set",
"=",
"[",
"]",
"type_obj",
"=",
"dataset",
".",
"pregen_nyan_objects",
"[",
"\"util.game_entity_type.types.Ambient\"",
"]",
".",
"get_nyan_object",
"(",
")",
"types_set",
".",
"append",
"(",
"type_obj",
")",
"unit_class",
"=",
"ambient_unit",
"[",
"\"unit_class\"",
"]",
".",
"get_value",
"(",
")",
"class_name",
"=",
"class_lookup_dict",
"[",
"unit_class",
"]",
"class_obj_name",
"=",
"f\"util.game_entity_type.types.{class_name}\"",
"type_obj",
"=",
"dataset",
".",
"pregen_nyan_objects",
"[",
"class_obj_name",
"]",
".",
"get_nyan_object",
"(",
")",
"types_set",
".",
"append",
"(",
"type_obj",
")",
"raw_api_object",
".",
"add_raw_member",
"(",
"\"types\"",
",",
"types_set",
",",
"\"engine.util.game_entity.GameEntity\"",
")",
"# =======================================================================",
"# Abilities",
"# =======================================================================",
"abilities_set",
"=",
"[",
"]",
"interaction_mode",
"=",
"ambient_unit",
"[",
"\"interaction_mode\"",
"]",
".",
"get_value",
"(",
")",
"if",
"interaction_mode",
">=",
"0",
":",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"death_ability",
"(",
"ambient_group",
")",
")",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"hitbox_ability",
"(",
"ambient_group",
")",
")",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"idle_ability",
"(",
"ambient_group",
")",
")",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"live_ability",
"(",
"ambient_group",
")",
")",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"named_ability",
"(",
"ambient_group",
")",
")",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"resistance_ability",
"(",
"ambient_group",
")",
")",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"terrain_requirement_ability",
"(",
"ambient_group",
")",
")",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"visibility_ability",
"(",
"ambient_group",
")",
")",
"if",
"interaction_mode",
">=",
"2",
":",
"abilities_set",
".",
"extend",
"(",
"AoCAbilitySubprocessor",
".",
"selectable_ability",
"(",
"ambient_group",
")",
")",
"if",
"ambient_group",
".",
"is_passable",
"(",
")",
":",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"passable_ability",
"(",
"ambient_group",
")",
")",
"if",
"ambient_group",
".",
"is_harvestable",
"(",
")",
":",
"abilities_set",
".",
"append",
"(",
"AoCAbilitySubprocessor",
".",
"harvestable_ability",
"(",
"ambient_group",
")",
")",
"# =======================================================================",
"# Abilities",
"# =======================================================================",
"raw_api_object",
".",
"add_raw_member",
"(",
"\"abilities\"",
",",
"abilities_set",
",",
"\"engine.util.game_entity.GameEntity\"",
")",
"# =======================================================================",
"# Modifiers",
"# =======================================================================",
"modifiers_set",
"=",
"[",
"]",
"raw_api_object",
".",
"add_raw_member",
"(",
"\"modifiers\"",
",",
"modifiers_set",
",",
"\"engine.util.game_entity.GameEntity\"",
")",
"# =======================================================================",
"# TODO: Variants",
"# =======================================================================",
"raw_api_object",
".",
"add_raw_member",
"(",
"\"variants\"",
",",
"[",
"]",
",",
"\"engine.util.game_entity.GameEntity\"",
")"
] | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/processor/conversion/aoc/nyan_subprocessor.py#L518-L606 | ||
vmware/concord-bft | ec036a384b4c81be0423d4b429bd37900b13b864 | scripts/prepare-code-coverage-artifact.py | python | prepare_html_report | (host_llvm_cov, profile, coverage_report_dir, binary) | Function is used to generation of code coverage report. | Function is used to generation of code coverage report. | [
"Function",
"is",
"used",
"to",
"generation",
"of",
"code",
"coverage",
"report",
"."
] | def prepare_html_report(host_llvm_cov, profile, coverage_report_dir, binary):
"""
Function is used to generation of code coverage report.
"""
print('\n:: Preparing html report for {0}...'.format(binary), end='')
sys.stdout.flush()
objects = []
objects.append(binary)
index_page = os.path.join(coverage_report_dir, 'index.html')
if not os.path.isdir(coverage_report_dir):
os.makedirs(coverage_report_dir)
cov_command = [host_llvm_cov, 'show'] + objects + [
'-format',
'html',
'-instr-profile',
profile,
'-o',
coverage_report_dir,
'-show-line-counts-or-regions',
'-Xdemangler',
'c++filt',
'-Xdemangler',
'-n',
'-project-title',
'Concord-bft Apollo Code Coverage'
]
cov_process = subprocess.run(cov_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
if cov_process.returncode != 0:
print('::Code coverage report generation failed')
print('Stdout - {}'.format(cov_process.stdout.decode('utf-8')))
print('Stderr - {}'.format(cov_process.stderr.decode('utf-8')))
sys.exit(1)
with open(os.path.join(coverage_report_dir, 'summary.txt'), 'wb') as Summary:
subprocess.check_call([host_llvm_cov, 'report'] + objects
+ ['-instr-profile', profile], stdout=Summary)
print('\n:: Merged Code Coverage Reports are in {}'.format(coverage_report_dir))
print('\n:: Open browser on {}'.format(index_page))
change_permissions_recursive(coverage_report_dir, 0o777) | [
"def",
"prepare_html_report",
"(",
"host_llvm_cov",
",",
"profile",
",",
"coverage_report_dir",
",",
"binary",
")",
":",
"print",
"(",
"'\\n:: Preparing html report for {0}...'",
".",
"format",
"(",
"binary",
")",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"objects",
"=",
"[",
"]",
"objects",
".",
"append",
"(",
"binary",
")",
"index_page",
"=",
"os",
".",
"path",
".",
"join",
"(",
"coverage_report_dir",
",",
"'index.html'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"coverage_report_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"coverage_report_dir",
")",
"cov_command",
"=",
"[",
"host_llvm_cov",
",",
"'show'",
"]",
"+",
"objects",
"+",
"[",
"'-format'",
",",
"'html'",
",",
"'-instr-profile'",
",",
"profile",
",",
"'-o'",
",",
"coverage_report_dir",
",",
"'-show-line-counts-or-regions'",
",",
"'-Xdemangler'",
",",
"'c++filt'",
",",
"'-Xdemangler'",
",",
"'-n'",
",",
"'-project-title'",
",",
"'Concord-bft Apollo Code Coverage'",
"]",
"cov_process",
"=",
"subprocess",
".",
"run",
"(",
"cov_command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"universal_newlines",
"=",
"True",
")",
"if",
"cov_process",
".",
"returncode",
"!=",
"0",
":",
"print",
"(",
"'::Code coverage report generation failed'",
")",
"print",
"(",
"'Stdout - {}'",
".",
"format",
"(",
"cov_process",
".",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"print",
"(",
"'Stderr - {}'",
".",
"format",
"(",
"cov_process",
".",
"stderr",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"coverage_report_dir",
",",
"'summary.txt'",
")",
",",
"'wb'",
")",
"as",
"Summary",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"host_llvm_cov",
",",
"'report'",
"]",
"+",
"objects",
"+",
"[",
"'-instr-profile'",
",",
"profile",
"]",
",",
"stdout",
"=",
"Summary",
")",
"print",
"(",
"'\\n:: Merged Code Coverage Reports are in {}'",
".",
"format",
"(",
"coverage_report_dir",
")",
")",
"print",
"(",
"'\\n:: Open browser on {}'",
".",
"format",
"(",
"index_page",
")",
")",
"change_permissions_recursive",
"(",
"coverage_report_dir",
",",
"0o777",
")"
] | https://github.com/vmware/concord-bft/blob/ec036a384b4c81be0423d4b429bd37900b13b864/scripts/prepare-code-coverage-artifact.py#L93-L135 | ||
Genius-x/genius-x | 9fc9f194e6d1fb92dd0e33d43db19ddb67cda7b0 | cocos2d/tools/bindings-generator/clang/cindex.py | python | CompileCommand.directory | (self) | return conf.lib.clang_CompileCommand_getDirectory(self.cmd) | Get the working directory for this CompileCommand | Get the working directory for this CompileCommand | [
"Get",
"the",
"working",
"directory",
"for",
"this",
"CompileCommand"
] | def directory(self):
"""Get the working directory for this CompileCommand"""
return conf.lib.clang_CompileCommand_getDirectory(self.cmd) | [
"def",
"directory",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_CompileCommand_getDirectory",
"(",
"self",
".",
"cmd",
")"
] | https://github.com/Genius-x/genius-x/blob/9fc9f194e6d1fb92dd0e33d43db19ddb67cda7b0/cocos2d/tools/bindings-generator/clang/cindex.py#L2569-L2571 | |
mixxxdj/mixxx | b519aba1d967a39c63b5f5c56cf5c3a95addec28 | tools/make_xone.py | python | get_key_name | (key) | return key | Optionally munge key name if an EQ | Optionally munge key name if an EQ | [
"Optionally",
"munge",
"key",
"name",
"if",
"an",
"EQ"
] | def get_key_name(key):
"""Optionally munge key name if an EQ"""
if key == "filterLow":
return "parameter1"
if key == "filterMid":
return "parameter2"
if key == "filterHigh":
return "parameter3"
if key == "filterLowKill":
return "button_parameter1"
if key == "filterMidKill":
return "button_parameter2"
if key == "filterLowKill":
return "button_parameter3"
return key | [
"def",
"get_key_name",
"(",
"key",
")",
":",
"if",
"key",
"==",
"\"filterLow\"",
":",
"return",
"\"parameter1\"",
"if",
"key",
"==",
"\"filterMid\"",
":",
"return",
"\"parameter2\"",
"if",
"key",
"==",
"\"filterHigh\"",
":",
"return",
"\"parameter3\"",
"if",
"key",
"==",
"\"filterLowKill\"",
":",
"return",
"\"button_parameter1\"",
"if",
"key",
"==",
"\"filterMidKill\"",
":",
"return",
"\"button_parameter2\"",
"if",
"key",
"==",
"\"filterLowKill\"",
":",
"return",
"\"button_parameter3\"",
"return",
"key"
] | https://github.com/mixxxdj/mixxx/blob/b519aba1d967a39c63b5f5c56cf5c3a95addec28/tools/make_xone.py#L429-L443 | |
wujixiu/helmet-detection | 8eff5c59ddfba5a29e0b76aeb48babcb49246178 | hardhat-wearing-detection/SSD-RPA/scripts/cpp_lint.py | python | IsBlankLine | (line) | return not line or line.isspace() | Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank. | Returns true if the given line is blank. | [
"Returns",
"true",
"if",
"the",
"given",
"line",
"is",
"blank",
"."
] | def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace() | [
"def",
"IsBlankLine",
"(",
"line",
")",
":",
"return",
"not",
"line",
"or",
"line",
".",
"isspace",
"(",
")"
] | https://github.com/wujixiu/helmet-detection/blob/8eff5c59ddfba5a29e0b76aeb48babcb49246178/hardhat-wearing-detection/SSD-RPA/scripts/cpp_lint.py#L2373-L2385 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py2/setuptools/msvc.py | python | SystemInfo.NetFxSdkVersion | (self) | return (('4.7.2', '4.7.1', '4.7',
'4.6.2', '4.6.1', '4.6',
'4.5.2', '4.5.1', '4.5')
if self.vs_ver >= 14.0 else ()) | Microsoft .NET Framework SDK versions.
Return
------
tuple of str
versions | Microsoft .NET Framework SDK versions. | [
"Microsoft",
".",
"NET",
"Framework",
"SDK",
"versions",
"."
] | def NetFxSdkVersion(self):
"""
Microsoft .NET Framework SDK versions.
Return
------
tuple of str
versions
"""
# Set FxSdk versions for specified VS version
return (('4.7.2', '4.7.1', '4.7',
'4.6.2', '4.6.1', '4.6',
'4.5.2', '4.5.1', '4.5')
if self.vs_ver >= 14.0 else ()) | [
"def",
"NetFxSdkVersion",
"(",
"self",
")",
":",
"# Set FxSdk versions for specified VS version",
"return",
"(",
"(",
"'4.7.2'",
",",
"'4.7.1'",
",",
"'4.7'",
",",
"'4.6.2'",
",",
"'4.6.1'",
",",
"'4.6'",
",",
"'4.5.2'",
",",
"'4.5.1'",
",",
"'4.5'",
")",
"if",
"self",
".",
"vs_ver",
">=",
"14.0",
"else",
"(",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/msvc.py#L904-L917 | |
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/python/framework/ops.py | python | Operation.values | (self) | return tuple(self.outputs) | DEPRECATED: Use outputs. | DEPRECATED: Use outputs. | [
"DEPRECATED",
":",
"Use",
"outputs",
"."
] | def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs) | [
"def",
"values",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"self",
".",
"outputs",
")"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/framework/ops.py#L1269-L1271 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/linalg/linear_operator.py | python | LinearOperator.add_to_tensor | (self, x, name="add_to_tensor") | Add matrix represented by this operator to `x`. Equivalent to `A + x`.
Args:
x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`. | Add matrix represented by this operator to `x`. Equivalent to `A + x`. | [
"Add",
"matrix",
"represented",
"by",
"this",
"operator",
"to",
"x",
".",
"Equivalent",
"to",
"A",
"+",
"x",
"."
] | def add_to_tensor(self, x, name="add_to_tensor"):
"""Add matrix represented by this operator to `x`. Equivalent to `A + x`.
Args:
x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
return self._add_to_tensor(x) | [
"def",
"add_to_tensor",
"(",
"self",
",",
"x",
",",
"name",
"=",
"\"add_to_tensor\"",
")",
":",
"with",
"self",
".",
"_name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"x",
"]",
")",
":",
"x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"\"x\"",
")",
"self",
".",
"_check_input_dtype",
"(",
"x",
")",
"return",
"self",
".",
"_add_to_tensor",
"(",
"x",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/linalg/linear_operator.py#L919-L932 | ||
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/config/configobj.py | python | ConfigObj._write_marker | (self, indent_string, depth, entry, comment) | return '%s%s%s%s%s' % (
indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment)) | Write a section marker line | Write a section marker line | [
"Write",
"a",
"section",
"marker",
"line"
] | def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (
indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment)) | [
"def",
"_write_marker",
"(",
"self",
",",
"indent_string",
",",
"depth",
",",
"entry",
",",
"comment",
")",
":",
"return",
"'%s%s%s%s%s'",
"%",
"(",
"indent_string",
",",
"self",
".",
"_a_to_u",
"(",
"'['",
"*",
"depth",
")",
",",
"self",
".",
"_quote",
"(",
"self",
".",
"_decode_element",
"(",
"entry",
")",
",",
"multiline",
"=",
"False",
")",
",",
"self",
".",
"_a_to_u",
"(",
"']'",
"*",
"depth",
")",
",",
"self",
".",
"_decode_element",
"(",
"comment",
")",
")"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/config/configobj.py#L1871-L1878 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/collections/__init__.py | python | OrderedDict.__reversed__ | (self) | od.__reversed__() <==> reversed(od) | od.__reversed__() <==> reversed(od) | [
"od",
".",
"__reversed__",
"()",
"<",
"==",
">",
"reversed",
"(",
"od",
")"
] | def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev | [
"def",
"__reversed__",
"(",
"self",
")",
":",
"# Traverse the linked list in reverse order.",
"root",
"=",
"self",
".",
"__root",
"curr",
"=",
"root",
".",
"prev",
"while",
"curr",
"is",
"not",
"root",
":",
"yield",
"curr",
".",
"key",
"curr",
"=",
"curr",
".",
"prev"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/collections/__init__.py#L151-L158 | ||
openthread/openthread | 9fcdbed9c526c70f1556d1ed84099c1535c7cd32 | tools/otci/otci/otci.py | python | OTCI.ifconfig_down | (self) | Bring down the IPv6 interface. | Bring down the IPv6 interface. | [
"Bring",
"down",
"the",
"IPv6",
"interface",
"."
] | def ifconfig_down(self):
"""Bring down the IPv6 interface."""
self.execute_command('ifconfig down') | [
"def",
"ifconfig_down",
"(",
"self",
")",
":",
"self",
".",
"execute_command",
"(",
"'ifconfig down'",
")"
] | https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/tools/otci/otci/otci.py#L189-L191 | ||
klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/net/tools/truncate_net_log.py | python | get_file_size | (path) | return os.stat(path).st_size | Returns the filesize of |path| in bytes | Returns the filesize of |path| in bytes | [
"Returns",
"the",
"filesize",
"of",
"|path|",
"in",
"bytes"
] | def get_file_size(path):
'''Returns the filesize of |path| in bytes'''
return os.stat(path).st_size | [
"def",
"get_file_size",
"(",
"path",
")",
":",
"return",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_size"
] | https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/net/tools/truncate_net_log.py#L37-L39 | |
Slicer/SlicerGitSVNArchive | 65e92bb16c2b32ea47a1a66bee71f238891ee1ca | Base/Python/slicer/util.py | python | arrayFromVolume | (volumeNode) | return narray | Return voxel array from volume node as numpy array.
Voxels values are not copied. Voxel values in the volume node can be modified
by changing values in the numpy array.
After all modifications has been completed, call :py:meth:`arrayFromVolumeModified`.
.. warning:: Memory area of the returned array is managed by VTK, therefore
values in the array may be changed, but the array must not be reallocated
(change array size, shallow-copy content from other array most likely causes
application crash). To allow arbitrary numpy operations on a volume array:
1. Make a deep-copy of the returned VTK-managed array using :func:`numpy.copy`.
2. Perform any computations using the copied array.
3. Write results back to the image data using :py:meth:`updateVolumeFromArray`. | Return voxel array from volume node as numpy array.
Voxels values are not copied. Voxel values in the volume node can be modified
by changing values in the numpy array.
After all modifications has been completed, call :py:meth:`arrayFromVolumeModified`. | [
"Return",
"voxel",
"array",
"from",
"volume",
"node",
"as",
"numpy",
"array",
".",
"Voxels",
"values",
"are",
"not",
"copied",
".",
"Voxel",
"values",
"in",
"the",
"volume",
"node",
"can",
"be",
"modified",
"by",
"changing",
"values",
"in",
"the",
"numpy",
"array",
".",
"After",
"all",
"modifications",
"has",
"been",
"completed",
"call",
":",
"py",
":",
"meth",
":",
"arrayFromVolumeModified",
"."
] | def arrayFromVolume(volumeNode):
"""Return voxel array from volume node as numpy array.
Voxels values are not copied. Voxel values in the volume node can be modified
by changing values in the numpy array.
After all modifications has been completed, call :py:meth:`arrayFromVolumeModified`.
.. warning:: Memory area of the returned array is managed by VTK, therefore
values in the array may be changed, but the array must not be reallocated
(change array size, shallow-copy content from other array most likely causes
application crash). To allow arbitrary numpy operations on a volume array:
1. Make a deep-copy of the returned VTK-managed array using :func:`numpy.copy`.
2. Perform any computations using the copied array.
3. Write results back to the image data using :py:meth:`updateVolumeFromArray`.
"""
scalarTypes = ['vtkMRMLScalarVolumeNode', 'vtkMRMLLabelMapVolumeNode']
vectorTypes = ['vtkMRMLVectorVolumeNode', 'vtkMRMLMultiVolumeNode', 'vtkMRMLDiffusionWeightedVolumeNode']
tensorTypes = ['vtkMRMLDiffusionTensorVolumeNode']
vimage = volumeNode.GetImageData()
nshape = tuple(reversed(volumeNode.GetImageData().GetDimensions()))
import vtk.util.numpy_support
narray = None
if volumeNode.GetClassName() in scalarTypes:
narray = vtk.util.numpy_support.vtk_to_numpy(vimage.GetPointData().GetScalars()).reshape(nshape)
elif volumeNode.GetClassName() in vectorTypes:
components = vimage.GetNumberOfScalarComponents()
if components > 1:
nshape = nshape + (components,)
narray = vtk.util.numpy_support.vtk_to_numpy(vimage.GetPointData().GetScalars()).reshape(nshape)
elif volumeNode.GetClassName() in tensorTypes:
narray = vtk.util.numpy_support.vtk_to_numpy(vimage.GetPointData().GetTensors()).reshape(nshape+(3,3))
else:
raise RuntimeError("Unsupported volume type: "+volumeNode.GetClassName())
return narray | [
"def",
"arrayFromVolume",
"(",
"volumeNode",
")",
":",
"scalarTypes",
"=",
"[",
"'vtkMRMLScalarVolumeNode'",
",",
"'vtkMRMLLabelMapVolumeNode'",
"]",
"vectorTypes",
"=",
"[",
"'vtkMRMLVectorVolumeNode'",
",",
"'vtkMRMLMultiVolumeNode'",
",",
"'vtkMRMLDiffusionWeightedVolumeNode'",
"]",
"tensorTypes",
"=",
"[",
"'vtkMRMLDiffusionTensorVolumeNode'",
"]",
"vimage",
"=",
"volumeNode",
".",
"GetImageData",
"(",
")",
"nshape",
"=",
"tuple",
"(",
"reversed",
"(",
"volumeNode",
".",
"GetImageData",
"(",
")",
".",
"GetDimensions",
"(",
")",
")",
")",
"import",
"vtk",
".",
"util",
".",
"numpy_support",
"narray",
"=",
"None",
"if",
"volumeNode",
".",
"GetClassName",
"(",
")",
"in",
"scalarTypes",
":",
"narray",
"=",
"vtk",
".",
"util",
".",
"numpy_support",
".",
"vtk_to_numpy",
"(",
"vimage",
".",
"GetPointData",
"(",
")",
".",
"GetScalars",
"(",
")",
")",
".",
"reshape",
"(",
"nshape",
")",
"elif",
"volumeNode",
".",
"GetClassName",
"(",
")",
"in",
"vectorTypes",
":",
"components",
"=",
"vimage",
".",
"GetNumberOfScalarComponents",
"(",
")",
"if",
"components",
">",
"1",
":",
"nshape",
"=",
"nshape",
"+",
"(",
"components",
",",
")",
"narray",
"=",
"vtk",
".",
"util",
".",
"numpy_support",
".",
"vtk_to_numpy",
"(",
"vimage",
".",
"GetPointData",
"(",
")",
".",
"GetScalars",
"(",
")",
")",
".",
"reshape",
"(",
"nshape",
")",
"elif",
"volumeNode",
".",
"GetClassName",
"(",
")",
"in",
"tensorTypes",
":",
"narray",
"=",
"vtk",
".",
"util",
".",
"numpy_support",
".",
"vtk_to_numpy",
"(",
"vimage",
".",
"GetPointData",
"(",
")",
".",
"GetTensors",
"(",
")",
")",
".",
"reshape",
"(",
"nshape",
"+",
"(",
"3",
",",
"3",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unsupported volume type: \"",
"+",
"volumeNode",
".",
"GetClassName",
"(",
")",
")",
"return",
"narray"
] | https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Base/Python/slicer/util.py#L948-L981 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/extern/aui/framemanager.py | python | AuiPaneInfo.Centre | (self) | return self | Sets the pane to the center position of the frame.
The centre pane is the space in the middle after all border panes (left, top,
right, bottom) are subtracted from the layout.
:note: This is the same thing as calling :meth:`~AuiPaneInfo.Direction` with ``AUI_DOCK_CENTRE`` as
parameter. | Sets the pane to the center position of the frame. | [
"Sets",
"the",
"pane",
"to",
"the",
"center",
"position",
"of",
"the",
"frame",
"."
] | def Centre(self):
"""
Sets the pane to the center position of the frame.
The centre pane is the space in the middle after all border panes (left, top,
right, bottom) are subtracted from the layout.
:note: This is the same thing as calling :meth:`~AuiPaneInfo.Direction` with ``AUI_DOCK_CENTRE`` as
parameter.
"""
self.dock_direction = AUI_DOCK_CENTRE
return self | [
"def",
"Centre",
"(",
"self",
")",
":",
"self",
".",
"dock_direction",
"=",
"AUI_DOCK_CENTRE",
"return",
"self"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/aui/framemanager.py#L969-L981 | |
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/geodesy/utm.py | python | fromLatLong | (latitude, longitude, altitude=float('nan')) | return UTMPoint(easting=e, northing=n, altitude=altitude, zone=z, band=b) | Generate :class:`UTMPoint` from latitude, longitude and (optional) altitude.
Latitude and longitude are expressed in degrees, relative to the
WGS84 ellipsoid.
:param latitude: [degrees], negative is South.
:param longitude: [degrees], negative is West.
:param altitude: [meters], negative is below the ellipsoid.
:returns: :class:`UTMPoint` object. | Generate :class:`UTMPoint` from latitude, longitude and (optional) altitude. | [
"Generate",
":",
"class",
":",
"UTMPoint",
"from",
"latitude",
"longitude",
"and",
"(",
"optional",
")",
"altitude",
"."
] | def fromLatLong(latitude, longitude, altitude=float('nan')):
"""Generate :class:`UTMPoint` from latitude, longitude and (optional) altitude.
Latitude and longitude are expressed in degrees, relative to the
WGS84 ellipsoid.
:param latitude: [degrees], negative is South.
:param longitude: [degrees], negative is West.
:param altitude: [meters], negative is below the ellipsoid.
:returns: :class:`UTMPoint` object.
"""
z, b = gridZone(latitude, longitude)
utm_proj = pyproj.Proj(proj='utm', zone=z, datum='WGS84')
e, n = utm_proj(longitude, latitude)
return UTMPoint(easting=e, northing=n, altitude=altitude, zone=z, band=b) | [
"def",
"fromLatLong",
"(",
"latitude",
",",
"longitude",
",",
"altitude",
"=",
"float",
"(",
"'nan'",
")",
")",
":",
"z",
",",
"b",
"=",
"gridZone",
"(",
"latitude",
",",
"longitude",
")",
"utm_proj",
"=",
"pyproj",
".",
"Proj",
"(",
"proj",
"=",
"'utm'",
",",
"zone",
"=",
"z",
",",
"datum",
"=",
"'WGS84'",
")",
"e",
",",
"n",
"=",
"utm_proj",
"(",
"longitude",
",",
"latitude",
")",
"return",
"UTMPoint",
"(",
"easting",
"=",
"e",
",",
"northing",
"=",
"n",
",",
"altitude",
"=",
"altitude",
",",
"zone",
"=",
"z",
",",
"band",
"=",
"b",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/geodesy/utm.py#L131-L146 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/gzip.py | python | compress | (data, compresslevel=9) | return buf.getvalue() | Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 0-9. | Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 0-9. | [
"Compress",
"data",
"in",
"one",
"shot",
"and",
"return",
"the",
"compressed",
"string",
".",
"Optional",
"argument",
"is",
"the",
"compression",
"level",
"in",
"range",
"of",
"0",
"-",
"9",
"."
] | def compress(data, compresslevel=9):
"""Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 0-9.
"""
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) as f:
f.write(data)
return buf.getvalue() | [
"def",
"compress",
"(",
"data",
",",
"compresslevel",
"=",
"9",
")",
":",
"buf",
"=",
"io",
".",
"BytesIO",
"(",
")",
"with",
"GzipFile",
"(",
"fileobj",
"=",
"buf",
",",
"mode",
"=",
"'wb'",
",",
"compresslevel",
"=",
"compresslevel",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"data",
")",
"return",
"buf",
".",
"getvalue",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/gzip.py#L529-L536 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/richtext.py | python | TextBoxAttr.SetClearMode | (*args, **kwargs) | return _richtext.TextBoxAttr_SetClearMode(*args, **kwargs) | SetClearMode(self, int mode) | SetClearMode(self, int mode) | [
"SetClearMode",
"(",
"self",
"int",
"mode",
")"
] | def SetClearMode(*args, **kwargs):
"""SetClearMode(self, int mode)"""
return _richtext.TextBoxAttr_SetClearMode(*args, **kwargs) | [
"def",
"SetClearMode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"TextBoxAttr_SetClearMode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/richtext.py#L596-L598 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/boto/boto/mturk/connection.py | python | MTurkConnection.create_qualification_type | (self,
name,
description,
status,
keywords=None,
retry_delay=None,
test=None,
answer_key=None,
answer_key_xml=None,
test_duration=None,
auto_granted=False,
auto_granted_value=1) | return self._process_request('CreateQualificationType', params,
[('QualificationType',
QualificationType)]) | Create a new Qualification Type.
name: This will be visible to workers and must be unique for a
given requester.
description: description shown to workers. Max 2000 characters.
status: 'Active' or 'Inactive'
keywords: list of keyword strings or comma separated string.
Max length of 1000 characters when concatenated with commas.
retry_delay: number of seconds after requesting a
qualification the worker must wait before they can ask again.
If not specified, workers can only request this qualification
once.
test: a QuestionForm
answer_key: an XML string of your answer key, for automatically
scored qualification tests.
(Consider implementing an AnswerKey class for this to support.)
test_duration: the number of seconds a worker has to complete the test.
auto_granted: if True, requests for the Qualification are granted
immediately. Can't coexist with a test.
auto_granted_value: auto_granted qualifications are given this value. | Create a new Qualification Type. | [
"Create",
"a",
"new",
"Qualification",
"Type",
"."
] | def create_qualification_type(self,
name,
description,
status,
keywords=None,
retry_delay=None,
test=None,
answer_key=None,
answer_key_xml=None,
test_duration=None,
auto_granted=False,
auto_granted_value=1):
"""
Create a new Qualification Type.
name: This will be visible to workers and must be unique for a
given requester.
description: description shown to workers. Max 2000 characters.
status: 'Active' or 'Inactive'
keywords: list of keyword strings or comma separated string.
Max length of 1000 characters when concatenated with commas.
retry_delay: number of seconds after requesting a
qualification the worker must wait before they can ask again.
If not specified, workers can only request this qualification
once.
test: a QuestionForm
answer_key: an XML string of your answer key, for automatically
scored qualification tests.
(Consider implementing an AnswerKey class for this to support.)
test_duration: the number of seconds a worker has to complete the test.
auto_granted: if True, requests for the Qualification are granted
immediately. Can't coexist with a test.
auto_granted_value: auto_granted qualifications are given this value.
"""
params = {'Name': name,
'Description': description,
'QualificationTypeStatus': status,
}
if retry_delay is not None:
params['RetryDelayInSeconds'] = retry_delay
if test is not None:
assert(isinstance(test, QuestionForm))
assert(test_duration is not None)
params['Test'] = test.get_as_xml()
if test_duration is not None:
params['TestDurationInSeconds'] = test_duration
if answer_key is not None:
if isinstance(answer_key, basestring):
params['AnswerKey'] = answer_key # xml
else:
raise TypeError
# Eventually someone will write an AnswerKey class.
if auto_granted:
assert(test is None)
params['AutoGranted'] = True
params['AutoGrantedValue'] = auto_granted_value
if keywords:
params['Keywords'] = self.get_keywords_as_string(keywords)
return self._process_request('CreateQualificationType', params,
[('QualificationType',
QualificationType)]) | [
"def",
"create_qualification_type",
"(",
"self",
",",
"name",
",",
"description",
",",
"status",
",",
"keywords",
"=",
"None",
",",
"retry_delay",
"=",
"None",
",",
"test",
"=",
"None",
",",
"answer_key",
"=",
"None",
",",
"answer_key_xml",
"=",
"None",
",",
"test_duration",
"=",
"None",
",",
"auto_granted",
"=",
"False",
",",
"auto_granted_value",
"=",
"1",
")",
":",
"params",
"=",
"{",
"'Name'",
":",
"name",
",",
"'Description'",
":",
"description",
",",
"'QualificationTypeStatus'",
":",
"status",
",",
"}",
"if",
"retry_delay",
"is",
"not",
"None",
":",
"params",
"[",
"'RetryDelayInSeconds'",
"]",
"=",
"retry_delay",
"if",
"test",
"is",
"not",
"None",
":",
"assert",
"(",
"isinstance",
"(",
"test",
",",
"QuestionForm",
")",
")",
"assert",
"(",
"test_duration",
"is",
"not",
"None",
")",
"params",
"[",
"'Test'",
"]",
"=",
"test",
".",
"get_as_xml",
"(",
")",
"if",
"test_duration",
"is",
"not",
"None",
":",
"params",
"[",
"'TestDurationInSeconds'",
"]",
"=",
"test_duration",
"if",
"answer_key",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"answer_key",
",",
"basestring",
")",
":",
"params",
"[",
"'AnswerKey'",
"]",
"=",
"answer_key",
"# xml",
"else",
":",
"raise",
"TypeError",
"# Eventually someone will write an AnswerKey class.",
"if",
"auto_granted",
":",
"assert",
"(",
"test",
"is",
"None",
")",
"params",
"[",
"'AutoGranted'",
"]",
"=",
"True",
"params",
"[",
"'AutoGrantedValue'",
"]",
"=",
"auto_granted_value",
"if",
"keywords",
":",
"params",
"[",
"'Keywords'",
"]",
"=",
"self",
".",
"get_keywords_as_string",
"(",
"keywords",
")",
"return",
"self",
".",
"_process_request",
"(",
"'CreateQualificationType'",
",",
"params",
",",
"[",
"(",
"'QualificationType'",
",",
"QualificationType",
")",
"]",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/mturk/connection.py#L618-L695 | |
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py | python | ResourceManager.resource_filename | (self, package_or_requirement, resource_name) | return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
) | Return a true filesystem path for specified resource | Return a true filesystem path for specified resource | [
"Return",
"a",
"true",
"filesystem",
"path",
"for",
"specified",
"resource"
] | def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
) | [
"def",
"resource_filename",
"(",
"self",
",",
"package_or_requirement",
",",
"resource_name",
")",
":",
"return",
"get_provider",
"(",
"package_or_requirement",
")",
".",
"get_resource_filename",
"(",
"self",
",",
"resource_name",
")"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py#L1140-L1144 | |
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Draft/draftmake/make_wire.py | python | make_wire | (pointslist, closed=False, placement=None, face=None, support=None, bs2wire=False) | return obj | makeWire(pointslist,[closed],[placement])
Creates a Wire object from the given list of vectors. If face is
true (and wire is closed), the wire will appear filled. Instead of
a pointslist, you can also pass a Part Wire.
Parameters
----------
pointslist : [Base.Vector]
List of points to create the polyline
closed : bool
If closed is True or first and last points are identical,
the created polyline will be closed.
placement : Base.Placement
If a placement is given, it is used.
face : Bool
If face is False, the rectangle is shown as a wireframe,
otherwise as a face.
support :
TODO: Describe
bs2wire : bool
TODO: Describe | makeWire(pointslist,[closed],[placement])
Creates a Wire object from the given list of vectors. If face is
true (and wire is closed), the wire will appear filled. Instead of
a pointslist, you can also pass a Part Wire. | [
"makeWire",
"(",
"pointslist",
"[",
"closed",
"]",
"[",
"placement",
"]",
")",
"Creates",
"a",
"Wire",
"object",
"from",
"the",
"given",
"list",
"of",
"vectors",
".",
"If",
"face",
"is",
"true",
"(",
"and",
"wire",
"is",
"closed",
")",
"the",
"wire",
"will",
"appear",
"filled",
".",
"Instead",
"of",
"a",
"pointslist",
"you",
"can",
"also",
"pass",
"a",
"Part",
"Wire",
"."
] | def make_wire(pointslist, closed=False, placement=None, face=None, support=None, bs2wire=False):
"""makeWire(pointslist,[closed],[placement])
Creates a Wire object from the given list of vectors. If face is
true (and wire is closed), the wire will appear filled. Instead of
a pointslist, you can also pass a Part Wire.
Parameters
----------
pointslist : [Base.Vector]
List of points to create the polyline
closed : bool
If closed is True or first and last points are identical,
the created polyline will be closed.
placement : Base.Placement
If a placement is given, it is used.
face : Bool
If face is False, the rectangle is shown as a wireframe,
otherwise as a face.
support :
TODO: Describe
bs2wire : bool
TODO: Describe
"""
if not App.ActiveDocument:
App.Console.PrintError("No active document. Aborting\n")
return None
import Part
if isinstance(pointslist, (list,tuple)):
for pnt in pointslist:
if not isinstance(pnt, App.Vector):
App.Console.PrintError(
"Items must be Base.Vector objects, not {}\n".format(
type(pnt)))
return None
elif isinstance(pointslist, Part.Wire):
for edge in pointslist.Edges:
if not DraftGeomUtils.is_straight_line(edge):
App.Console.PrintError("All edges must be straight lines\n")
return None
closed = pointslist.isClosed()
pointslist = [v.Point for v in pointslist.OrderedVertexes]
else:
App.Console.PrintError("Can't make Draft Wire from {}\n".format(
type(pointslist)))
return None
if len(pointslist) == 0:
App.Console.PrintWarning("Draft Wire created with empty point list\n")
if placement:
utils.type_check([(placement, App.Placement)], "make_wire")
ipl = placement.inverse()
if not bs2wire:
pointslist = [ipl.multVec(p) for p in pointslist]
if len(pointslist) == 2:
fname = "Line"
else:
fname = "Wire"
obj = App.ActiveDocument.addObject("Part::Part2DObjectPython", fname)
Wire(obj)
obj.Points = pointslist
obj.Closed = closed
obj.Support = support
if face != None:
obj.MakeFace = face
if placement:
obj.Placement = placement
if App.GuiUp:
ViewProviderWire(obj.ViewObject)
gui_utils.format_object(obj)
gui_utils.select(obj)
return obj | [
"def",
"make_wire",
"(",
"pointslist",
",",
"closed",
"=",
"False",
",",
"placement",
"=",
"None",
",",
"face",
"=",
"None",
",",
"support",
"=",
"None",
",",
"bs2wire",
"=",
"False",
")",
":",
"if",
"not",
"App",
".",
"ActiveDocument",
":",
"App",
".",
"Console",
".",
"PrintError",
"(",
"\"No active document. Aborting\\n\"",
")",
"return",
"None",
"import",
"Part",
"if",
"isinstance",
"(",
"pointslist",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"pnt",
"in",
"pointslist",
":",
"if",
"not",
"isinstance",
"(",
"pnt",
",",
"App",
".",
"Vector",
")",
":",
"App",
".",
"Console",
".",
"PrintError",
"(",
"\"Items must be Base.Vector objects, not {}\\n\"",
".",
"format",
"(",
"type",
"(",
"pnt",
")",
")",
")",
"return",
"None",
"elif",
"isinstance",
"(",
"pointslist",
",",
"Part",
".",
"Wire",
")",
":",
"for",
"edge",
"in",
"pointslist",
".",
"Edges",
":",
"if",
"not",
"DraftGeomUtils",
".",
"is_straight_line",
"(",
"edge",
")",
":",
"App",
".",
"Console",
".",
"PrintError",
"(",
"\"All edges must be straight lines\\n\"",
")",
"return",
"None",
"closed",
"=",
"pointslist",
".",
"isClosed",
"(",
")",
"pointslist",
"=",
"[",
"v",
".",
"Point",
"for",
"v",
"in",
"pointslist",
".",
"OrderedVertexes",
"]",
"else",
":",
"App",
".",
"Console",
".",
"PrintError",
"(",
"\"Can't make Draft Wire from {}\\n\"",
".",
"format",
"(",
"type",
"(",
"pointslist",
")",
")",
")",
"return",
"None",
"if",
"len",
"(",
"pointslist",
")",
"==",
"0",
":",
"App",
".",
"Console",
".",
"PrintWarning",
"(",
"\"Draft Wire created with empty point list\\n\"",
")",
"if",
"placement",
":",
"utils",
".",
"type_check",
"(",
"[",
"(",
"placement",
",",
"App",
".",
"Placement",
")",
"]",
",",
"\"make_wire\"",
")",
"ipl",
"=",
"placement",
".",
"inverse",
"(",
")",
"if",
"not",
"bs2wire",
":",
"pointslist",
"=",
"[",
"ipl",
".",
"multVec",
"(",
"p",
")",
"for",
"p",
"in",
"pointslist",
"]",
"if",
"len",
"(",
"pointslist",
")",
"==",
"2",
":",
"fname",
"=",
"\"Line\"",
"else",
":",
"fname",
"=",
"\"Wire\"",
"obj",
"=",
"App",
".",
"ActiveDocument",
".",
"addObject",
"(",
"\"Part::Part2DObjectPython\"",
",",
"fname",
")",
"Wire",
"(",
"obj",
")",
"obj",
".",
"Points",
"=",
"pointslist",
"obj",
".",
"Closed",
"=",
"closed",
"obj",
".",
"Support",
"=",
"support",
"if",
"face",
"!=",
"None",
":",
"obj",
".",
"MakeFace",
"=",
"face",
"if",
"placement",
":",
"obj",
".",
"Placement",
"=",
"placement",
"if",
"App",
".",
"GuiUp",
":",
"ViewProviderWire",
"(",
"obj",
".",
"ViewObject",
")",
"gui_utils",
".",
"format_object",
"(",
"obj",
")",
"gui_utils",
".",
"select",
"(",
"obj",
")",
"return",
"obj"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftmake/make_wire.py#L40-L128 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/turtle.py | python | TNavigator._rotate | (self, angle) | Turn turtle counterclockwise by specified angle if angle > 0. | Turn turtle counterclockwise by specified angle if angle > 0. | [
"Turn",
"turtle",
"counterclockwise",
"by",
"specified",
"angle",
"if",
"angle",
">",
"0",
"."
] | def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle) | [
"def",
"_rotate",
"(",
"self",
",",
"angle",
")",
":",
"angle",
"*=",
"self",
".",
"_degreesPerAU",
"self",
".",
"_orient",
"=",
"self",
".",
"_orient",
".",
"rotate",
"(",
"angle",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/turtle.py#L1607-L1610 | ||
lawy623/SVS | b7c7ae367c82a4797ff4a896a2ff304f02e7f724 | caffe/scripts/cpp_lint.py | python | CheckStyle | (filename, clean_lines, linenum, file_extension, nesting_state,
error) | Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | Checks rules from the 'C++ style rules' section of cppguide.html. | [
"Checks",
"rules",
"from",
"the",
"C",
"++",
"style",
"rules",
"section",
"of",
"cppguide",
".",
"html",
"."
] | def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings,
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for section labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developers fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) | [
"def",
"CheckStyle",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"file_extension",
",",
"nesting_state",
",",
"error",
")",
":",
"# Don't use \"elided\" lines here, otherwise we can't check commented lines.",
"# Don't want to use \"raw\" either, because we don't want to check inside C++11",
"# raw strings,",
"raw_lines",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"line",
"=",
"raw_lines",
"[",
"linenum",
"]",
"if",
"line",
".",
"find",
"(",
"'\\t'",
")",
"!=",
"-",
"1",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/tab'",
",",
"1",
",",
"'Tab found; better to use spaces'",
")",
"# One or three blank spaces at the beginning of the line is weird; it's",
"# hard to reconcile that with 2-space indents.",
"# NOTE: here are the conditions rob pike used for his tests. Mine aren't",
"# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces",
"# if(RLENGTH > 20) complain = 0;",
"# if(match($0, \" +(error|private|public|protected):\")) complain = 0;",
"# if(match(prev, \"&& *$\")) complain = 0;",
"# if(match(prev, \"\\\\|\\\\| *$\")) complain = 0;",
"# if(match(prev, \"[\\\",=><] *$\")) complain = 0;",
"# if(match($0, \" <<\")) complain = 0;",
"# if(match(prev, \" +for \\\\(\")) complain = 0;",
"# if(prevodd && match(prevprev, \" +for \\\\(\")) complain = 0;",
"initial_spaces",
"=",
"0",
"cleansed_line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"while",
"initial_spaces",
"<",
"len",
"(",
"line",
")",
"and",
"line",
"[",
"initial_spaces",
"]",
"==",
"' '",
":",
"initial_spaces",
"+=",
"1",
"if",
"line",
"and",
"line",
"[",
"-",
"1",
"]",
".",
"isspace",
"(",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/end_of_line'",
",",
"4",
",",
"'Line ends in whitespace. Consider deleting these extra spaces.'",
")",
"# There are certain situations we allow one space, notably for section labels",
"elif",
"(",
"(",
"initial_spaces",
"==",
"1",
"or",
"initial_spaces",
"==",
"3",
")",
"and",
"not",
"Match",
"(",
"r'\\s*\\w+\\s*:\\s*$'",
",",
"cleansed_line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/indent'",
",",
"3",
",",
"'Weird number of spaces at line-start. '",
"'Are you using a 2-space indent?'",
")",
"# Check if the line is a header guard.",
"is_header_guard",
"=",
"False",
"if",
"file_extension",
"==",
"'h'",
":",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"if",
"(",
"line",
".",
"startswith",
"(",
"'#ifndef %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#define %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#endif // %s'",
"%",
"cppvar",
")",
")",
":",
"is_header_guard",
"=",
"True",
"# #include lines and header guards can be long, since there's no clean way to",
"# split them.",
"#",
"# URLs can be long too. It's possible to split these, but it makes them",
"# harder to cut&paste.",
"#",
"# The \"$Id:...$\" comment may also get very long without it being the",
"# developers fault.",
"if",
"(",
"not",
"line",
".",
"startswith",
"(",
"'#include'",
")",
"and",
"not",
"is_header_guard",
"and",
"not",
"Match",
"(",
"r'^\\s*//.*http(s?)://\\S*$'",
",",
"line",
")",
"and",
"not",
"Match",
"(",
"r'^// \\$Id:.*#[0-9]+ \\$$'",
",",
"line",
")",
")",
":",
"line_width",
"=",
"GetLineWidth",
"(",
"line",
")",
"extended_length",
"=",
"int",
"(",
"(",
"_line_length",
"*",
"1.25",
")",
")",
"if",
"line_width",
">",
"extended_length",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/line_length'",
",",
"4",
",",
"'Lines should very rarely be longer than %i characters'",
"%",
"extended_length",
")",
"elif",
"line_width",
">",
"_line_length",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/line_length'",
",",
"2",
",",
"'Lines should be <= %i characters long'",
"%",
"_line_length",
")",
"if",
"(",
"cleansed_line",
".",
"count",
"(",
"';'",
")",
">",
"1",
"and",
"# for loops are allowed two ;'s (and may run over two lines).",
"cleansed_line",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"and",
"(",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"or",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"';'",
")",
"!=",
"-",
"1",
")",
"and",
"# It's ok to have many commands in a switch case that fits in 1 line",
"not",
"(",
"(",
"cleansed_line",
".",
"find",
"(",
"'case '",
")",
"!=",
"-",
"1",
"or",
"cleansed_line",
".",
"find",
"(",
"'default:'",
")",
"!=",
"-",
"1",
")",
"and",
"cleansed_line",
".",
"find",
"(",
"'break;'",
")",
"!=",
"-",
"1",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/newline'",
",",
"0",
",",
"'More than one command on the same line'",
")",
"# Some more style checks",
"CheckBraces",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckEmptyBlockBody",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckAccess",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckCheck",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckAltTokens",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"if",
"classinfo",
":",
"CheckSectionSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"classinfo",
",",
"linenum",
",",
"error",
")"
] | https://github.com/lawy623/SVS/blob/b7c7ae367c82a4797ff4a896a2ff304f02e7f724/caffe/scripts/cpp_lint.py#L3459-L3563 | ||
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Code/Tools/waf-1.7.13/waflib/Utils.py | python | nogc | (fun) | return f | Decorator: let a function disable the garbage collector during its execution.
It is used in the build context when storing/loading the build cache file (pickle)
:param fun: function to execute
:type fun: function
:return: the return value of the function executed | Decorator: let a function disable the garbage collector during its execution.
It is used in the build context when storing/loading the build cache file (pickle) | [
"Decorator",
":",
"let",
"a",
"function",
"disable",
"the",
"garbage",
"collector",
"during",
"its",
"execution",
".",
"It",
"is",
"used",
"in",
"the",
"build",
"context",
"when",
"storing",
"/",
"loading",
"the",
"build",
"cache",
"file",
"(",
"pickle",
")"
] | def nogc(fun):
"""
Decorator: let a function disable the garbage collector during its execution.
It is used in the build context when storing/loading the build cache file (pickle)
:param fun: function to execute
:type fun: function
:return: the return value of the function executed
"""
def f(*k, **kw):
try:
gc.disable()
ret = fun(*k, **kw)
finally:
gc.enable()
return ret
f.__doc__ = fun.__doc__
return f | [
"def",
"nogc",
"(",
"fun",
")",
":",
"def",
"f",
"(",
"*",
"k",
",",
"*",
"*",
"kw",
")",
":",
"try",
":",
"gc",
".",
"disable",
"(",
")",
"ret",
"=",
"fun",
"(",
"*",
"k",
",",
"*",
"*",
"kw",
")",
"finally",
":",
"gc",
".",
"enable",
"(",
")",
"return",
"ret",
"f",
".",
"__doc__",
"=",
"fun",
".",
"__doc__",
"return",
"f"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Utils.py#L678-L695 | |
jackaudio/jack2 | 21b293dbc37d42446141a08922cdec0d2550c6a0 | waflib/Tools/ccroot.py | python | read_shlib | (self, name, paths=[], export_includes=[], export_defines=[]) | return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines) | Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes::
def build(bld):
bld.read_shlib('m')
bld.program(source='main.c', use='m') | Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes:: | [
"Read",
"a",
"system",
"shared",
"library",
"enabling",
"its",
"use",
"as",
"a",
"local",
"library",
".",
"Will",
"trigger",
"a",
"rebuild",
"if",
"the",
"file",
"changes",
"::"
] | def read_shlib(self, name, paths=[], export_includes=[], export_defines=[]):
"""
Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes::
def build(bld):
bld.read_shlib('m')
bld.program(source='main.c', use='m')
"""
return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines) | [
"def",
"read_shlib",
"(",
"self",
",",
"name",
",",
"paths",
"=",
"[",
"]",
",",
"export_includes",
"=",
"[",
"]",
",",
"export_defines",
"=",
"[",
"]",
")",
":",
"return",
"self",
"(",
"name",
"=",
"name",
",",
"features",
"=",
"'fake_lib'",
",",
"lib_paths",
"=",
"paths",
",",
"lib_type",
"=",
"'shlib'",
",",
"export_includes",
"=",
"export_includes",
",",
"export_defines",
"=",
"export_defines",
")"
] | https://github.com/jackaudio/jack2/blob/21b293dbc37d42446141a08922cdec0d2550c6a0/waflib/Tools/ccroot.py#L671-L679 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/pyparsing.py | python | ParseResults.haskeys | ( self ) | return bool(self.__tokdict) | Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names. | Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names. | [
"Since",
"keys",
"()",
"returns",
"an",
"iterator",
"this",
"method",
"is",
"helpful",
"in",
"bypassing",
"code",
"that",
"looks",
"for",
"the",
"existence",
"of",
"any",
"defined",
"results",
"names",
"."
] | def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict) | [
"def",
"haskeys",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"__tokdict",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/pyparsing.py#L506-L509 | |
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py | python | Distribution.load_entry_point | (self, group, name) | return ep.load() | Return the `name` entry point of `group` or raise ImportError | Return the `name` entry point of `group` or raise ImportError | [
"Return",
"the",
"name",
"entry",
"point",
"of",
"group",
"or",
"raise",
"ImportError"
] | def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load() | [
"def",
"load_entry_point",
"(",
"self",
",",
"group",
",",
"name",
")",
":",
"ep",
"=",
"self",
".",
"get_entry_info",
"(",
"group",
",",
"name",
")",
"if",
"ep",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"\"Entry point %r not found\"",
"%",
"(",
"(",
"group",
",",
"name",
")",
",",
")",
")",
"return",
"ep",
".",
"load",
"(",
")"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py#L2655-L2660 | |
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/drill/model/DrillExportModel.py | python | DrillExportModel._onTaskError | (self, name, msg) | Triggered when the export failed.
Args:
name (str): the task name
msg (str): error msg | Triggered when the export failed. | [
"Triggered",
"when",
"the",
"export",
"failed",
"."
] | def _onTaskError(self, name, msg):
"""
Triggered when the export failed.
Args:
name (str): the task name
msg (str): error msg
"""
name = name.split(':')
wsName = name[0]
filename = name[1]
logger.error("Error while exporting workspace {}.".format(wsName))
logger.error(msg)
if wsName in self._exports:
self._exports[wsName].discard(filename)
if not self._exports[wsName]:
del self._exports[wsName]
self._logSuccessExport(wsName) | [
"def",
"_onTaskError",
"(",
"self",
",",
"name",
",",
"msg",
")",
":",
"name",
"=",
"name",
".",
"split",
"(",
"':'",
")",
"wsName",
"=",
"name",
"[",
"0",
"]",
"filename",
"=",
"name",
"[",
"1",
"]",
"logger",
".",
"error",
"(",
"\"Error while exporting workspace {}.\"",
".",
"format",
"(",
"wsName",
")",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"if",
"wsName",
"in",
"self",
".",
"_exports",
":",
"self",
".",
"_exports",
"[",
"wsName",
"]",
".",
"discard",
"(",
"filename",
")",
"if",
"not",
"self",
".",
"_exports",
"[",
"wsName",
"]",
":",
"del",
"self",
".",
"_exports",
"[",
"wsName",
"]",
"self",
".",
"_logSuccessExport",
"(",
"wsName",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/drill/model/DrillExportModel.py#L182-L201 | ||
triton-inference-server/server | 11a11d9cb1e9734ed9fd305e752da70f07d1992f | qa/common/gen_tag_sigdef.py | python | create_savedmodel | (models_dir,
model_version=1,
dims=16,
model_name="sig_tag",
tag_name="testTag",
signature_def_name="testSigDef") | Creates 4 SavedModels that have different combinations of model_name and tag_name.
The models multiplies the input tensor by a multiplier and the multiplier value is different for each model.
Naming convention and config used:
<model_name>0: tag: "serve", signature_def: "serving_default", multiplier 1
<model_name>1: tag: "serve", signature_def: <signature_def_name>, multiplier 2
<model_name>2: tag: <tag_name>, signature_def: "serving_default", multiplier 3
<model_name>3: tag: <tag_name>, signature_def: <signature_def_name>, multiplier 4 | Creates 4 SavedModels that have different combinations of model_name and tag_name.
The models multiplies the input tensor by a multiplier and the multiplier value is different for each model.
Naming convention and config used:
<model_name>0: tag: "serve", signature_def: "serving_default", multiplier 1
<model_name>1: tag: "serve", signature_def: <signature_def_name>, multiplier 2
<model_name>2: tag: <tag_name>, signature_def: "serving_default", multiplier 3
<model_name>3: tag: <tag_name>, signature_def: <signature_def_name>, multiplier 4 | [
"Creates",
"4",
"SavedModels",
"that",
"have",
"different",
"combinations",
"of",
"model_name",
"and",
"tag_name",
".",
"The",
"models",
"multiplies",
"the",
"input",
"tensor",
"by",
"a",
"multiplier",
"and",
"the",
"multiplier",
"value",
"is",
"different",
"for",
"each",
"model",
".",
"Naming",
"convention",
"and",
"config",
"used",
":",
"<model_name",
">",
"0",
":",
"tag",
":",
"serve",
"signature_def",
":",
"serving_default",
"multiplier",
"1",
"<model_name",
">",
"1",
":",
"tag",
":",
"serve",
"signature_def",
":",
"<signature_def_name",
">",
"multiplier",
"2",
"<model_name",
">",
"2",
":",
"tag",
":",
"<tag_name",
">",
"signature_def",
":",
"serving_default",
"multiplier",
"3",
"<model_name",
">",
"3",
":",
"tag",
":",
"<tag_name",
">",
"signature_def",
":",
"<signature_def_name",
">",
"multiplier",
"4"
] | def create_savedmodel(models_dir,
model_version=1,
dims=16,
model_name="sig_tag",
tag_name="testTag",
signature_def_name="testSigDef"):
"""
Creates 4 SavedModels that have different combinations of model_name and tag_name.
The models multiplies the input tensor by a multiplier and the multiplier value is different for each model.
Naming convention and config used:
<model_name>0: tag: "serve", signature_def: "serving_default", multiplier 1
<model_name>1: tag: "serve", signature_def: <signature_def_name>, multiplier 2
<model_name>2: tag: <tag_name>, signature_def: "serving_default", multiplier 3
<model_name>3: tag: <tag_name>, signature_def: <signature_def_name>, multiplier 4
"""
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with tf.Session() as sess:
input_tensor = tf.placeholder(tf.float32, [dims], "TENSOR_INPUT")
# tag:"serve", signature_def:"serving_default"
multiplier_0 = tf.constant(1.0, name="multiplier_0")
# tag:"serve", signature_def:signature_def_name
multiplier_1 = tf.constant(2.0, name="multiplier_1")
# tag:tag_name, signature_def:"serving_default"
multiplier_2 = tf.constant(3.0, name="multiplier_2")
# tag:tag_name, signature_def:signature_def_name
multiplier_3 = tf.constant(4.0, name="multiplier_3")
output_tensor_0 = tf.multiply(multiplier_0,
input_tensor,
name="TENSOR_OUTPUT")
output_tensor_1 = tf.multiply(multiplier_1,
input_tensor,
name="TENSOR_OUTPUT")
output_tensor_2 = tf.multiply(multiplier_2,
input_tensor,
name="TENSOR_OUTPUT")
output_tensor_3 = tf.multiply(multiplier_3,
input_tensor,
name="TENSOR_OUTPUT")
# build_tensor_info_op could be used if build_tensor_info is deprecated
input_tensor_info = tf.saved_model.utils.build_tensor_info(input_tensor)
output_tensor_info_0 = tf.saved_model.utils.build_tensor_info(
output_tensor_0)
output_tensor_info_1 = tf.saved_model.utils.build_tensor_info(
output_tensor_1)
output_tensor_info_2 = tf.saved_model.utils.build_tensor_info(
output_tensor_2)
output_tensor_info_3 = tf.saved_model.utils.build_tensor_info(
output_tensor_3)
# Using predict method name because simple save uses it
# tag:"serve", signature_def:"serving_default"
signature_0 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_0},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# tag:"serve", signature_def:signature_def_name
signature_1 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_1},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# tag:tag_name, signature_def:"serving_default"
signature_2 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_2},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# tag:tag_name, signature_def:signature_def_name
signature_3 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={"INPUT": input_tensor_info},
outputs={"OUTPUT": output_tensor_info_3},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
signature_def_map_0 = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_0,
signature_def_name: signature_1
}
signature_def_map_1 = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_2,
signature_def_name: signature_3
}
b = builder.SavedModelBuilder(model_version_dir + "/model.savedmodel")
b.add_meta_graph_and_variables(sess,
tags=[tag_constants.SERVING],
signature_def_map=signature_def_map_0,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
clear_devices=True)
b.add_meta_graph(tags=[tag_name],
signature_def_map=signature_def_map_1,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
clear_devices=True)
b.save() | [
"def",
"create_savedmodel",
"(",
"models_dir",
",",
"model_version",
"=",
"1",
",",
"dims",
"=",
"16",
",",
"model_name",
"=",
"\"sig_tag\"",
",",
"tag_name",
"=",
"\"testTag\"",
",",
"signature_def_name",
"=",
"\"testSigDef\"",
")",
":",
"model_version_dir",
"=",
"models_dir",
"+",
"\"/\"",
"+",
"model_name",
"+",
"\"/\"",
"+",
"str",
"(",
"model_version",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"model_version_dir",
")",
"except",
"OSError",
"as",
"ex",
":",
"pass",
"# ignore existing dir",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"input_tensor",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"dims",
"]",
",",
"\"TENSOR_INPUT\"",
")",
"# tag:\"serve\", signature_def:\"serving_default\"",
"multiplier_0",
"=",
"tf",
".",
"constant",
"(",
"1.0",
",",
"name",
"=",
"\"multiplier_0\"",
")",
"# tag:\"serve\", signature_def:signature_def_name",
"multiplier_1",
"=",
"tf",
".",
"constant",
"(",
"2.0",
",",
"name",
"=",
"\"multiplier_1\"",
")",
"# tag:tag_name, signature_def:\"serving_default\"",
"multiplier_2",
"=",
"tf",
".",
"constant",
"(",
"3.0",
",",
"name",
"=",
"\"multiplier_2\"",
")",
"# tag:tag_name, signature_def:signature_def_name",
"multiplier_3",
"=",
"tf",
".",
"constant",
"(",
"4.0",
",",
"name",
"=",
"\"multiplier_3\"",
")",
"output_tensor_0",
"=",
"tf",
".",
"multiply",
"(",
"multiplier_0",
",",
"input_tensor",
",",
"name",
"=",
"\"TENSOR_OUTPUT\"",
")",
"output_tensor_1",
"=",
"tf",
".",
"multiply",
"(",
"multiplier_1",
",",
"input_tensor",
",",
"name",
"=",
"\"TENSOR_OUTPUT\"",
")",
"output_tensor_2",
"=",
"tf",
".",
"multiply",
"(",
"multiplier_2",
",",
"input_tensor",
",",
"name",
"=",
"\"TENSOR_OUTPUT\"",
")",
"output_tensor_3",
"=",
"tf",
".",
"multiply",
"(",
"multiplier_3",
",",
"input_tensor",
",",
"name",
"=",
"\"TENSOR_OUTPUT\"",
")",
"# build_tensor_info_op could be used if build_tensor_info is deprecated",
"input_tensor_info",
"=",
"tf",
".",
"saved_model",
".",
"utils",
".",
"build_tensor_info",
"(",
"input_tensor",
")",
"output_tensor_info_0",
"=",
"tf",
".",
"saved_model",
".",
"utils",
".",
"build_tensor_info",
"(",
"output_tensor_0",
")",
"output_tensor_info_1",
"=",
"tf",
".",
"saved_model",
".",
"utils",
".",
"build_tensor_info",
"(",
"output_tensor_1",
")",
"output_tensor_info_2",
"=",
"tf",
".",
"saved_model",
".",
"utils",
".",
"build_tensor_info",
"(",
"output_tensor_2",
")",
"output_tensor_info_3",
"=",
"tf",
".",
"saved_model",
".",
"utils",
".",
"build_tensor_info",
"(",
"output_tensor_3",
")",
"# Using predict method name because simple save uses it",
"# tag:\"serve\", signature_def:\"serving_default\"",
"signature_0",
"=",
"tf",
".",
"saved_model",
".",
"signature_def_utils",
".",
"build_signature_def",
"(",
"inputs",
"=",
"{",
"\"INPUT\"",
":",
"input_tensor_info",
"}",
",",
"outputs",
"=",
"{",
"\"OUTPUT\"",
":",
"output_tensor_info_0",
"}",
",",
"method_name",
"=",
"tf",
".",
"saved_model",
".",
"signature_constants",
".",
"PREDICT_METHOD_NAME",
")",
"# tag:\"serve\", signature_def:signature_def_name",
"signature_1",
"=",
"tf",
".",
"saved_model",
".",
"signature_def_utils",
".",
"build_signature_def",
"(",
"inputs",
"=",
"{",
"\"INPUT\"",
":",
"input_tensor_info",
"}",
",",
"outputs",
"=",
"{",
"\"OUTPUT\"",
":",
"output_tensor_info_1",
"}",
",",
"method_name",
"=",
"tf",
".",
"saved_model",
".",
"signature_constants",
".",
"PREDICT_METHOD_NAME",
")",
"# tag:tag_name, signature_def:\"serving_default\"",
"signature_2",
"=",
"tf",
".",
"saved_model",
".",
"signature_def_utils",
".",
"build_signature_def",
"(",
"inputs",
"=",
"{",
"\"INPUT\"",
":",
"input_tensor_info",
"}",
",",
"outputs",
"=",
"{",
"\"OUTPUT\"",
":",
"output_tensor_info_2",
"}",
",",
"method_name",
"=",
"tf",
".",
"saved_model",
".",
"signature_constants",
".",
"PREDICT_METHOD_NAME",
")",
"# tag:tag_name, signature_def:signature_def_name",
"signature_3",
"=",
"tf",
".",
"saved_model",
".",
"signature_def_utils",
".",
"build_signature_def",
"(",
"inputs",
"=",
"{",
"\"INPUT\"",
":",
"input_tensor_info",
"}",
",",
"outputs",
"=",
"{",
"\"OUTPUT\"",
":",
"output_tensor_info_3",
"}",
",",
"method_name",
"=",
"tf",
".",
"saved_model",
".",
"signature_constants",
".",
"PREDICT_METHOD_NAME",
")",
"signature_def_map_0",
"=",
"{",
"signature_constants",
".",
"DEFAULT_SERVING_SIGNATURE_DEF_KEY",
":",
"signature_0",
",",
"signature_def_name",
":",
"signature_1",
"}",
"signature_def_map_1",
"=",
"{",
"signature_constants",
".",
"DEFAULT_SERVING_SIGNATURE_DEF_KEY",
":",
"signature_2",
",",
"signature_def_name",
":",
"signature_3",
"}",
"b",
"=",
"builder",
".",
"SavedModelBuilder",
"(",
"model_version_dir",
"+",
"\"/model.savedmodel\"",
")",
"b",
".",
"add_meta_graph_and_variables",
"(",
"sess",
",",
"tags",
"=",
"[",
"tag_constants",
".",
"SERVING",
"]",
",",
"signature_def_map",
"=",
"signature_def_map_0",
",",
"assets_collection",
"=",
"ops",
".",
"get_collection",
"(",
"ops",
".",
"GraphKeys",
".",
"ASSET_FILEPATHS",
")",
",",
"clear_devices",
"=",
"True",
")",
"b",
".",
"add_meta_graph",
"(",
"tags",
"=",
"[",
"tag_name",
"]",
",",
"signature_def_map",
"=",
"signature_def_map_1",
",",
"assets_collection",
"=",
"ops",
".",
"get_collection",
"(",
"ops",
".",
"GraphKeys",
".",
"ASSET_FILEPATHS",
")",
",",
"clear_devices",
"=",
"True",
")",
"b",
".",
"save",
"(",
")"
] | https://github.com/triton-inference-server/server/blob/11a11d9cb1e9734ed9fd305e752da70f07d1992f/qa/common/gen_tag_sigdef.py#L43-L144 | ||
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/data/experimental/ops/error_ops.py | python | ignore_errors | (log_warning=False) | return _apply_fn | Creates a `Dataset` from another `Dataset` and silently ignores any errors.
Use this transformation to produce a dataset that contains the same elements
as the input, but silently drops any elements that caused an error. For
example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
# Computing `tf.debugging.check_numerics(1. / 0.)` will raise an
InvalidArgumentError.
dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error"))
# Using `ignore_errors()` will drop the element that causes an error.
dataset =
dataset.apply(tf.data.experimental.ignore_errors()) # ==> {1., 0.5, 0.2}
```
Args:
log_warning: (Optional.) A 'tf.bool' scalar indicating whether ignored
errors should be logged to stderr. Defaults to 'False'.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`. | Creates a `Dataset` from another `Dataset` and silently ignores any errors. | [
"Creates",
"a",
"Dataset",
"from",
"another",
"Dataset",
"and",
"silently",
"ignores",
"any",
"errors",
"."
] | def ignore_errors(log_warning=False):
  """Creates a `Dataset` from another `Dataset` and silently ignores any errors.

  Use this transformation to produce a dataset that contains the same elements
  as the input, but silently drops any elements that caused an error. For
  example:

  ```python
  dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])

  # Computing `tf.debugging.check_numerics(1. / 0.)` will raise an
  # InvalidArgumentError.
  dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error"))

  # Using `ignore_errors()` will drop the element that causes an error.
  dataset =
      dataset.apply(tf.data.experimental.ignore_errors())  # ==> {1., 0.5, 0.2}
  ```

  Args:
    log_warning: (Optional.) A 'tf.bool' scalar indicating whether ignored
      errors should be logged to stderr. Defaults to 'False'.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  def _apply_fn(dataset):
    # Wrap the input dataset in the error-swallowing dataset variant;
    # `log_warning` is forwarded unchanged.
    return _IgnoreErrorsDataset(dataset, log_warning)

  return _apply_fn | [
"def",
"ignore_errors",
"(",
"log_warning",
"=",
"False",
")",
":",
"def",
"_apply_fn",
"(",
"dataset",
")",
":",
"return",
"_IgnoreErrorsDataset",
"(",
"dataset",
",",
"log_warning",
")",
"return",
"_apply_fn"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/data/experimental/ops/error_ops.py#L22-L52 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/rfc822.py | python | parsedate_tz | (data) | return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset) | Convert a date string to a time tuple.
Accounts for military timezones. | Convert a date string to a time tuple. | [
"Convert",
"a",
"date",
"string",
"to",
"a",
"time",
"tuple",
"."
] | def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.

    Returns a 10-tuple (year, month, day, hour, minute, second, 0, 1, 0,
    tz_offset_seconds_or_None), or None if the string cannot be parsed.
    """
    if not data:
        return None
    data = data.split()
    # Phase 1: strip an optional leading weekday name ("Mon," / "Monday.").
    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        # no space after the "weekday,"?
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    # Phase 2: normalize to five fields: day, month, year, time, timezone.
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    # Phase 3: locate the month name; swap day/month if they were reversed.
    mm = mm.lower()
    if not mm in _monthnames:
        dd, mm = mm, dd.lower()
        if not mm in _monthnames:
            return None
    mm = _monthnames.index(mm)+1
    # _monthnames lists abbreviated and full names; fold the full-name
    # indices (13..24) back into 1..12.
    if mm > 12: mm = mm - 12
    if dd[-1] == ',':
        dd = dd[:-1]
    # Phase 4: some formats put the time where the year should be; swap back.
    i = yy.find(':')
    if i > 0:
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    # Phase 5: resolve the timezone, either by name (incl. military zones
    # via _timezones) or as a numeric +-HHMM offset.
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset) | [
"def",
"parsedate_tz",
"(",
"data",
")",
":",
"if",
"not",
"data",
":",
"return",
"None",
"data",
"=",
"data",
".",
"split",
"(",
")",
"if",
"data",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"in",
"(",
"','",
",",
"'.'",
")",
"or",
"data",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"_daynames",
":",
"# There's a dayname here. Skip it",
"del",
"data",
"[",
"0",
"]",
"else",
":",
"# no space after the \"weekday,\"?",
"i",
"=",
"data",
"[",
"0",
"]",
".",
"rfind",
"(",
"','",
")",
"if",
"i",
">=",
"0",
":",
"data",
"[",
"0",
"]",
"=",
"data",
"[",
"0",
"]",
"[",
"i",
"+",
"1",
":",
"]",
"if",
"len",
"(",
"data",
")",
"==",
"3",
":",
"# RFC 850 date, deprecated",
"stuff",
"=",
"data",
"[",
"0",
"]",
".",
"split",
"(",
"'-'",
")",
"if",
"len",
"(",
"stuff",
")",
"==",
"3",
":",
"data",
"=",
"stuff",
"+",
"data",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"data",
")",
"==",
"4",
":",
"s",
"=",
"data",
"[",
"3",
"]",
"i",
"=",
"s",
".",
"find",
"(",
"'+'",
")",
"if",
"i",
">",
"0",
":",
"data",
"[",
"3",
":",
"]",
"=",
"[",
"s",
"[",
":",
"i",
"]",
",",
"s",
"[",
"i",
"+",
"1",
":",
"]",
"]",
"else",
":",
"data",
".",
"append",
"(",
"''",
")",
"# Dummy tz",
"if",
"len",
"(",
"data",
")",
"<",
"5",
":",
"return",
"None",
"data",
"=",
"data",
"[",
":",
"5",
"]",
"[",
"dd",
",",
"mm",
",",
"yy",
",",
"tm",
",",
"tz",
"]",
"=",
"data",
"mm",
"=",
"mm",
".",
"lower",
"(",
")",
"if",
"not",
"mm",
"in",
"_monthnames",
":",
"dd",
",",
"mm",
"=",
"mm",
",",
"dd",
".",
"lower",
"(",
")",
"if",
"not",
"mm",
"in",
"_monthnames",
":",
"return",
"None",
"mm",
"=",
"_monthnames",
".",
"index",
"(",
"mm",
")",
"+",
"1",
"if",
"mm",
">",
"12",
":",
"mm",
"=",
"mm",
"-",
"12",
"if",
"dd",
"[",
"-",
"1",
"]",
"==",
"','",
":",
"dd",
"=",
"dd",
"[",
":",
"-",
"1",
"]",
"i",
"=",
"yy",
".",
"find",
"(",
"':'",
")",
"if",
"i",
">",
"0",
":",
"yy",
",",
"tm",
"=",
"tm",
",",
"yy",
"if",
"yy",
"[",
"-",
"1",
"]",
"==",
"','",
":",
"yy",
"=",
"yy",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"yy",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"yy",
",",
"tz",
"=",
"tz",
",",
"yy",
"if",
"tm",
"[",
"-",
"1",
"]",
"==",
"','",
":",
"tm",
"=",
"tm",
"[",
":",
"-",
"1",
"]",
"tm",
"=",
"tm",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"tm",
")",
"==",
"2",
":",
"[",
"thh",
",",
"tmm",
"]",
"=",
"tm",
"tss",
"=",
"'0'",
"elif",
"len",
"(",
"tm",
")",
"==",
"3",
":",
"[",
"thh",
",",
"tmm",
",",
"tss",
"]",
"=",
"tm",
"else",
":",
"return",
"None",
"try",
":",
"yy",
"=",
"int",
"(",
"yy",
")",
"dd",
"=",
"int",
"(",
"dd",
")",
"thh",
"=",
"int",
"(",
"thh",
")",
"tmm",
"=",
"int",
"(",
"tmm",
")",
"tss",
"=",
"int",
"(",
"tss",
")",
"except",
"ValueError",
":",
"return",
"None",
"tzoffset",
"=",
"None",
"tz",
"=",
"tz",
".",
"upper",
"(",
")",
"if",
"tz",
"in",
"_timezones",
":",
"tzoffset",
"=",
"_timezones",
"[",
"tz",
"]",
"else",
":",
"try",
":",
"tzoffset",
"=",
"int",
"(",
"tz",
")",
"except",
"ValueError",
":",
"pass",
"# Convert a timezone offset into seconds ; -0500 -> -18000",
"if",
"tzoffset",
":",
"if",
"tzoffset",
"<",
"0",
":",
"tzsign",
"=",
"-",
"1",
"tzoffset",
"=",
"-",
"tzoffset",
"else",
":",
"tzsign",
"=",
"1",
"tzoffset",
"=",
"tzsign",
"*",
"(",
"(",
"tzoffset",
"//",
"100",
")",
"*",
"3600",
"+",
"(",
"tzoffset",
"%",
"100",
")",
"*",
"60",
")",
"return",
"(",
"yy",
",",
"mm",
",",
"dd",
",",
"thh",
",",
"tmm",
",",
"tss",
",",
"0",
",",
"1",
",",
"0",
",",
"tzoffset",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/rfc822.py#L850-L932 | |
gwaldron/osgearth | 4c521857d59a69743e4a9cedba00afe570f984e8 | src/third_party/tinygltf/deps/cpplint.py | python | _BlockInfo.CheckEnd | (self, filename, clean_lines, linenum, error) | Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Run checks that applies to text after the closing brace. | [
"Run",
"checks",
"that",
"applies",
"to",
"text",
"after",
"the",
"closing",
"brace",
"."
] | def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    # Intentional no-op in the base class; block types that need
    # end-of-block validation override this method.
    pass | [
"def",
"CheckEnd",
"(",
"self",
",",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"pass"
] | https://github.com/gwaldron/osgearth/blob/4c521857d59a69743e4a9cedba00afe570f984e8/src/third_party/tinygltf/deps/cpplint.py#L2021-L2032 | ||
crosslife/OpenBird | 9e0198a1a2295f03fa1e8676e216e22c9c7d380b | cocos2d/tools/bindings-generator/clang/cindex.py | python | Cursor.lexical_parent | (self) | return self._lexical_parent | Return the lexical parent for this cursor. | Return the lexical parent for this cursor. | [
"Return",
"the",
"lexical",
"parent",
"for",
"this",
"cursor",
"."
] | def lexical_parent(self):
    """Return the lexical parent for this cursor.

    The result is computed lazily via libclang on first access and
    cached on the instance for subsequent calls.
    """
    if not hasattr(self, '_lexical_parent'):
        self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)
    return self._lexical_parent | [
"def",
"lexical_parent",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_lexical_parent'",
")",
":",
"self",
".",
"_lexical_parent",
"=",
"conf",
".",
"lib",
".",
"clang_getCursorLexicalParent",
"(",
"self",
")",
"return",
"self",
".",
"_lexical_parent"
] | https://github.com/crosslife/OpenBird/blob/9e0198a1a2295f03fa1e8676e216e22c9c7d380b/cocos2d/tools/bindings-generator/clang/cindex.py#L1408-L1413 | |
RamadhanAmizudin/malware | 2c6c53c8b0d556f5d8078d6ca0fc4448f4697cf1 | Fuzzbunch/fuzzbunch/pyreadline/modes/basemode.py | python | BaseMode.forward_char | (self, e) | Move forward a character. | Move forward a character. | [
"Move",
"forward",
"a",
"character",
"."
] | def forward_char(self, e): # (C-f)
    '''Move forward a character. '''
    # Delegate the cursor movement to the line buffer; `argument_reset`
    # supplies (and resets) the accumulated numeric repeat count.
    self.l_buffer.forward_char(self.argument_reset) | [
"def",
"forward_char",
"(",
"self",
",",
"e",
")",
":",
"# (C-f)",
"self",
".",
"l_buffer",
".",
"forward_char",
"(",
"self",
".",
"argument_reset",
")"
] | https://github.com/RamadhanAmizudin/malware/blob/2c6c53c8b0d556f5d8078d6ca0fc4448f4697cf1/Fuzzbunch/fuzzbunch/pyreadline/modes/basemode.py#L245-L247 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py3/sklearn/preprocessing/_data.py | python | Binarizer.transform | (self, X, copy=None) | return binarize(X, threshold=self.threshold, copy=copy) | Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
copy : bool
Copy the input X or not. | Binarize each element of X | [
"Binarize",
"each",
"element",
"of",
"X"
] | def transform(self, X, copy=None):
    """Binarize each element of X

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    copy : bool
        Copy the input X or not.

    Returns
    -------
    X_tr : {array-like, sparse matrix}, shape [n_samples, n_features]
        The binarized data.
    """
    # A per-call `copy` argument overrides the estimator-level default.
    copy = copy if copy is not None else self.copy
    return binarize(X, threshold=self.threshold, copy=copy) | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"copy",
"=",
"None",
")",
":",
"copy",
"=",
"copy",
"if",
"copy",
"is",
"not",
"None",
"else",
"self",
".",
"copy",
"return",
"binarize",
"(",
"X",
",",
"threshold",
"=",
"self",
".",
"threshold",
",",
"copy",
"=",
"copy",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/preprocessing/_data.py#L1951-L1965 | |
crosslife/OpenBird | 9e0198a1a2295f03fa1e8676e216e22c9c7d380b | cocos2d/tools/bindings-generator/clang/cindex.py | python | Cursor.hash | (self) | return self._hash | Returns a hash of the cursor as an int. | Returns a hash of the cursor as an int. | [
"Returns",
"a",
"hash",
"of",
"the",
"cursor",
"as",
"an",
"int",
"."
] | def hash(self):
    """Returns a hash of the cursor as an int.

    Computed lazily via libclang on first access and cached on the
    instance for subsequent calls.
    """
    if not hasattr(self, '_hash'):
        self._hash = conf.lib.clang_hashCursor(self)
    return self._hash | [
"def",
"hash",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_hash'",
")",
":",
"self",
".",
"_hash",
"=",
"conf",
".",
"lib",
".",
"clang_hashCursor",
"(",
"self",
")",
"return",
"self",
".",
"_hash"
] | https://github.com/crosslife/OpenBird/blob/9e0198a1a2295f03fa1e8676e216e22c9c7d380b/cocos2d/tools/bindings-generator/clang/cindex.py#L1392-L1397 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/metrics/histograms/update_extension_functions.py | python | ReadHistogramValues | (filename) | return result | Returns a list of pairs (label, value) corresponding to HistogramValue.
Reads the extension_functions_histogram_value.h file, locates the
HistogramValue enum definition and returns a pair for each entry. | Returns a list of pairs (label, value) corresponding to HistogramValue. | [
"Returns",
"a",
"list",
"of",
"pairs",
"(",
"label",
"value",
")",
"corresponding",
"to",
"HistogramValue",
"."
def ReadHistogramValues(filename):
  """Returns a list of pairs (label, value) corresponding to HistogramValue.

  Reads the extension_functions_histogram_value.h file, locates the
  HistogramValue enum definition and returns a pair for each entry.

  Args:
    filename: Path to the header file containing the HistogramValue enum.

  Returns:
    A list of (label, value) tuples, in enum-declaration order.
  """
  # Read the file as a list of lines
  with open(filename) as f:
    content = f.readlines()

  # Locate the enum definition and collect all entries in it.
  inside_enum = False  # We haven't found the enum definition yet
  enum_value = 0  # Initialized up front so it is always bound.
  result = []
  for line in content:
    line = line.strip()
    if inside_enum:
      # Exit condition: we reached last enum value
      if re.match(ENUM_END_MARKER, line):
        inside_enum = False
      else:
        # Inside enum: generate new xml entry. Raw string: the original
        # used "^([\w]+)", where '\w' is an invalid string escape.
        label = ExtractRegexGroup(line, r"^([\w]+)")
        if label:
          result.append((label, enum_value))
          enum_value += 1
    elif re.match(ENUM_START_MARKER, line):
      inside_enum = True
      enum_value = 0  # Start at 'UNKNOWN'
  return result
"def",
"ReadHistogramValues",
"(",
"filename",
")",
":",
"# Read the file as a list of lines",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"readlines",
"(",
")",
"# Locate the enum definition and collect all entries in it",
"inside_enum",
"=",
"False",
"# We haven't found the enum definition yet",
"result",
"=",
"[",
"]",
"for",
"line",
"in",
"content",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"inside_enum",
":",
"# Exit condition: we reached last enum value",
"if",
"re",
".",
"match",
"(",
"ENUM_END_MARKER",
",",
"line",
")",
":",
"inside_enum",
"=",
"False",
"else",
":",
"# Inside enum: generate new xml entry",
"label",
"=",
"ExtractRegexGroup",
"(",
"line",
".",
"strip",
"(",
")",
",",
"\"^([\\w]+)\"",
")",
"if",
"label",
":",
"result",
".",
"append",
"(",
"(",
"label",
",",
"enum_value",
")",
")",
"enum_value",
"+=",
"1",
"else",
":",
"if",
"re",
".",
"match",
"(",
"ENUM_START_MARKER",
",",
"line",
")",
":",
"inside_enum",
"=",
"True",
"enum_value",
"=",
"0",
"# Start at 'UNKNOWN'",
"return",
"result"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/metrics/histograms/update_extension_functions.py#L45-L75 | |
etotheipi/BitcoinArmory | 2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98 | announcefetch.py | python | AnnounceDataFetcher.__runFetchLoop | (self) | All this code runs in a separate thread (your app will freeze if
you don't call this with the async=True argument). It will
periodically check for new announce data, and update members that
are visible to other threads.
By default, it will check once per hour. If you call
self.forceCheckFlag.set()
It will skip the time check and force a download right now.
Using getAnnounceFile(forceCheck=True) will do this for you,
and will wait until the operation completes before returning
the result. | All this code runs in a separate thread (your app will freeze if
you don't call this with the async=True argument). It will
periodically check for new announce data, and update members that
are visible to other threads. | [
"All",
"this",
"code",
"runs",
"in",
"a",
"separate",
"thread",
"(",
"your",
"app",
"will",
"freeze",
"if",
"you",
"don",
"t",
"call",
"this",
"with",
"the",
"async",
"=",
"True",
"argument",
")",
".",
"It",
"will",
"periodically",
"check",
"for",
"new",
"announce",
"data",
"and",
"update",
"members",
"that",
"are",
"visible",
"to",
"other",
"threads",
"."
def __runFetchLoop(self):
   """
   All this code runs in a separate thread (your app will freeze if
   you don't call this with the async=True argument). It will
   periodically check for new announce data, and update members that
   are visible to other threads.

   By default, it will check once per hour. If you call
       self.forceCheckFlag.set()
   It will skip the time check and force a download right now.
   Using getAnnounceFile(forceCheck=True) will do this for you,
   and will wait until the operation completes before returning
   the result.
   """
   while True:
      try:
         # Exit the loop (ending the thread) on shutdown, or when the
         # fetcher was disabled after too many consecutive failures.
         if self.isDisabled() or self.shutdownFlag.isSet():
            self.shutdownFlag.clear()
            break

         ##### Only check once per hour unless force flag is set
         if not self.forceCheckFlag.isSet():
            if RightNow() - self.lastFetch < self.fetchInterval:
               continue
         else:
            LOGINFO('Forcing announce data fetch')
            self.forceIsFinished.clear()

         self.loopIsIdle.clear()
         self.__runFetchSequence()
      except Exception:
         # Was a bare "except:", which also swallows SystemExit and
         # KeyboardInterrupt and can keep this thread spinning during
         # interpreter shutdown; catch Exception instead.
         self.numConsecutiveExceptions += 1
         LOGEXCEPT('Failed download')
         if self.numConsecutiveExceptions > 20:
            self.setDisabled(True)
      finally:
         # Always mark the loop idle and throttle polling, even on the
         # `continue` path above (finally runs before the loop re-enters).
         self.loopIsIdle.set()
         time.sleep(0.5)
"def",
"__runFetchLoop",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"if",
"self",
".",
"isDisabled",
"(",
")",
"or",
"self",
".",
"shutdownFlag",
".",
"isSet",
"(",
")",
":",
"self",
".",
"shutdownFlag",
".",
"clear",
"(",
")",
"break",
"##### Only check once per hour unless force flag is set",
"if",
"not",
"self",
".",
"forceCheckFlag",
".",
"isSet",
"(",
")",
":",
"if",
"RightNow",
"(",
")",
"-",
"self",
".",
"lastFetch",
"<",
"self",
".",
"fetchInterval",
":",
"continue",
"else",
":",
"LOGINFO",
"(",
"'Forcing announce data fetch'",
")",
"self",
".",
"forceIsFinished",
".",
"clear",
"(",
")",
"self",
".",
"loopIsIdle",
".",
"clear",
"(",
")",
"self",
".",
"__runFetchSequence",
"(",
")",
"except",
":",
"self",
".",
"numConsecutiveExceptions",
"+=",
"1",
"LOGEXCEPT",
"(",
"'Failed download'",
")",
"if",
"self",
".",
"numConsecutiveExceptions",
">",
"20",
":",
"self",
".",
"setDisabled",
"(",
"True",
")",
"finally",
":",
"self",
".",
"loopIsIdle",
".",
"set",
"(",
")",
"time",
".",
"sleep",
"(",
"0.5",
")"
] | https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/announcefetch.py#L362-L402 | ||
networkit/networkit | 695b7a786a894a303fa8587597d5ef916e797729 | networkit/stopwatch.py | python | Timer.start_time | (self) | return self.__start | The time at which the Timer instance was created. | The time at which the Timer instance was created. | [
"The",
"time",
"at",
"which",
"the",
"Timer",
"instance",
"was",
"created",
"."
] | def start_time(self):
    """The time at which the Timer instance was created.

    Returns the value captured in `self.__start`; it is read-only here.
    """
    return self.__start | [
"def",
"start_time",
"(",
"self",
")",
":",
"return",
"self",
".",
"__start"
] | https://github.com/networkit/networkit/blob/695b7a786a894a303fa8587597d5ef916e797729/networkit/stopwatch.py#L59-L62 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_controls.py | python | TextUrlEvent.GetURLEnd | (*args, **kwargs) | return _controls_.TextUrlEvent_GetURLEnd(*args, **kwargs) | GetURLEnd(self) -> long | GetURLEnd(self) -> long | [
"GetURLEnd",
"(",
"self",
")",
"-",
">",
"long"
] | def GetURLEnd(*args, **kwargs):
    """GetURLEnd(self) -> long"""
    # SWIG-generated proxy: forwards directly to the native
    # wxWidgets implementation.
    return _controls_.TextUrlEvent_GetURLEnd(*args, **kwargs) | [
"def",
"GetURLEnd",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TextUrlEvent_GetURLEnd",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L2116-L2118 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/stringold.py | python | lstrip | (s) | return s.lstrip() | lstrip(s) -> string
Return a copy of the string s with leading whitespace removed. | lstrip(s) -> string | [
"lstrip",
"(",
"s",
")",
"-",
">",
"string"
] | def lstrip(s):
    """lstrip(s) -> string

    Return a copy of the string s with leading whitespace removed.
    """
    # Compatibility shim: delegates to the str method of the same name.
    return s.lstrip() | [
"def",
"lstrip",
"(",
"s",
")",
":",
"return",
"s",
".",
"lstrip",
"(",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/stringold.py#L84-L90 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/metrics_impl.py | python | precision_at_k | (labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None) | Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is in the top-k highest
`predictions`, and computing the fraction of them for which `class_id` is
indeed a correct label.
If `class_id` is not specified, we'll calculate precision as how often on
average a class among the top-k classes with the highest predicted values
of a batch entry is correct and can be found in the label for that entry.
`precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes], where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled. | Computes precision@k of the predictions with respect to sparse labels. | [
"Computes",
"precision@k",
"of",
"the",
"predictions",
"with",
"respect",
"to",
"sparse",
"labels",
"."
] | def precision_at_k(labels,
                   predictions,
                   k,
                   class_id=None,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Computes precision@k of the predictions with respect to sparse labels.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is in the top-k highest
  `predictions`, and computing the fraction of them for which `class_id` is
  indeed a correct label.
  If `class_id` is not specified, we'll calculate precision as how often on
  average a class among the top-k classes with the highest predicted values
  of a batch entry is correct and can be found in the label for that entry.

  `precision_at_k` creates two local variables,
  `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
  the precision@k frequency. This frequency is ultimately returned as
  `precision_at_<k>`: an idempotent operation that simply divides
  `true_positive_at_<k>` by total (`true_positive_at_<k>` +
  `false_positive_at_<k>`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
      The final dimension contains the logit values for each class. [D1, ... DN]
      must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes], where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.sparse_precision_at_k is not '
                       'supported when eager execution is enabled.')

  with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
                      (predictions, labels, weights)) as scope:
    # Only the indices of the top-k predictions are needed; the scores
    # themselves are discarded before delegating to precision_at_top_k.
    _, top_k_idx = nn.top_k(predictions, k)
    return precision_at_top_k(
        labels=labels,
        predictions_idx=top_k_idx,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope) | [
"def",
"precision_at_k",
"(",
"labels",
",",
"predictions",
",",
"k",
",",
"class_id",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"context",
".",
"executing_eagerly",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'tf.metrics.sparse_precision_at_k is not '",
"'supported when eager execution is enabled.'",
")",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"_at_k_name",
"(",
"'precision'",
",",
"k",
",",
"class_id",
"=",
"class_id",
")",
",",
"(",
"predictions",
",",
"labels",
",",
"weights",
")",
")",
"as",
"scope",
":",
"_",
",",
"top_k_idx",
"=",
"nn",
".",
"top_k",
"(",
"predictions",
",",
"k",
")",
"return",
"precision_at_top_k",
"(",
"labels",
"=",
"labels",
",",
"predictions_idx",
"=",
"top_k_idx",
",",
"k",
"=",
"k",
",",
"class_id",
"=",
"class_id",
",",
"weights",
"=",
"weights",
",",
"metrics_collections",
"=",
"metrics_collections",
",",
"updates_collections",
"=",
"updates_collections",
",",
"name",
"=",
"scope",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/metrics_impl.py#L3529-L3619 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/richtext.py | python | RichTextParagraph.GetLines | (*args, **kwargs) | return _richtext.RichTextParagraph_GetLines(*args, **kwargs) | GetLines(self) -> wxRichTextLineList | GetLines(self) -> wxRichTextLineList | [
"GetLines",
"(",
"self",
")",
"-",
">",
"wxRichTextLineList"
] | def GetLines(*args, **kwargs):
    """GetLines(self) -> wxRichTextLineList"""
    # SWIG-generated proxy: forwards directly to the native
    # wxWidgets implementation.
    return _richtext.RichTextParagraph_GetLines(*args, **kwargs) | [
"def",
"GetLines",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"RichTextParagraph_GetLines",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L1983-L1985 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/stata.py | python | StataValueLabel._encode | (self, s) | return s.encode(self._encoding) | Python 3 compatibility shim | Python 3 compatibility shim | [
"Python",
"3",
"compatibility",
"shim"
] | def _encode(self, s):
    """
    Python 3 compatibility shim

    Encodes `s` to bytes using the writer's configured encoding
    (`self._encoding`, set elsewhere on the instance).
    """
    return s.encode(self._encoding) | [
"def",
"_encode",
"(",
"self",
",",
"s",
")",
":",
"return",
"s",
".",
"encode",
"(",
"self",
".",
"_encoding",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/stata.py#L655-L659 | |
okex/V3-Open-API-SDK | c5abb0db7e2287718e0055e17e57672ce0ec7fd9 | okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/resolve.py | python | Resolver.get_installation_order | (self, req_set) | return order | Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees. | Create the installation order. | [
"Create",
"the",
"installation",
"order",
"."
] | def get_installation_order(self, req_set):
    # type: (RequirementSet) -> List[InstallRequirement]
    """Create the installation order.

    The installation order is topological - requirements are installed
    before the requiring thing. We break cycles at an arbitrary point,
    and make no other guarantees.
    """
    # The current implementation, which we may change at any point
    # installs the user specified things in the order given, except when
    # dependencies must come earlier to achieve topological order.
    order = []
    ordered_reqs = set()  # type: Set[InstallRequirement]

    def schedule(req):
        # Depth-first post-order walk: a requirement is appended only
        # after all of its discovered dependencies.
        if req.satisfied_by or req in ordered_reqs:
            return
        if req.constraint:
            return
        # Mark before recursing so dependency cycles terminate.
        ordered_reqs.add(req)
        for dep in self._discovered_dependencies[req.name]:
            schedule(dep)
        order.append(req)

    for install_req in req_set.requirements.values():
        schedule(install_req)
    return order | [
"def",
"get_installation_order",
"(",
"self",
",",
"req_set",
")",
":",
"# type: (RequirementSet) -> List[InstallRequirement]",
"# The current implementation, which we may change at any point",
"# installs the user specified things in the order given, except when",
"# dependencies must come earlier to achieve topological order.",
"order",
"=",
"[",
"]",
"ordered_reqs",
"=",
"set",
"(",
")",
"# type: Set[InstallRequirement]",
"def",
"schedule",
"(",
"req",
")",
":",
"if",
"req",
".",
"satisfied_by",
"or",
"req",
"in",
"ordered_reqs",
":",
"return",
"if",
"req",
".",
"constraint",
":",
"return",
"ordered_reqs",
".",
"add",
"(",
"req",
")",
"for",
"dep",
"in",
"self",
".",
"_discovered_dependencies",
"[",
"req",
".",
"name",
"]",
":",
"schedule",
"(",
"dep",
")",
"order",
".",
"append",
"(",
"req",
")",
"for",
"install_req",
"in",
"req_set",
".",
"requirements",
".",
"values",
"(",
")",
":",
"schedule",
"(",
"install_req",
")",
"return",
"order"
] | https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/resolve.py#L367-L393 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_misc.py | python | RegisterId | (*args, **kwargs) | return _misc_.RegisterId(*args, **kwargs) | RegisterId(long id) | RegisterId(long id) | [
"RegisterId",
"(",
"long",
"id",
")"
] | def RegisterId(*args, **kwargs):
"""RegisterId(long id)"""
return _misc_.RegisterId(*args, **kwargs) | [
"def",
"RegisterId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"RegisterId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_misc.py#L286-L288 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/scimath.py | python | arcsin | (x) | return nx.arcsin(x) | Compute the inverse sine of x.
Return the "principal value" (for a description of this, see
`numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
`abs(x) <= 1`, this is a real number in the closed interval
:math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is
returned.
Parameters
----------
x : array_like or scalar
The value(s) whose arcsin is (are) required.
Returns
-------
out : ndarray or scalar
The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
is `out`, otherwise an array object is returned.
See Also
--------
numpy.arcsin
Notes
-----
For an arcsin() that returns ``NAN`` when real `x` is not in the
interval ``[-1,1]``, use `numpy.arcsin`.
Examples
--------
>>> np.set_printoptions(precision=4)
>>> np.emath.arcsin(0)
0.0
>>> np.emath.arcsin([0,1])
array([0. , 1.5708]) | Compute the inverse sine of x. | [
"Compute",
"the",
"inverse",
"sine",
"of",
"x",
"."
] | def arcsin(x):
"""
Compute the inverse sine of x.
Return the "principal value" (for a description of this, see
`numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
`abs(x) <= 1`, this is a real number in the closed interval
:math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is
returned.
Parameters
----------
x : array_like or scalar
The value(s) whose arcsin is (are) required.
Returns
-------
out : ndarray or scalar
The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
is `out`, otherwise an array object is returned.
See Also
--------
numpy.arcsin
Notes
-----
For an arcsin() that returns ``NAN`` when real `x` is not in the
interval ``[-1,1]``, use `numpy.arcsin`.
Examples
--------
>>> np.set_printoptions(precision=4)
>>> np.emath.arcsin(0)
0.0
>>> np.emath.arcsin([0,1])
array([0. , 1.5708])
"""
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x) | [
"def",
"arcsin",
"(",
"x",
")",
":",
"x",
"=",
"_fix_real_abs_gt_1",
"(",
"x",
")",
"return",
"nx",
".",
"arcsin",
"(",
"x",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/scimath.py#L510-L552 | |
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Draft/draftguitools/gui_scale.py | python | Scale.Activated | (self) | Execute when the command is called. | Execute when the command is called. | [
"Execute",
"when",
"the",
"command",
"is",
"called",
"."
] | def Activated(self):
"""Execute when the command is called."""
super(Scale, self).Activated(name="Scale")
if not self.ui:
return
self.ghosts = []
self.get_object_selection() | [
"def",
"Activated",
"(",
"self",
")",
":",
"super",
"(",
"Scale",
",",
"self",
")",
".",
"Activated",
"(",
"name",
"=",
"\"Scale\"",
")",
"if",
"not",
"self",
".",
"ui",
":",
"return",
"self",
".",
"ghosts",
"=",
"[",
"]",
"self",
".",
"get_object_selection",
"(",
")"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftguitools/gui_scale.py#L74-L80 | ||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | models/AI-Model-Zoo/caffe-xilinx/tools/extra/parse_log.py | python | parse_line_for_net_output | (regex_obj, row, row_dict_list,
line, iteration, seconds, learning_rate) | return row_dict_list, row | Parse a single line for training or test output
Returns a a tuple with (row_dict_list, row)
row: may be either a new row or an augmented version of the current row
row_dict_list: may be either the current row_dict_list or an augmented
version of the current row_dict_list | Parse a single line for training or test output | [
"Parse",
"a",
"single",
"line",
"for",
"training",
"or",
"test",
"output"
] | def parse_line_for_net_output(regex_obj, row, row_dict_list,
line, iteration, seconds, learning_rate):
"""Parse a single line for training or test output
Returns a a tuple with (row_dict_list, row)
row: may be either a new row or an augmented version of the current row
row_dict_list: may be either the current row_dict_list or an augmented
version of the current row_dict_list
"""
output_match = regex_obj.search(line)
if output_match:
if not row or row['NumIters'] != iteration:
# Push the last row and start a new one
if row:
# If we're on a new iteration, push the last row
# This will probably only happen for the first row; otherwise
# the full row checking logic below will push and clear full
# rows
row_dict_list.append(row)
row = OrderedDict([
('NumIters', iteration),
('Seconds', seconds),
('LearningRate', learning_rate)
])
# output_num is not used; may be used in the future
# output_num = output_match.group(1)
output_name = output_match.group(2)
output_val = output_match.group(3)
row[output_name] = float(output_val)
if row and len(row_dict_list) >= 1 and len(row) == len(row_dict_list[0]):
# The row is full, based on the fact that it has the same number of
# columns as the first row; append it to the list
row_dict_list.append(row)
row = None
return row_dict_list, row | [
"def",
"parse_line_for_net_output",
"(",
"regex_obj",
",",
"row",
",",
"row_dict_list",
",",
"line",
",",
"iteration",
",",
"seconds",
",",
"learning_rate",
")",
":",
"output_match",
"=",
"regex_obj",
".",
"search",
"(",
"line",
")",
"if",
"output_match",
":",
"if",
"not",
"row",
"or",
"row",
"[",
"'NumIters'",
"]",
"!=",
"iteration",
":",
"# Push the last row and start a new one",
"if",
"row",
":",
"# If we're on a new iteration, push the last row",
"# This will probably only happen for the first row; otherwise",
"# the full row checking logic below will push and clear full",
"# rows",
"row_dict_list",
".",
"append",
"(",
"row",
")",
"row",
"=",
"OrderedDict",
"(",
"[",
"(",
"'NumIters'",
",",
"iteration",
")",
",",
"(",
"'Seconds'",
",",
"seconds",
")",
",",
"(",
"'LearningRate'",
",",
"learning_rate",
")",
"]",
")",
"# output_num is not used; may be used in the future",
"# output_num = output_match.group(1)",
"output_name",
"=",
"output_match",
".",
"group",
"(",
"2",
")",
"output_val",
"=",
"output_match",
".",
"group",
"(",
"3",
")",
"row",
"[",
"output_name",
"]",
"=",
"float",
"(",
"output_val",
")",
"if",
"row",
"and",
"len",
"(",
"row_dict_list",
")",
">=",
"1",
"and",
"len",
"(",
"row",
")",
"==",
"len",
"(",
"row_dict_list",
"[",
"0",
"]",
")",
":",
"# The row is full, based on the fact that it has the same number of",
"# columns as the first row; append it to the list",
"row_dict_list",
".",
"append",
"(",
"row",
")",
"row",
"=",
"None",
"return",
"row_dict_list",
",",
"row"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/models/AI-Model-Zoo/caffe-xilinx/tools/extra/parse_log.py#L77-L116 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/build/waf-1.7.13/waflib/Runner.py | python | TaskConsumer.__init__ | (self) | Obtain :py:class:`waflib.Task.TaskBase` instances from this queue. | Obtain :py:class:`waflib.Task.TaskBase` instances from this queue. | [
"Obtain",
":",
"py",
":",
"class",
":",
"waflib",
".",
"Task",
".",
"TaskBase",
"instances",
"from",
"this",
"queue",
"."
] | def __init__(self):
Utils.threading.Thread.__init__(self)
self.ready = Queue()
"""
Obtain :py:class:`waflib.Task.TaskBase` instances from this queue.
"""
self.setDaemon(1)
self.start() | [
"def",
"__init__",
"(",
"self",
")",
":",
"Utils",
".",
"threading",
".",
"Thread",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"ready",
"=",
"Queue",
"(",
")",
"self",
".",
"setDaemon",
"(",
"1",
")",
"self",
".",
"start",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/Runner.py#L27-L34 | ||
SpaceNetChallenge/BuildingDetectors | 3def3c44b5847c744cd2f3356182892d92496579 | qinhaifang/src/lib/caffeWrapper/SolverWrapper.py | python | SolverWrapper.snapshot | (self) | Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time. | Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time. | [
"Take",
"a",
"snapshot",
"of",
"the",
"network",
"after",
"unnormalizing",
"the",
"learned",
"bounding",
"-",
"box",
"regression",
"weights",
".",
"This",
"enables",
"easy",
"use",
"at",
"test",
"-",
"time",
"."
] | def snapshot(self):
""" Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.solver.net
# I'm wondering whether I still need to keep it if only faster-RCNN is needed
scale_bbox_params = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
'bbox_pred' in net.params)
if scale_bbox_params:
# save original values
orig_0 = net.params['bbox_pred'][0].data.copy()
orig_1 = net.params['bbox_pred'][1].data.copy()
if cfg.CFM_MODE:
cfm_mean = self.bbox_means.ravel()
cfm_std = self.bbox_stds.ravel()
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data * cfm_std[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data * cfm_std + cfm_mean)
else:
# scale and shift with transform reg unnormalization; then save snapshot
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data *
self.bbox_stds[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data *
self.bbox_stds + self.bbox_means)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# If we specify an infix in the configuration
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
# For snapshot caffemodel, since MNC use shared parameters
# but caffe save parameters according to layer name instead of
# parameter names, its size will exceed 2GB, which make program crash
# Luckily, we may save it to HDF5 to avoid this issues
# not save .h5 caffemodel
if not cfg.MNC_MODE:
filename = os.path.join(self.output_dir, filename)
net.save(str(filename))
else:
filename = os.path.join(self.output_dir, filename + '.h5')
net.save_to_hdf5(str(filename), False)
print 'Wrote snapshot to: {:s}'.format(filename)
if scale_bbox_params:
# restore net to original state
net.params['bbox_pred'][0].data[...] = orig_0
net.params['bbox_pred'][1].data[...] = orig_1 | [
"def",
"snapshot",
"(",
"self",
")",
":",
"net",
"=",
"self",
".",
"solver",
".",
"net",
"# I'm wondering whether I still need to keep it if only faster-RCNN is needed",
"scale_bbox_params",
"=",
"(",
"cfg",
".",
"TRAIN",
".",
"BBOX_REG",
"and",
"cfg",
".",
"TRAIN",
".",
"BBOX_NORMALIZE_TARGETS",
"and",
"'bbox_pred'",
"in",
"net",
".",
"params",
")",
"if",
"scale_bbox_params",
":",
"# save original values",
"orig_0",
"=",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"0",
"]",
".",
"data",
".",
"copy",
"(",
")",
"orig_1",
"=",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"1",
"]",
".",
"data",
".",
"copy",
"(",
")",
"if",
"cfg",
".",
"CFM_MODE",
":",
"cfm_mean",
"=",
"self",
".",
"bbox_means",
".",
"ravel",
"(",
")",
"cfm_std",
"=",
"self",
".",
"bbox_stds",
".",
"ravel",
"(",
")",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"0",
"]",
".",
"data",
"[",
"...",
"]",
"=",
"(",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"0",
"]",
".",
"data",
"*",
"cfm_std",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"1",
"]",
".",
"data",
"[",
"...",
"]",
"=",
"(",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"1",
"]",
".",
"data",
"*",
"cfm_std",
"+",
"cfm_mean",
")",
"else",
":",
"# scale and shift with transform reg unnormalization; then save snapshot",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"0",
"]",
".",
"data",
"[",
"...",
"]",
"=",
"(",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"0",
"]",
".",
"data",
"*",
"self",
".",
"bbox_stds",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"1",
"]",
".",
"data",
"[",
"...",
"]",
"=",
"(",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"1",
"]",
".",
"data",
"*",
"self",
".",
"bbox_stds",
"+",
"self",
".",
"bbox_means",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"output_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"output_dir",
")",
"# If we specify an infix in the configuration",
"infix",
"=",
"(",
"'_'",
"+",
"cfg",
".",
"TRAIN",
".",
"SNAPSHOT_INFIX",
"if",
"cfg",
".",
"TRAIN",
".",
"SNAPSHOT_INFIX",
"!=",
"''",
"else",
"''",
")",
"filename",
"=",
"(",
"self",
".",
"solver_param",
".",
"snapshot_prefix",
"+",
"infix",
"+",
"'_iter_{:d}'",
".",
"format",
"(",
"self",
".",
"solver",
".",
"iter",
")",
"+",
"'.caffemodel'",
")",
"# For snapshot caffemodel, since MNC use shared parameters",
"# but caffe save parameters according to layer name instead of",
"# parameter names, its size will exceed 2GB, which make program crash",
"# Luckily, we may save it to HDF5 to avoid this issues",
"# not save .h5 caffemodel",
"if",
"not",
"cfg",
".",
"MNC_MODE",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"output_dir",
",",
"filename",
")",
"net",
".",
"save",
"(",
"str",
"(",
"filename",
")",
")",
"else",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"output_dir",
",",
"filename",
"+",
"'.h5'",
")",
"net",
".",
"save_to_hdf5",
"(",
"str",
"(",
"filename",
")",
",",
"False",
")",
"print",
"'Wrote snapshot to: {:s}'",
".",
"format",
"(",
"filename",
")",
"if",
"scale_bbox_params",
":",
"# restore net to original state",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"0",
"]",
".",
"data",
"[",
"...",
"]",
"=",
"orig_0",
"net",
".",
"params",
"[",
"'bbox_pred'",
"]",
"[",
"1",
"]",
".",
"data",
"[",
"...",
"]",
"=",
"orig_1"
] | https://github.com/SpaceNetChallenge/BuildingDetectors/blob/3def3c44b5847c744cd2f3356182892d92496579/qinhaifang/src/lib/caffeWrapper/SolverWrapper.py#L70-L124 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | SpinCtrl.SetValue | (*args, **kwargs) | return _controls_.SpinCtrl_SetValue(*args, **kwargs) | SetValue(self, int value) | SetValue(self, int value) | [
"SetValue",
"(",
"self",
"int",
"value",
")"
] | def SetValue(*args, **kwargs):
"""SetValue(self, int value)"""
return _controls_.SpinCtrl_SetValue(*args, **kwargs) | [
"def",
"SetValue",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"SpinCtrl_SetValue",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L2368-L2370 | |
nileshkulkarni/csm | 0e6e0e7d4f725fd36f2414c0be4b9d83197aa1fc | csm/preprocess/pascal/p3d_uv_to_vertex_id_image.py | python | triangle_direction_intersection | (tri, trg) | Finds where an origin-centered ray going in direction trg intersects a triangle.
Args:
tri: 3 X 3 vertex locations. tri[0, :] is 0th vertex.
Returns:
alpha, beta, gamma | Finds where an origin-centered ray going in direction trg intersects a triangle.
Args:
tri: 3 X 3 vertex locations. tri[0, :] is 0th vertex.
Returns:
alpha, beta, gamma | [
"Finds",
"where",
"an",
"origin",
"-",
"centered",
"ray",
"going",
"in",
"direction",
"trg",
"intersects",
"a",
"triangle",
".",
"Args",
":",
"tri",
":",
"3",
"X",
"3",
"vertex",
"locations",
".",
"tri",
"[",
"0",
":",
"]",
"is",
"0th",
"vertex",
".",
"Returns",
":",
"alpha",
"beta",
"gamma"
] | def triangle_direction_intersection(tri, trg):
'''
Finds where an origin-centered ray going in direction trg intersects a triangle.
Args:
tri: 3 X 3 vertex locations. tri[0, :] is 0th vertex.
Returns:
alpha, beta, gamma
'''
p0 = np.copy(tri[0, :])
# Don't normalize
d1 = np.copy(tri[1, :]) - p0;
d2 = np.copy(tri[2, :]) - p0;
d = trg / np.linalg.norm(trg)
mat = np.stack([d1, d2, d], axis=1)
try:
inv_mat = np.linalg.inv(mat)
except np.linalg.LinAlgError:
return False, 0
# inv_mat = np.linalg.inv(mat)
a_b_mg = -1*np.matmul(inv_mat, p0)
is_valid = (a_b_mg[0] >= 0) and (a_b_mg[1] >= 0) and ((a_b_mg[0] + a_b_mg[1]) <= 1) and (a_b_mg[2] < 0)
if is_valid:
return True, -a_b_mg[2]*d
else:
return False, 0 | [
"def",
"triangle_direction_intersection",
"(",
"tri",
",",
"trg",
")",
":",
"p0",
"=",
"np",
".",
"copy",
"(",
"tri",
"[",
"0",
",",
":",
"]",
")",
"# Don't normalize",
"d1",
"=",
"np",
".",
"copy",
"(",
"tri",
"[",
"1",
",",
":",
"]",
")",
"-",
"p0",
"d2",
"=",
"np",
".",
"copy",
"(",
"tri",
"[",
"2",
",",
":",
"]",
")",
"-",
"p0",
"d",
"=",
"trg",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"trg",
")",
"mat",
"=",
"np",
".",
"stack",
"(",
"[",
"d1",
",",
"d2",
",",
"d",
"]",
",",
"axis",
"=",
"1",
")",
"try",
":",
"inv_mat",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"mat",
")",
"except",
"np",
".",
"linalg",
".",
"LinAlgError",
":",
"return",
"False",
",",
"0",
"# inv_mat = np.linalg.inv(mat)",
"a_b_mg",
"=",
"-",
"1",
"*",
"np",
".",
"matmul",
"(",
"inv_mat",
",",
"p0",
")",
"is_valid",
"=",
"(",
"a_b_mg",
"[",
"0",
"]",
">=",
"0",
")",
"and",
"(",
"a_b_mg",
"[",
"1",
"]",
">=",
"0",
")",
"and",
"(",
"(",
"a_b_mg",
"[",
"0",
"]",
"+",
"a_b_mg",
"[",
"1",
"]",
")",
"<=",
"1",
")",
"and",
"(",
"a_b_mg",
"[",
"2",
"]",
"<",
"0",
")",
"if",
"is_valid",
":",
"return",
"True",
",",
"-",
"a_b_mg",
"[",
"2",
"]",
"*",
"d",
"else",
":",
"return",
"False",
",",
"0"
] | https://github.com/nileshkulkarni/csm/blob/0e6e0e7d4f725fd36f2414c0be4b9d83197aa1fc/csm/preprocess/pascal/p3d_uv_to_vertex_id_image.py#L21-L49 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/html.py | python | HtmlWindow.SetPage | (*args, **kwargs) | return _html.HtmlWindow_SetPage(*args, **kwargs) | SetPage(self, String source) -> bool | SetPage(self, String source) -> bool | [
"SetPage",
"(",
"self",
"String",
"source",
")",
"-",
">",
"bool"
] | def SetPage(*args, **kwargs):
"""SetPage(self, String source) -> bool"""
return _html.HtmlWindow_SetPage(*args, **kwargs) | [
"def",
"SetPage",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_html",
".",
"HtmlWindow_SetPage",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/html.py#L986-L988 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/polynomial/hermite_e.py | python | hermeint | (c, m=1, k=[], lbnd=0, scl=1, axis=0) | return c | Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
``np.ndim(scl) != 0``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary | Integrate a Hermite_e series. | [
"Integrate",
"a",
"Hermite_e",
"series",
"."
] | def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
``np.ndim(scl) != 0``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary
"""
c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt = pu._deprecate_as_int(m, "the order of integration")
iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if np.ndim(lbnd) != 0:
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c | [
"def",
"hermeint",
"(",
"c",
",",
"m",
"=",
"1",
",",
"k",
"=",
"[",
"]",
",",
"lbnd",
"=",
"0",
",",
"scl",
"=",
"1",
",",
"axis",
"=",
"0",
")",
":",
"c",
"=",
"np",
".",
"array",
"(",
"c",
",",
"ndmin",
"=",
"1",
",",
"copy",
"=",
"True",
")",
"if",
"c",
".",
"dtype",
".",
"char",
"in",
"'?bBhHiIlLqQpP'",
":",
"c",
"=",
"c",
".",
"astype",
"(",
"np",
".",
"double",
")",
"if",
"not",
"np",
".",
"iterable",
"(",
"k",
")",
":",
"k",
"=",
"[",
"k",
"]",
"cnt",
"=",
"pu",
".",
"_deprecate_as_int",
"(",
"m",
",",
"\"the order of integration\"",
")",
"iaxis",
"=",
"pu",
".",
"_deprecate_as_int",
"(",
"axis",
",",
"\"the axis\"",
")",
"if",
"cnt",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"The order of integration must be non-negative\"",
")",
"if",
"len",
"(",
"k",
")",
">",
"cnt",
":",
"raise",
"ValueError",
"(",
"\"Too many integration constants\"",
")",
"if",
"np",
".",
"ndim",
"(",
"lbnd",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"lbnd must be a scalar.\"",
")",
"if",
"np",
".",
"ndim",
"(",
"scl",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"scl must be a scalar.\"",
")",
"iaxis",
"=",
"normalize_axis_index",
"(",
"iaxis",
",",
"c",
".",
"ndim",
")",
"if",
"cnt",
"==",
"0",
":",
"return",
"c",
"c",
"=",
"np",
".",
"moveaxis",
"(",
"c",
",",
"iaxis",
",",
"0",
")",
"k",
"=",
"list",
"(",
"k",
")",
"+",
"[",
"0",
"]",
"*",
"(",
"cnt",
"-",
"len",
"(",
"k",
")",
")",
"for",
"i",
"in",
"range",
"(",
"cnt",
")",
":",
"n",
"=",
"len",
"(",
"c",
")",
"c",
"*=",
"scl",
"if",
"n",
"==",
"1",
"and",
"np",
".",
"all",
"(",
"c",
"[",
"0",
"]",
"==",
"0",
")",
":",
"c",
"[",
"0",
"]",
"+=",
"k",
"[",
"i",
"]",
"else",
":",
"tmp",
"=",
"np",
".",
"empty",
"(",
"(",
"n",
"+",
"1",
",",
")",
"+",
"c",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"c",
".",
"dtype",
")",
"tmp",
"[",
"0",
"]",
"=",
"c",
"[",
"0",
"]",
"*",
"0",
"tmp",
"[",
"1",
"]",
"=",
"c",
"[",
"0",
"]",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
")",
":",
"tmp",
"[",
"j",
"+",
"1",
"]",
"=",
"c",
"[",
"j",
"]",
"/",
"(",
"j",
"+",
"1",
")",
"tmp",
"[",
"0",
"]",
"+=",
"k",
"[",
"i",
"]",
"-",
"hermeval",
"(",
"lbnd",
",",
"tmp",
")",
"c",
"=",
"tmp",
"c",
"=",
"np",
".",
"moveaxis",
"(",
"c",
",",
"0",
",",
"iaxis",
")",
"return",
"c"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/polynomial/hermite_e.py#L653-L772 | |
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/ros/roslib/src/roslib/gentools.py | python | compute_md5_v1 | (get_deps_dict) | return _compute_hash_v1(get_deps_dict, hashlib.md5()) | Compute original V1 md5 hash for message/service. This was replaced with V2 in ROS 0.6.
@param get_deps_dict: dictionary returned by get_dependencies call
@type get_deps_dict: dict
@return: md5 hash
@rtype: str | Compute original V1 md5 hash for message/service. This was replaced with V2 in ROS 0.6. | [
"Compute",
"original",
"V1",
"md5",
"hash",
"for",
"message",
"/",
"service",
".",
"This",
"was",
"replaced",
"with",
"V2",
"in",
"ROS",
"0",
".",
"6",
"."
] | def compute_md5_v1(get_deps_dict):
"""
Compute original V1 md5 hash for message/service. This was replaced with V2 in ROS 0.6.
@param get_deps_dict: dictionary returned by get_dependencies call
@type get_deps_dict: dict
@return: md5 hash
@rtype: str
"""
import hashlib
return _compute_hash_v1(get_deps_dict, hashlib.md5()) | [
"def",
"compute_md5_v1",
"(",
"get_deps_dict",
")",
":",
"import",
"hashlib",
"return",
"_compute_hash_v1",
"(",
"get_deps_dict",
",",
"hashlib",
".",
"md5",
"(",
")",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros/roslib/src/roslib/gentools.py#L200-L209 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/extern/flatnotebook.py | python | FlatNotebook.SetPageShapeAngle | (self, page_index, angle) | Sets the angle associated to a tab. | Sets the angle associated to a tab. | [
"Sets",
"the",
"angle",
"associated",
"to",
"a",
"tab",
"."
] | def SetPageShapeAngle(self, page_index, angle):
""" Sets the angle associated to a tab. """
if page_index < 0 or page_index >= len(self._pages._pagesInfoVec):
return
if angle > 15:
return
self._pages._pagesInfoVec[page_index].SetTabAngle(angle) | [
"def",
"SetPageShapeAngle",
"(",
"self",
",",
"page_index",
",",
"angle",
")",
":",
"if",
"page_index",
"<",
"0",
"or",
"page_index",
">=",
"len",
"(",
"self",
".",
"_pages",
".",
"_pagesInfoVec",
")",
":",
"return",
"if",
"angle",
">",
"15",
":",
"return",
"self",
".",
"_pages",
".",
"_pagesInfoVec",
"[",
"page_index",
"]",
".",
"SetTabAngle",
"(",
"angle",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/flatnotebook.py#L3495-L3504 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | Point.Set | (*args, **kwargs) | return _core_.Point_Set(*args, **kwargs) | Set(self, long x, long y)
Set both the x and y properties | Set(self, long x, long y) | [
"Set",
"(",
"self",
"long",
"x",
"long",
"y",
")"
] | def Set(*args, **kwargs):
"""
Set(self, long x, long y)
Set both the x and y properties
"""
return _core_.Point_Set(*args, **kwargs) | [
"def",
"Set",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Point_Set",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L1219-L1225 | |
Slicer/Slicer | ba9fadf332cb0303515b68d8d06a344c82e3e3e5 | Modules/Scripted/DICOMPlugins/DICOMScalarVolumePlugin.py | python | DICOMScalarVolumePluginClass.compareVolumeNodes | (volumeNode1,volumeNode2) | return comparison | Given two mrml volume nodes, return true of the numpy arrays have identical data
and other metadata matches. Returns empty string on match, otherwise
a string with a list of differences separated by newlines. | Given two mrml volume nodes, return true of the numpy arrays have identical data
and other metadata matches. Returns empty string on match, otherwise
a string with a list of differences separated by newlines. | [
"Given",
"two",
"mrml",
"volume",
"nodes",
"return",
"true",
"of",
"the",
"numpy",
"arrays",
"have",
"identical",
"data",
"and",
"other",
"metadata",
"matches",
".",
"Returns",
"empty",
"string",
"on",
"match",
"otherwise",
"a",
"string",
"with",
"a",
"list",
"of",
"differences",
"separated",
"by",
"newlines",
"."
] | def compareVolumeNodes(volumeNode1,volumeNode2):
"""
Given two mrml volume nodes, return true of the numpy arrays have identical data
and other metadata matches. Returns empty string on match, otherwise
a string with a list of differences separated by newlines.
"""
volumesLogic = slicer.modules.volumes.logic()
comparison = ""
comparison += volumesLogic.CompareVolumeGeometry(volumeNode1, volumeNode2)
image1 = volumeNode1.GetImageData()
image2 = volumeNode2.GetImageData()
if image1.GetScalarType() != image2.GetScalarType():
comparison += f"First volume is {image1.GetScalarTypeAsString()}, but second is {image2.GetScalarTypeAsString()}"
array1 = slicer.util.array(volumeNode1.GetID())
array2 = slicer.util.array(volumeNode2.GetID())
if not numpy.all(array1 == array2):
comparison += "Pixel data mismatch\n"
return comparison | [
"def",
"compareVolumeNodes",
"(",
"volumeNode1",
",",
"volumeNode2",
")",
":",
"volumesLogic",
"=",
"slicer",
".",
"modules",
".",
"volumes",
".",
"logic",
"(",
")",
"comparison",
"=",
"\"\"",
"comparison",
"+=",
"volumesLogic",
".",
"CompareVolumeGeometry",
"(",
"volumeNode1",
",",
"volumeNode2",
")",
"image1",
"=",
"volumeNode1",
".",
"GetImageData",
"(",
")",
"image2",
"=",
"volumeNode2",
".",
"GetImageData",
"(",
")",
"if",
"image1",
".",
"GetScalarType",
"(",
")",
"!=",
"image2",
".",
"GetScalarType",
"(",
")",
":",
"comparison",
"+=",
"f\"First volume is {image1.GetScalarTypeAsString()}, but second is {image2.GetScalarTypeAsString()}\"",
"array1",
"=",
"slicer",
".",
"util",
".",
"array",
"(",
"volumeNode1",
".",
"GetID",
"(",
")",
")",
"array2",
"=",
"slicer",
".",
"util",
".",
"array",
"(",
"volumeNode2",
".",
"GetID",
"(",
")",
")",
"if",
"not",
"numpy",
".",
"all",
"(",
"array1",
"==",
"array2",
")",
":",
"comparison",
"+=",
"\"Pixel data mismatch\\n\"",
"return",
"comparison"
] | https://github.com/Slicer/Slicer/blob/ba9fadf332cb0303515b68d8d06a344c82e3e3e5/Modules/Scripted/DICOMPlugins/DICOMScalarVolumePlugin.py#L106-L123 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/telemetry/third_party/png/png.py | python | read_pam_header | (infile) | return 'P7', width, height, depth, maxval | Read (the rest of a) PAM header. `infile` should be positioned
immediately after the initial 'P7' line (at the beginning of the
second line). Returns are as for `read_pnm_header`. | Read (the rest of a) PAM header. `infile` should be positioned
immediately after the initial 'P7' line (at the beginning of the
second line). Returns are as for `read_pnm_header`. | [
"Read",
"(",
"the",
"rest",
"of",
"a",
")",
"PAM",
"header",
".",
"infile",
"should",
"be",
"positioned",
"immediately",
"after",
"the",
"initial",
"P7",
"line",
"(",
"at",
"the",
"beginning",
"of",
"the",
"second",
"line",
")",
".",
"Returns",
"are",
"as",
"for",
"read_pnm_header",
"."
] | def read_pam_header(infile):
"""
Read (the rest of a) PAM header. `infile` should be positioned
immediately after the initial 'P7' line (at the beginning of the
second line). Returns are as for `read_pnm_header`.
"""
# Unlike PBM, PGM, and PPM, we can read the header a line at a time.
header = dict()
while True:
l = infile.readline().strip()
if l == strtobytes('ENDHDR'):
break
if not l:
raise EOFError('PAM ended prematurely')
if l[0] == strtobytes('#'):
continue
l = l.split(None, 1)
if l[0] not in header:
header[l[0]] = l[1]
else:
header[l[0]] += strtobytes(' ') + l[1]
required = ['WIDTH', 'HEIGHT', 'DEPTH', 'MAXVAL']
required = [strtobytes(x) for x in required]
WIDTH,HEIGHT,DEPTH,MAXVAL = required
present = [x for x in required if x in header]
if len(present) != len(required):
raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
width = int(header[WIDTH])
height = int(header[HEIGHT])
depth = int(header[DEPTH])
maxval = int(header[MAXVAL])
if (width <= 0 or
height <= 0 or
depth <= 0 or
maxval <= 0):
raise Error(
'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
return 'P7', width, height, depth, maxval | [
"def",
"read_pam_header",
"(",
"infile",
")",
":",
"# Unlike PBM, PGM, and PPM, we can read the header a line at a time.",
"header",
"=",
"dict",
"(",
")",
"while",
"True",
":",
"l",
"=",
"infile",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"if",
"l",
"==",
"strtobytes",
"(",
"'ENDHDR'",
")",
":",
"break",
"if",
"not",
"l",
":",
"raise",
"EOFError",
"(",
"'PAM ended prematurely'",
")",
"if",
"l",
"[",
"0",
"]",
"==",
"strtobytes",
"(",
"'#'",
")",
":",
"continue",
"l",
"=",
"l",
".",
"split",
"(",
"None",
",",
"1",
")",
"if",
"l",
"[",
"0",
"]",
"not",
"in",
"header",
":",
"header",
"[",
"l",
"[",
"0",
"]",
"]",
"=",
"l",
"[",
"1",
"]",
"else",
":",
"header",
"[",
"l",
"[",
"0",
"]",
"]",
"+=",
"strtobytes",
"(",
"' '",
")",
"+",
"l",
"[",
"1",
"]",
"required",
"=",
"[",
"'WIDTH'",
",",
"'HEIGHT'",
",",
"'DEPTH'",
",",
"'MAXVAL'",
"]",
"required",
"=",
"[",
"strtobytes",
"(",
"x",
")",
"for",
"x",
"in",
"required",
"]",
"WIDTH",
",",
"HEIGHT",
",",
"DEPTH",
",",
"MAXVAL",
"=",
"required",
"present",
"=",
"[",
"x",
"for",
"x",
"in",
"required",
"if",
"x",
"in",
"header",
"]",
"if",
"len",
"(",
"present",
")",
"!=",
"len",
"(",
"required",
")",
":",
"raise",
"Error",
"(",
"'PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL'",
")",
"width",
"=",
"int",
"(",
"header",
"[",
"WIDTH",
"]",
")",
"height",
"=",
"int",
"(",
"header",
"[",
"HEIGHT",
"]",
")",
"depth",
"=",
"int",
"(",
"header",
"[",
"DEPTH",
"]",
")",
"maxval",
"=",
"int",
"(",
"header",
"[",
"MAXVAL",
"]",
")",
"if",
"(",
"width",
"<=",
"0",
"or",
"height",
"<=",
"0",
"or",
"depth",
"<=",
"0",
"or",
"maxval",
"<=",
"0",
")",
":",
"raise",
"Error",
"(",
"'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers'",
")",
"return",
"'P7'",
",",
"width",
",",
"height",
",",
"depth",
",",
"maxval"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/third_party/png/png.py#L3557-L3596 | |
klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/tools/grit/grit/gather/interface.py | python | GathererBase.Parse | (self) | Reads and parses the contents of what is being gathered. | Reads and parses the contents of what is being gathered. | [
"Reads",
"and",
"parses",
"the",
"contents",
"of",
"what",
"is",
"being",
"gathered",
"."
] | def Parse(self):
'''Reads and parses the contents of what is being gathered.'''
raise NotImplementedError() | [
"def",
"Parse",
"(",
"self",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/tools/grit/grit/gather/interface.py#L81-L83 | ||
intel/llvm | e6d0547e9d99b5a56430c4749f6c7e328bf221ab | lldb/utils/lui/lldbutil.py | python | get_caller_symbol | (thread) | Returns the symbol name for the call site of the leaf function. | Returns the symbol name for the call site of the leaf function. | [
"Returns",
"the",
"symbol",
"name",
"for",
"the",
"call",
"site",
"of",
"the",
"leaf",
"function",
"."
] | def get_caller_symbol(thread):
"""
Returns the symbol name for the call site of the leaf function.
"""
depth = thread.GetNumFrames()
if depth <= 1:
return None
caller = thread.GetFrameAtIndex(1).GetSymbol()
if caller:
return caller.GetName()
else:
return None | [
"def",
"get_caller_symbol",
"(",
"thread",
")",
":",
"depth",
"=",
"thread",
".",
"GetNumFrames",
"(",
")",
"if",
"depth",
"<=",
"1",
":",
"return",
"None",
"caller",
"=",
"thread",
".",
"GetFrameAtIndex",
"(",
"1",
")",
".",
"GetSymbol",
"(",
")",
"if",
"caller",
":",
"return",
"caller",
".",
"GetName",
"(",
")",
"else",
":",
"return",
"None"
] | https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/utils/lui/lldbutil.py#L690-L701 | ||
Kitware/ParaView | f760af9124ff4634b23ebbeab95a4f56e0261955 | Wrapping/Python/paraview/servermanager.py | python | _createInitialize | (group, name) | return aInitialize | Internal method to create an Initialize() method for the sub-classes
of Proxy | Internal method to create an Initialize() method for the sub-classes
of Proxy | [
"Internal",
"method",
"to",
"create",
"an",
"Initialize",
"()",
"method",
"for",
"the",
"sub",
"-",
"classes",
"of",
"Proxy"
] | def _createInitialize(group, name):
"""Internal method to create an Initialize() method for the sub-classes
of Proxy"""
pgroup = group
pname = name
def aInitialize(self, connection=None, update=True):
if not connection:
connection = ActiveConnection
if not connection:
raise RuntimeError ('Cannot create a proxy without a session.')
if not connection.Session.GetProxyDefinitionManager().HasDefinition(pgroup, pname):
error_msg = "The connection does not provide any definition for %s." % pname
raise RuntimeError (error_msg)
self.InitializeFromProxy(\
CreateProxy(pgroup, pname, connection.Session), update)
return aInitialize | [
"def",
"_createInitialize",
"(",
"group",
",",
"name",
")",
":",
"pgroup",
"=",
"group",
"pname",
"=",
"name",
"def",
"aInitialize",
"(",
"self",
",",
"connection",
"=",
"None",
",",
"update",
"=",
"True",
")",
":",
"if",
"not",
"connection",
":",
"connection",
"=",
"ActiveConnection",
"if",
"not",
"connection",
":",
"raise",
"RuntimeError",
"(",
"'Cannot create a proxy without a session.'",
")",
"if",
"not",
"connection",
".",
"Session",
".",
"GetProxyDefinitionManager",
"(",
")",
".",
"HasDefinition",
"(",
"pgroup",
",",
"pname",
")",
":",
"error_msg",
"=",
"\"The connection does not provide any definition for %s.\"",
"%",
"pname",
"raise",
"RuntimeError",
"(",
"error_msg",
")",
"self",
".",
"InitializeFromProxy",
"(",
"CreateProxy",
"(",
"pgroup",
",",
"pname",
",",
"connection",
".",
"Session",
")",
",",
"update",
")",
"return",
"aInitialize"
] | https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Wrapping/Python/paraview/servermanager.py#L2581-L2596 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/floatspin.py | python | FixedPoint.set_precision | (self, precision=DEFAULT_PRECISION) | Change the precision carried by this :class:`FixedPoint` to `precision`.
:param `precision`: must be an int >= 0, and defaults to
``DEFAULT_PRECISION``.
:note: If `precision` is less than this :class:`FixedPoint`'s current precision,
information may be lost to rounding. | Change the precision carried by this :class:`FixedPoint` to `precision`. | [
"Change",
"the",
"precision",
"carried",
"by",
"this",
":",
"class",
":",
"FixedPoint",
"to",
"precision",
"."
] | def set_precision(self, precision=DEFAULT_PRECISION):
"""
Change the precision carried by this :class:`FixedPoint` to `precision`.
:param `precision`: must be an int >= 0, and defaults to
``DEFAULT_PRECISION``.
:note: If `precision` is less than this :class:`FixedPoint`'s current precision,
information may be lost to rounding.
"""
try:
p = int(precision)
except:
raise TypeError("precision not convertable to int: " +
`precision`)
if p < 0:
raise ValueError("precision must be >= 0: " + `precision`)
if p > self.p:
self.n = self.n * _tento(p - self.p)
elif p < self.p:
self.n = _roundquotient(self.n, _tento(self.p - p))
self.p = p | [
"def",
"set_precision",
"(",
"self",
",",
"precision",
"=",
"DEFAULT_PRECISION",
")",
":",
"try",
":",
"p",
"=",
"int",
"(",
"precision",
")",
"except",
":",
"raise",
"TypeError",
"(",
"\"precision not convertable to int: \"",
"+",
"`precision`",
")",
"if",
"p",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"precision must be >= 0: \"",
"+",
"`precision`",
")",
"if",
"p",
">",
"self",
".",
"p",
":",
"self",
".",
"n",
"=",
"self",
".",
"n",
"*",
"_tento",
"(",
"p",
"-",
"self",
".",
"p",
")",
"elif",
"p",
"<",
"self",
".",
"p",
":",
"self",
".",
"n",
"=",
"_roundquotient",
"(",
"self",
".",
"n",
",",
"_tento",
"(",
"self",
".",
"p",
"-",
"p",
")",
")",
"self",
".",
"p",
"=",
"p"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/floatspin.py#L1417-L1440 | ||
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | tools/code_coverage/coverage_posix.py | python | Coverage.GenerateLcovPosix | (self) | Convert profile data to lcov on Mac or Linux. | Convert profile data to lcov on Mac or Linux. | [
"Convert",
"profile",
"data",
"to",
"lcov",
"on",
"Mac",
"or",
"Linux",
"."
] | def GenerateLcovPosix(self):
"""Convert profile data to lcov on Mac or Linux."""
start_dir = os.getcwd()
logging.info('GenerateLcovPosix: start_dir=' + start_dir)
if self.IsLinux():
# With Linux/make (e.g. the coverage_run target), the current
# directory for this command is .../build/src/chrome but we need
# to be in .../build/src for the relative path of source files
# to be correct. However, when run from buildbot, the current
# directory is .../build. Accommodate.
# On Mac source files are compiled with abs paths so this isn't
# a problem.
# This is a bit of a hack. The best answer is to require this
# script be run in a specific directory for all cases (from
# Makefile or from buildbot).
if start_dir.endswith('chrome'):
logging.info('coverage_posix.py: doing a "cd .." '
'to accomodate Linux/make PWD')
os.chdir('..')
elif start_dir.endswith('build'):
logging.info('coverage_posix.py: doing a "cd src" '
'to accomodate buildbot PWD')
os.chdir('src')
else:
logging.info('coverage_posix.py: NOT changing directory.')
elif self.IsMac():
pass
command = [self.mcov,
'--directory',
os.path.join(start_dir, self.directory_parent),
'--output',
os.path.join(start_dir, self.coverage_info_file)]
logging.info('Assembly command: ' + ' '.join(command))
retcode = subprocess.call(command)
if retcode:
logging.fatal('COVERAGE: %s failed; return code: %d' %
(command[0], retcode))
if self.options.strict:
sys.exit(retcode)
if self.IsLinux():
os.chdir(start_dir)
if not os.path.exists(self.coverage_info_file):
logging.fatal('%s was not created. Coverage run failed.' %
self.coverage_info_file)
sys.exit(1) | [
"def",
"GenerateLcovPosix",
"(",
"self",
")",
":",
"start_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"logging",
".",
"info",
"(",
"'GenerateLcovPosix: start_dir='",
"+",
"start_dir",
")",
"if",
"self",
".",
"IsLinux",
"(",
")",
":",
"# With Linux/make (e.g. the coverage_run target), the current",
"# directory for this command is .../build/src/chrome but we need",
"# to be in .../build/src for the relative path of source files",
"# to be correct. However, when run from buildbot, the current",
"# directory is .../build. Accommodate.",
"# On Mac source files are compiled with abs paths so this isn't",
"# a problem.",
"# This is a bit of a hack. The best answer is to require this",
"# script be run in a specific directory for all cases (from",
"# Makefile or from buildbot).",
"if",
"start_dir",
".",
"endswith",
"(",
"'chrome'",
")",
":",
"logging",
".",
"info",
"(",
"'coverage_posix.py: doing a \"cd ..\" '",
"'to accomodate Linux/make PWD'",
")",
"os",
".",
"chdir",
"(",
"'..'",
")",
"elif",
"start_dir",
".",
"endswith",
"(",
"'build'",
")",
":",
"logging",
".",
"info",
"(",
"'coverage_posix.py: doing a \"cd src\" '",
"'to accomodate buildbot PWD'",
")",
"os",
".",
"chdir",
"(",
"'src'",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'coverage_posix.py: NOT changing directory.'",
")",
"elif",
"self",
".",
"IsMac",
"(",
")",
":",
"pass",
"command",
"=",
"[",
"self",
".",
"mcov",
",",
"'--directory'",
",",
"os",
".",
"path",
".",
"join",
"(",
"start_dir",
",",
"self",
".",
"directory_parent",
")",
",",
"'--output'",
",",
"os",
".",
"path",
".",
"join",
"(",
"start_dir",
",",
"self",
".",
"coverage_info_file",
")",
"]",
"logging",
".",
"info",
"(",
"'Assembly command: '",
"+",
"' '",
".",
"join",
"(",
"command",
")",
")",
"retcode",
"=",
"subprocess",
".",
"call",
"(",
"command",
")",
"if",
"retcode",
":",
"logging",
".",
"fatal",
"(",
"'COVERAGE: %s failed; return code: %d'",
"%",
"(",
"command",
"[",
"0",
"]",
",",
"retcode",
")",
")",
"if",
"self",
".",
"options",
".",
"strict",
":",
"sys",
".",
"exit",
"(",
"retcode",
")",
"if",
"self",
".",
"IsLinux",
"(",
")",
":",
"os",
".",
"chdir",
"(",
"start_dir",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"coverage_info_file",
")",
":",
"logging",
".",
"fatal",
"(",
"'%s was not created. Coverage run failed.'",
"%",
"self",
".",
"coverage_info_file",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/code_coverage/coverage_posix.py#L781-L826 | ||
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/llvm/utils/lit/lit/LitConfig.py | python | LitConfig.maxIndividualTestTime | (self, value) | Interface for setting maximum time to spend executing
a single test | Interface for setting maximum time to spend executing
a single test | [
"Interface",
"for",
"setting",
"maximum",
"time",
"to",
"spend",
"executing",
"a",
"single",
"test"
] | def maxIndividualTestTime(self, value):
"""
Interface for setting maximum time to spend executing
a single test
"""
self._maxIndividualTestTime = value
if self.maxIndividualTestTime > 0:
# The current implementation needs psutil to set
# a timeout per test. Check it's available.
# See lit.util.killProcessAndChildren()
try:
import psutil # noqa: F401
except ImportError:
self.fatal("Setting a timeout per test requires the"
" Python psutil module but it could not be"
" found. Try installing it via pip or via"
" your operating system's package manager.")
elif self.maxIndividualTestTime < 0:
self.fatal('The timeout per test must be >= 0 seconds') | [
"def",
"maxIndividualTestTime",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_maxIndividualTestTime",
"=",
"value",
"if",
"self",
".",
"maxIndividualTestTime",
">",
"0",
":",
"# The current implementation needs psutil to set",
"# a timeout per test. Check it's available.",
"# See lit.util.killProcessAndChildren()",
"try",
":",
"import",
"psutil",
"# noqa: F401",
"except",
"ImportError",
":",
"self",
".",
"fatal",
"(",
"\"Setting a timeout per test requires the\"",
"\" Python psutil module but it could not be\"",
"\" found. Try installing it via pip or via\"",
"\" your operating system's package manager.\"",
")",
"elif",
"self",
".",
"maxIndividualTestTime",
"<",
"0",
":",
"self",
".",
"fatal",
"(",
"'The timeout per test must be >= 0 seconds'",
")"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/llvm/utils/lit/lit/LitConfig.py#L77-L95 | ||
yuxng/PoseCNN | 9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04 | lib/datasets/ycb.py | python | ycb.label_path_from_index | (self, index) | return label_path | Construct an metadata path from the image's "index" identifier. | Construct an metadata path from the image's "index" identifier. | [
"Construct",
"an",
"metadata",
"path",
"from",
"the",
"image",
"s",
"index",
"identifier",
"."
] | def label_path_from_index(self, index):
"""
Construct an metadata path from the image's "index" identifier.
"""
label_path = os.path.join(self._data_path, index + '-label' + self._image_ext)
assert os.path.exists(label_path), \
'Path does not exist: {}'.format(label_path)
return label_path | [
"def",
"label_path_from_index",
"(",
"self",
",",
"index",
")",
":",
"label_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_data_path",
",",
"index",
"+",
"'-label'",
"+",
"self",
".",
"_image_ext",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"label_path",
")",
",",
"'Path does not exist: {}'",
".",
"format",
"(",
"label_path",
")",
"return",
"label_path"
] | https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/datasets/ycb.py#L87-L94 | |
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | example/bayesian-methods/utils.py | python | copy_param | (exe, new_param=None) | return new_param | Create copy of parameters | Create copy of parameters | [
"Create",
"copy",
"of",
"parameters"
] | def copy_param(exe, new_param=None):
"""Create copy of parameters"""
if new_param is None:
new_param = {k: nd.empty(v.shape, ctx=mx.cpu()) for k, v in exe.arg_dict.items()}
for k, v in new_param.items():
exe.arg_dict[k].copyto(v)
return new_param | [
"def",
"copy_param",
"(",
"exe",
",",
"new_param",
"=",
"None",
")",
":",
"if",
"new_param",
"is",
"None",
":",
"new_param",
"=",
"{",
"k",
":",
"nd",
".",
"empty",
"(",
"v",
".",
"shape",
",",
"ctx",
"=",
"mx",
".",
"cpu",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"exe",
".",
"arg_dict",
".",
"items",
"(",
")",
"}",
"for",
"k",
",",
"v",
"in",
"new_param",
".",
"items",
"(",
")",
":",
"exe",
".",
"arg_dict",
"[",
"k",
"]",
".",
"copyto",
"(",
"v",
")",
"return",
"new_param"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/example/bayesian-methods/utils.py#L69-L75 | |
eldar/deepcut-cnn | 928bf2f224fce132f6e4404b4c95fb017297a5e0 | python/caffe/io.py | python | datum_to_array | (datum) | Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label. | Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label. | [
"Converts",
"a",
"datum",
"to",
"an",
"array",
".",
"Note",
"that",
"the",
"label",
"is",
"not",
"returned",
"as",
"one",
"can",
"easily",
"get",
"it",
"by",
"calling",
"datum",
".",
"label",
"."
] | def datum_to_array(datum):
"""Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label.
"""
if len(datum.data):
return np.fromstring(datum.data, dtype=np.uint8).reshape(
datum.channels, datum.height, datum.width)
else:
return np.array(datum.float_data).astype(float).reshape(
datum.channels, datum.height, datum.width) | [
"def",
"datum_to_array",
"(",
"datum",
")",
":",
"if",
"len",
"(",
"datum",
".",
"data",
")",
":",
"return",
"np",
".",
"fromstring",
"(",
"datum",
".",
"data",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"reshape",
"(",
"datum",
".",
"channels",
",",
"datum",
".",
"height",
",",
"datum",
".",
"width",
")",
"else",
":",
"return",
"np",
".",
"array",
"(",
"datum",
".",
"float_data",
")",
".",
"astype",
"(",
"float",
")",
".",
"reshape",
"(",
"datum",
".",
"channels",
",",
"datum",
".",
"height",
",",
"datum",
".",
"width",
")"
] | https://github.com/eldar/deepcut-cnn/blob/928bf2f224fce132f6e4404b4c95fb017297a5e0/python/caffe/io.py#L83-L92 | ||
openmm/openmm | cb293447c4fc8b03976dfe11399f107bab70f3d9 | wrappers/python/openmm/app/pdbxfile.py | python | PDBxFile.writeModel | (topology, positions, file=sys.stdout, modelIndex=1, keepIds=False) | Write out a model to a PDBx/mmCIF file.
Parameters
----------
topology : Topology
The Topology defining the model to write
positions : list
The list of atomic positions to write
file : file=stdout
A file to write the model to
modelIndex : int=1
The model number of this frame
keepIds : bool=False
If True, keep the residue and chain IDs specified in the Topology
rather than generating new ones. Warning: It is up to the caller to
make sure these are valid IDs that satisfy the requirements of the
PDBx/mmCIF format. Otherwise, the output file will be invalid. | Write out a model to a PDBx/mmCIF file. | [
"Write",
"out",
"a",
"model",
"to",
"a",
"PDBx",
"/",
"mmCIF",
"file",
"."
] | def writeModel(topology, positions, file=sys.stdout, modelIndex=1, keepIds=False):
"""Write out a model to a PDBx/mmCIF file.
Parameters
----------
topology : Topology
The Topology defining the model to write
positions : list
The list of atomic positions to write
file : file=stdout
A file to write the model to
modelIndex : int=1
The model number of this frame
keepIds : bool=False
If True, keep the residue and chain IDs specified in the Topology
rather than generating new ones. Warning: It is up to the caller to
make sure these are valid IDs that satisfy the requirements of the
PDBx/mmCIF format. Otherwise, the output file will be invalid.
"""
if len(list(topology.atoms())) != len(positions):
raise ValueError('The number of positions must match the number of atoms')
if is_quantity(positions):
positions = positions.value_in_unit(angstroms)
if any(math.isnan(norm(pos)) for pos in positions):
raise ValueError('Particle position is NaN')
if any(math.isinf(norm(pos)) for pos in positions):
raise ValueError('Particle position is infinite')
nonHeterogens = PDBFile._standardResidues[:]
nonHeterogens.remove('HOH')
atomIndex = 1
posIndex = 0
for (chainIndex, chain) in enumerate(topology.chains()):
if keepIds:
chainName = chain.id
else:
chainName = chr(ord('A')+chainIndex%26)
residues = list(chain.residues())
for (resIndex, res) in enumerate(residues):
if keepIds:
resId = res.id
resIC = (res.insertionCode if res.insertionCode.strip() else '.')
else:
resId = resIndex + 1
resIC = '.'
if res.name in nonHeterogens:
recordName = "ATOM"
else:
recordName = "HETATM"
for atom in res.atoms():
coords = positions[posIndex]
if atom.element is not None:
symbol = atom.element.symbol
else:
symbol = '?'
line = "%s %5d %-3s %-4s . %-4s %s ? %5s %s %10.4f %10.4f %10.4f 0.0 0.0 ? ? ? ? ? . %5s %4s %s %4s %5d"
print(line % (recordName, atomIndex, symbol, atom.name, res.name, chainName, resId, resIC, coords[0], coords[1], coords[2],
resId, res.name, chainName, atom.name, modelIndex), file=file)
posIndex += 1
atomIndex += 1 | [
"def",
"writeModel",
"(",
"topology",
",",
"positions",
",",
"file",
"=",
"sys",
".",
"stdout",
",",
"modelIndex",
"=",
"1",
",",
"keepIds",
"=",
"False",
")",
":",
"if",
"len",
"(",
"list",
"(",
"topology",
".",
"atoms",
"(",
")",
")",
")",
"!=",
"len",
"(",
"positions",
")",
":",
"raise",
"ValueError",
"(",
"'The number of positions must match the number of atoms'",
")",
"if",
"is_quantity",
"(",
"positions",
")",
":",
"positions",
"=",
"positions",
".",
"value_in_unit",
"(",
"angstroms",
")",
"if",
"any",
"(",
"math",
".",
"isnan",
"(",
"norm",
"(",
"pos",
")",
")",
"for",
"pos",
"in",
"positions",
")",
":",
"raise",
"ValueError",
"(",
"'Particle position is NaN'",
")",
"if",
"any",
"(",
"math",
".",
"isinf",
"(",
"norm",
"(",
"pos",
")",
")",
"for",
"pos",
"in",
"positions",
")",
":",
"raise",
"ValueError",
"(",
"'Particle position is infinite'",
")",
"nonHeterogens",
"=",
"PDBFile",
".",
"_standardResidues",
"[",
":",
"]",
"nonHeterogens",
".",
"remove",
"(",
"'HOH'",
")",
"atomIndex",
"=",
"1",
"posIndex",
"=",
"0",
"for",
"(",
"chainIndex",
",",
"chain",
")",
"in",
"enumerate",
"(",
"topology",
".",
"chains",
"(",
")",
")",
":",
"if",
"keepIds",
":",
"chainName",
"=",
"chain",
".",
"id",
"else",
":",
"chainName",
"=",
"chr",
"(",
"ord",
"(",
"'A'",
")",
"+",
"chainIndex",
"%",
"26",
")",
"residues",
"=",
"list",
"(",
"chain",
".",
"residues",
"(",
")",
")",
"for",
"(",
"resIndex",
",",
"res",
")",
"in",
"enumerate",
"(",
"residues",
")",
":",
"if",
"keepIds",
":",
"resId",
"=",
"res",
".",
"id",
"resIC",
"=",
"(",
"res",
".",
"insertionCode",
"if",
"res",
".",
"insertionCode",
".",
"strip",
"(",
")",
"else",
"'.'",
")",
"else",
":",
"resId",
"=",
"resIndex",
"+",
"1",
"resIC",
"=",
"'.'",
"if",
"res",
".",
"name",
"in",
"nonHeterogens",
":",
"recordName",
"=",
"\"ATOM\"",
"else",
":",
"recordName",
"=",
"\"HETATM\"",
"for",
"atom",
"in",
"res",
".",
"atoms",
"(",
")",
":",
"coords",
"=",
"positions",
"[",
"posIndex",
"]",
"if",
"atom",
".",
"element",
"is",
"not",
"None",
":",
"symbol",
"=",
"atom",
".",
"element",
".",
"symbol",
"else",
":",
"symbol",
"=",
"'?'",
"line",
"=",
"\"%s %5d %-3s %-4s . %-4s %s ? %5s %s %10.4f %10.4f %10.4f 0.0 0.0 ? ? ? ? ? . %5s %4s %s %4s %5d\"",
"print",
"(",
"line",
"%",
"(",
"recordName",
",",
"atomIndex",
",",
"symbol",
",",
"atom",
".",
"name",
",",
"res",
".",
"name",
",",
"chainName",
",",
"resId",
",",
"resIC",
",",
"coords",
"[",
"0",
"]",
",",
"coords",
"[",
"1",
"]",
",",
"coords",
"[",
"2",
"]",
",",
"resId",
",",
"res",
".",
"name",
",",
"chainName",
",",
"atom",
".",
"name",
",",
"modelIndex",
")",
",",
"file",
"=",
"file",
")",
"posIndex",
"+=",
"1",
"atomIndex",
"+=",
"1"
] | https://github.com/openmm/openmm/blob/cb293447c4fc8b03976dfe11399f107bab70f3d9/wrappers/python/openmm/app/pdbxfile.py#L383-L441 | ||
jackaudio/jack2 | 21b293dbc37d42446141a08922cdec0d2550c6a0 | autooptions/__init__.py | python | AutoOption.summarize | (self, conf) | This function displays a result summary with the help text and
the result of the configuration. | This function displays a result summary with the help text and
the result of the configuration. | [
"This",
"function",
"displays",
"a",
"result",
"summary",
"with",
"the",
"help",
"text",
"and",
"the",
"result",
"of",
"the",
"configuration",
"."
] | def summarize(self, conf):
"""
This function displays a result summary with the help text and
the result of the configuration.
"""
if self.help:
if self.enable:
conf.msg(self.help, 'yes', color='GREEN')
else:
conf.msg(self.help, 'no', color='YELLOW') | [
"def",
"summarize",
"(",
"self",
",",
"conf",
")",
":",
"if",
"self",
".",
"help",
":",
"if",
"self",
".",
"enable",
":",
"conf",
".",
"msg",
"(",
"self",
".",
"help",
",",
"'yes'",
",",
"color",
"=",
"'GREEN'",
")",
"else",
":",
"conf",
".",
"msg",
"(",
"self",
".",
"help",
",",
"'no'",
",",
"color",
"=",
"'YELLOW'",
")"
] | https://github.com/jackaudio/jack2/blob/21b293dbc37d42446141a08922cdec0d2550c6a0/autooptions/__init__.py#L275-L284 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/cmd.py | python | Cmd.preloop | (self) | Hook method executed once when the cmdloop() method is called. | Hook method executed once when the cmdloop() method is called. | [
"Hook",
"method",
"executed",
"once",
"when",
"the",
"cmdloop",
"()",
"method",
"is",
"called",
"."
def preloop(self):
    """Hook run exactly once, just before cmdloop() starts its loop.

    The default implementation is a no-op; subclasses override this to
    perform one-time setup.
    """
    return None
"def",
"preloop",
"(",
"self",
")",
":",
"pass"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/cmd.py#L161-L163 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/swagger_spec_validator/validator20.py | python | validate_defaults_in_parameters | (params_spec, deref) | Validates that default values for api parameters are
of the parameter type
:param params_spec: list of parameter objects (#/paths/<path>/<http_verb>/parameters)
:param deref: callable that dereferences $refs
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError` | Validates that default values for api parameters are
of the parameter type | [
"Validates",
"that",
"default",
"values",
"for",
"api",
"parameters",
"are",
"of",
"the",
"parameter",
"type"
def validate_defaults_in_parameters(params_spec, deref):
    """Validate that each API parameter's default value matches its type.

    Kept only for backward compatibility of the public signature; the
    package no longer calls it internally.

    :param params_spec: list of parameter objects
        (#/paths/<path>/<http_verb>/parameters)
    :param deref: callable that dereferences $refs
    :raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
    """
    for parameter_spec in params_spec:
        validate_default_in_parameter(parameter_spec, deref)
"def",
"validate_defaults_in_parameters",
"(",
"params_spec",
",",
"deref",
")",
":",
"# Note: this functions is preserved to avoid public signature updates (it's not used internally)",
"for",
"param_spec",
"in",
"params_spec",
":",
"validate_default_in_parameter",
"(",
"param_spec",
",",
"deref",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/swagger_spec_validator/validator20.py#L250-L262 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_core.py | python | StdDialogButtonSizer.SetNegativeButton | (*args, **kwargs) | return _core_.StdDialogButtonSizer_SetNegativeButton(*args, **kwargs) | SetNegativeButton(self, wxButton button) | SetNegativeButton(self, wxButton button) | [
"SetNegativeButton",
"(",
"self",
"wxButton",
"button",
")"
def SetNegativeButton(*args, **kwargs):
    """SetNegativeButton(self, wxButton button)

    SWIG-generated wrapper: forwards all arguments unchanged to the
    native extension module.  Presumably registers *button* as the
    sizer's negative (reject/cancel) button — behavior lives in the
    underlying wxWidgets C++ implementation.
    """
    # Delegate directly to the compiled _core_ extension.
    return _core_.StdDialogButtonSizer_SetNegativeButton(*args, **kwargs)
"def",
"SetNegativeButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"StdDialogButtonSizer_SetNegativeButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L15516-L15518 | |
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/ops/session_ops.py | python | TensorHandle.eval | (self) | return self._session.run(reader, feed_dict={holder: self._handle}) | Return the value of the tensor represented by this handle. | Return the value of the tensor represented by this handle. | [
"Return",
"the",
"value",
"of",
"the",
"tensor",
"represented",
"by",
"this",
"handle",
"."
def eval(self):
    """Fetch and return the value of the tensor referenced by this handle.

    Raises:
      TypeError: if automatic garbage collection has been disabled on this
        handle, in which case the persistent tensor backing it may already
        have been deleted.
    """
    if not self._auto_gc_enabled:
        raise TypeError("Persistent tensor %s may have already been deleted."
                        % self.handle)
    graph = self._session.graph
    holder, reader = _get_handle_reader(graph, self._handle, self._dtype)
    feed = {holder: self._handle}
    return self._session.run(reader, feed_dict=feed)
"def",
"eval",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_auto_gc_enabled",
":",
"raise",
"TypeError",
"(",
"\"Persistent tensor %s may have already been deleted.\"",
"%",
"self",
".",
"handle",
")",
"holder",
",",
"reader",
"=",
"_get_handle_reader",
"(",
"self",
".",
"_session",
".",
"graph",
",",
"self",
".",
"_handle",
",",
"self",
".",
"_dtype",
")",
"return",
"self",
".",
"_session",
".",
"run",
"(",
"reader",
",",
"feed_dict",
"=",
"{",
"holder",
":",
"self",
".",
"_handle",
"}",
")"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/ops/session_ops.py#L69-L76 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/layers/convolutional.py | python | separable_conv2d | (inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None) | return layer.apply(inputs) | Functional interface for the depthwise separable 2D convolution layer.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, no bias will
be applied.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled. | Functional interface for the depthwise separable 2D convolution layer. | [
"Functional",
"interface",
"for",
"the",
"depthwise",
"separable",
"2D",
"convolution",
"layer",
"."
def separable_conv2d(inputs,
                     filters,
                     kernel_size,
                     strides=(1, 1),
                     padding='valid',
                     data_format='channels_last',
                     dilation_rate=(1, 1),
                     depth_multiplier=1,
                     activation=None,
                     use_bias=True,
                     depthwise_initializer=None,
                     pointwise_initializer=None,
                     bias_initializer=init_ops.zeros_initializer(),
                     depthwise_regularizer=None,
                     pointwise_regularizer=None,
                     bias_regularizer=None,
                     activity_regularizer=None,
                     depthwise_constraint=None,
                     pointwise_constraint=None,
                     bias_constraint=None,
                     trainable=True,
                     name=None,
                     reuse=None):
  """Functional interface for the depthwise separable 2D convolution layer.

  Builds a `SeparableConv2D` layer (a depthwise convolution acting on each
  channel independently, followed by a pointwise convolution that mixes
  channels) and applies it to `inputs`.  When `use_bias` is True and a bias
  initializer is given, a bias vector is added; an optional activation is
  then applied.

  Arguments:
    inputs: Input tensor.
    filters: Integer, number of output filters for the pointwise convolution.
    kernel_size: Int or tuple/list of 2 ints, spatial size of the depthwise
      filters.
    strides: Int or tuple/list of 2 ints, convolution strides.  Any stride
      value != 1 is incompatible with any `dilation_rate` value != 1.
    padding: `"valid"` or `"same"` (case-insensitive).
    data_format: `channels_last` (batch, height, width, channels) or
      `channels_first` (batch, channels, height, width).
    dilation_rate: Int or tuple/list of 2 ints, dilation of the convolution.
    depth_multiplier: Number of depthwise output channels generated per
      input channel (total depthwise channels = in_channels * multiplier).
    activation: Activation function, or None for a linear activation.
    use_bias: Whether the layer adds a bias vector.
    depthwise_initializer: Initializer for the depthwise kernel.
    pointwise_initializer: Initializer for the pointwise kernel.
    bias_initializer: Initializer for the bias vector; None disables bias.
    depthwise_regularizer: Optional regularizer for the depthwise kernel.
    pointwise_regularizer: Optional regularizer for the pointwise kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer for the layer output.
    depthwise_constraint: Optional projection applied to the depthwise
      kernel after each optimizer update.  Not safe under asynchronous
      distributed training.
    pointwise_constraint: Optional projection for the pointwise kernel.
    bias_constraint: Optional projection for the bias.
    trainable: If True, variables are added to
      `GraphKeys.TRAINABLE_VARIABLES`.
    name: Layer name.
    reuse: Whether to reuse the weights of a previous layer of the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Functional layers require graph mode; direct users to the class API
  # under eager execution.
  if context.in_eager_mode():
    raise ValueError(
        'Functional layers are currently not compatible with eager execution.'
        'Use tf.layers.SeparableConv2D instead.')
  # Collect the layer configuration once, then construct and apply it.
  layer_config = dict(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      depth_multiplier=depth_multiplier,
      activation=activation,
      use_bias=use_bias,
      depthwise_initializer=depthwise_initializer,
      pointwise_initializer=pointwise_initializer,
      bias_initializer=bias_initializer,
      depthwise_regularizer=depthwise_regularizer,
      pointwise_regularizer=pointwise_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      depthwise_constraint=depthwise_constraint,
      pointwise_constraint=pointwise_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  layer = SeparableConv2D(**layer_config)
  return layer.apply(inputs)
"def",
"separable_conv2d",
"(",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"padding",
"=",
"'valid'",
",",
"data_format",
"=",
"'channels_last'",
",",
"dilation_rate",
"=",
"(",
"1",
",",
"1",
")",
",",
"depth_multiplier",
"=",
"1",
",",
"activation",
"=",
"None",
",",
"use_bias",
"=",
"True",
",",
"depthwise_initializer",
"=",
"None",
",",
"pointwise_initializer",
"=",
"None",
",",
"bias_initializer",
"=",
"init_ops",
".",
"zeros_initializer",
"(",
")",
",",
"depthwise_regularizer",
"=",
"None",
",",
"pointwise_regularizer",
"=",
"None",
",",
"bias_regularizer",
"=",
"None",
",",
"activity_regularizer",
"=",
"None",
",",
"depthwise_constraint",
"=",
"None",
",",
"pointwise_constraint",
"=",
"None",
",",
"bias_constraint",
"=",
"None",
",",
"trainable",
"=",
"True",
",",
"name",
"=",
"None",
",",
"reuse",
"=",
"None",
")",
":",
"if",
"context",
".",
"in_eager_mode",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Functional layers are currently not compatible with eager execution.'",
"'Use tf.layers.SeparableConv2D instead.'",
")",
"layer",
"=",
"SeparableConv2D",
"(",
"filters",
"=",
"filters",
",",
"kernel_size",
"=",
"kernel_size",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"padding",
",",
"data_format",
"=",
"data_format",
",",
"dilation_rate",
"=",
"dilation_rate",
",",
"depth_multiplier",
"=",
"depth_multiplier",
",",
"activation",
"=",
"activation",
",",
"use_bias",
"=",
"use_bias",
",",
"depthwise_initializer",
"=",
"depthwise_initializer",
",",
"pointwise_initializer",
"=",
"pointwise_initializer",
",",
"bias_initializer",
"=",
"bias_initializer",
",",
"depthwise_regularizer",
"=",
"depthwise_regularizer",
",",
"pointwise_regularizer",
"=",
"pointwise_regularizer",
",",
"bias_regularizer",
"=",
"bias_regularizer",
",",
"activity_regularizer",
"=",
"activity_regularizer",
",",
"depthwise_constraint",
"=",
"depthwise_constraint",
",",
"pointwise_constraint",
"=",
"pointwise_constraint",
",",
"bias_constraint",
"=",
"bias_constraint",
",",
"trainable",
"=",
"trainable",
",",
"name",
"=",
"name",
",",
"_reuse",
"=",
"reuse",
",",
"_scope",
"=",
"name",
")",
"return",
"layer",
".",
"apply",
"(",
"inputs",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/layers/convolutional.py#L1038-L1160 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/compatibility/tf_upgrade_v2.py | python | _contrib_layers_l1_regularizer_transformer | (
parent, node, full_name, name, logs) | return node | Replace slim l1 regularizer with Keras one.
This entails renaming the 'scale' arg to 'l' and dropping any
provided scope arg. | Replace slim l1 regularizer with Keras one. | [
"Replace",
"slim",
"l1",
"regularizer",
"with",
"Keras",
"one",
"."
def _contrib_layers_l1_regularizer_transformer(
    parent, node, full_name, name, logs):
  """Replace slim l1 regularizer with Keras one.

  This entails renaming the 'scale' arg to 'l' and dropping any
  provided scope arg.

  Args:
    parent: parent AST node of `node` (unused here; part of the
      transformer-callback signature).
    node: ast.Call node for the tf.contrib.layers.l1_regularizer call;
      mutated in place.
    full_name: full dotted name of the called function (unused here).
    name: short name of the called function (unused here).
    logs: list collecting (severity, lineno, col, message) tuples.

  Returns:
    The mutated `node`.
  """
  # Check if we have a scale or scope keyword arg.  A single pass over the
  # keywords both renames 'scale' -> 'l' and remembers the 'scope' keyword
  # for removal (removing while iterating would skip entries).
  scope_keyword = None
  for keyword in node.keywords:
    if keyword.arg == "scale":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Renaming scale arg of regularizer\n"))
      keyword.arg = "l"
    if keyword.arg == "scope":
      scope_keyword = keyword

  # Remove the scope keyword or arg if it is present.  scope can arrive
  # either as a keyword (handled first) or as a second positional arg
  # (handled by truncating node.args below).
  if scope_keyword:
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l1_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l1\n"))
    node.keywords.remove(scope_keyword)
  if len(node.args) > 1:
    node.args = node.args[:1]
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l1_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l1\n"))

  # Rewrite the callee to tf.keras.regularizers.l1, preserving the original
  # source position so downstream tooling keeps accurate line/col info.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "l1"
  return node
"def",
"_contrib_layers_l1_regularizer_transformer",
"(",
"parent",
",",
"node",
",",
"full_name",
",",
"name",
",",
"logs",
")",
":",
"# Check if we have a scale or scope keyword arg",
"scope_keyword",
"=",
"None",
"for",
"keyword",
"in",
"node",
".",
"keywords",
":",
"if",
"keyword",
".",
"arg",
"==",
"\"scale\"",
":",
"logs",
".",
"append",
"(",
"(",
"ast_edits",
".",
"INFO",
",",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"\"Renaming scale arg of regularizer\\n\"",
")",
")",
"keyword",
".",
"arg",
"=",
"\"l\"",
"if",
"keyword",
".",
"arg",
"==",
"\"scope\"",
":",
"scope_keyword",
"=",
"keyword",
"# Remove the scope keyword or arg if it is present",
"if",
"scope_keyword",
":",
"logs",
".",
"append",
"(",
"(",
"ast_edits",
".",
"INFO",
",",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"\"Dropping scope arg from tf.contrib.layers.l1_regularizer,\"",
"\" because it is unsupported in tf.keras.regularizers.l1\\n\"",
")",
")",
"node",
".",
"keywords",
".",
"remove",
"(",
"scope_keyword",
")",
"if",
"len",
"(",
"node",
".",
"args",
")",
">",
"1",
":",
"node",
".",
"args",
"=",
"node",
".",
"args",
"[",
":",
"1",
"]",
"logs",
".",
"append",
"(",
"(",
"ast_edits",
".",
"INFO",
",",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
",",
"\"Dropping scope arg from tf.contrib.layers.l1_regularizer,\"",
"\" because it is unsupported in tf.keras.regularizers.l1\\n\"",
")",
")",
"lineno",
"=",
"node",
".",
"func",
".",
"value",
".",
"lineno",
"col_offset",
"=",
"node",
".",
"func",
".",
"value",
".",
"col_offset",
"node",
".",
"func",
".",
"value",
"=",
"ast_edits",
".",
"full_name_node",
"(",
"\"tf.keras.regularizers\"",
")",
"node",
".",
"func",
".",
"value",
".",
"lineno",
"=",
"lineno",
"node",
".",
"func",
".",
"value",
".",
"col_offset",
"=",
"col_offset",
"node",
".",
"func",
".",
"attr",
"=",
"\"l1\"",
"return",
"node"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/compatibility/tf_upgrade_v2.py#L2325-L2361 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py3/pkg_resources/__init__.py | python | IMetadataProvider.get_metadata | (name) | The named metadata resource as a string | The named metadata resource as a string | [
"The",
"named",
"metadata",
"resource",
"as",
"a",
"string"
def get_metadata(name):
    """Return the named metadata resource as a string.

    Interface method: the body is intentionally empty here, so concrete
    providers are expected to implement the lookup of *name* and return
    its contents.
    """
"def",
"get_metadata",
"(",
"name",
")",
":"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/pkg_resources/__init__.py#L496-L497 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | Joystick.GetManufacturerId | (*args, **kwargs) | return _misc_.Joystick_GetManufacturerId(*args, **kwargs) | GetManufacturerId(self) -> int | GetManufacturerId(self) -> int | [
"GetManufacturerId",
"(",
"self",
")",
"-",
">",
"int"
def GetManufacturerId(*args, **kwargs):
    """GetManufacturerId(self) -> int

    SWIG-generated wrapper: forwards all arguments unchanged to the
    native extension module.  Presumably returns the numeric
    manufacturer id reported by the joystick device — behavior lives in
    the underlying wxWidgets C++ implementation.
    """
    # Delegate directly to the compiled _misc_ extension.
    return _misc_.Joystick_GetManufacturerId(*args, **kwargs)
"def",
"GetManufacturerId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"Joystick_GetManufacturerId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L2174-L2176 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py2/scipy/linalg/matfuncs.py | python | cosm | (A) | Compute the matrix cosine.
This routine uses expm to compute the matrix exponentials.
Parameters
----------
A : (N, N) array_like
Input array
Returns
-------
cosm : (N, N) ndarray
Matrix cosine of A
Examples
--------
>>> from scipy.linalg import expm, sinm, cosm
Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
applied to a matrix:
>>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
>>> expm(1j*a)
array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
[ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
>>> cosm(a) + 1j*sinm(a)
array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
[ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) | Compute the matrix cosine. | [
"Compute",
"the",
"matrix",
"cosine",
"."
def cosm(A):
    """
    Compute the matrix cosine of a square matrix.

    Evaluated through the matrix exponential `expm` using the identity
    cos(A) = (exp(iA) + exp(-iA)) / 2.  For real input a single
    exponential suffices, since cos(A) = Re(exp(iA)).

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A

    Examples
    --------
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    """
    A = _asarray_square(A)
    if not np.iscomplexobj(A):
        # Real input: one exponential and a real part.
        return expm(1j * A).real
    return 0.5 * (expm(1j * A) + expm(-1j * A))
"def",
"cosm",
"(",
"A",
")",
":",
"A",
"=",
"_asarray_square",
"(",
"A",
")",
"if",
"np",
".",
"iscomplexobj",
"(",
"A",
")",
":",
"return",
"0.5",
"*",
"(",
"expm",
"(",
"1j",
"*",
"A",
")",
"+",
"expm",
"(",
"-",
"1j",
"*",
"A",
")",
")",
"else",
":",
"return",
"expm",
"(",
"1j",
"*",
"A",
")",
".",
"real"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/linalg/matfuncs.py#L259-L295 | ||
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/contrib/factorization/python/ops/gmm.py | python | GMM.score | (self, x, batch_size=None) | return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES]) | Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score. | Predict total sum of distances to nearest clusters. | [
"Predict",
"total",
"sum",
"of",
"distances",
"to",
"nearest",
"clusters",
"."
def score(self, x, batch_size=None):
    """Return the total score: the summed distances to nearest clusters.

    Args:
      x: 2-D matrix or iterator of input points.
      batch_size: batch size used when querying the model.

    Returns:
      The total score over all inputs.
    """
    eval_results = self.evaluate(x=x, batch_size=batch_size)
    return np.sum(eval_results[GMM.SCORES])
"def",
"score",
"(",
"self",
",",
"x",
",",
"batch_size",
"=",
"None",
")",
":",
"return",
"np",
".",
"sum",
"(",
"self",
".",
"evaluate",
"(",
"x",
"=",
"x",
",",
"batch_size",
"=",
"batch_size",
")",
"[",
"GMM",
".",
"SCORES",
"]",
")"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/factorization/python/ops/gmm.py#L129-L139 | |
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/python/ops/rnn.py | python | _infer_state_dtype | (explicit_dtype, state) | Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty. | Infer the dtype of an RNN state. | [
"Infer",
"the",
"dtype",
"of",
"an",
"RNN",
"state",
"."
] | def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype | [
"def",
"_infer_state_dtype",
"(",
"explicit_dtype",
",",
"state",
")",
":",
"if",
"explicit_dtype",
"is",
"not",
"None",
":",
"return",
"explicit_dtype",
"elif",
"nest",
".",
"is_sequence",
"(",
"state",
")",
":",
"inferred_dtypes",
"=",
"[",
"element",
".",
"dtype",
"for",
"element",
"in",
"nest",
".",
"flatten",
"(",
"state",
")",
"]",
"if",
"not",
"inferred_dtypes",
":",
"raise",
"ValueError",
"(",
"\"Unable to infer dtype from empty state.\"",
")",
"all_same",
"=",
"all",
"(",
"[",
"x",
"==",
"inferred_dtypes",
"[",
"0",
"]",
"for",
"x",
"in",
"inferred_dtypes",
"]",
")",
"if",
"not",
"all_same",
":",
"raise",
"ValueError",
"(",
"\"State has tensors of different inferred_dtypes. Unable to infer a \"",
"\"single representative dtype.\"",
")",
"return",
"inferred_dtypes",
"[",
"0",
"]",
"else",
":",
"return",
"state",
".",
"dtype"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/rnn.py#L43-L70 | ||
facebook/pyre2 | 053612cd79dab923444454d0035835422e99a632 | re2.py | python | fullmatch | (pattern, string) | return _compile(pattern, error).fullmatch(string) | Try to apply the pattern to the entire string, returning
a match object, or None if no match was found. | Try to apply the pattern to the entire string, returning
a match object, or None if no match was found. | [
"Try",
"to",
"apply",
"the",
"pattern",
"to",
"the",
"entire",
"string",
"returning",
"a",
"match",
"object",
"or",
"None",
"if",
"no",
"match",
"was",
"found",
"."
def fullmatch(pattern, string):
    """Apply *pattern* against the entirety of *string*.

    Returns a match object if the whole string matches, or None when no
    match is found.
    """
    compiled = _compile(pattern, error)
    return compiled.fullmatch(string)
"def",
"fullmatch",
"(",
"pattern",
",",
"string",
")",
":",
"return",
"_compile",
"(",
"pattern",
",",
"error",
")",
".",
"fullmatch",
"(",
"string",
")"
] | https://github.com/facebook/pyre2/blob/053612cd79dab923444454d0035835422e99a632/re2.py#L68-L71 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/distutils/ccompiler.py | python | CCompiler._fix_compile_args | (self, output_dir, macros, include_dirs) | return output_dir, macros, include_dirs | Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None. | Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None. | [
"Typecheck",
"and",
"fix",
"-",
"up",
"some",
"of",
"the",
"arguments",
"to",
"the",
"compile",
"()",
"method",
"and",
"return",
"fixed",
"-",
"up",
"values",
".",
"Specifically",
":",
"if",
"output_dir",
"is",
"None",
"replaces",
"it",
"with",
"self",
".",
"output_dir",
";",
"ensures",
"that",
"macros",
"is",
"a",
"list",
"and",
"augments",
"it",
"with",
"self",
".",
"macros",
";",
"ensures",
"that",
"include_dirs",
"is",
"a",
"list",
"and",
"augments",
"it",
"with",
"self",
".",
"include_dirs",
".",
"Guarantees",
"that",
"the",
"returned",
"values",
"are",
"of",
"the",
"correct",
"type",
"i",
".",
"e",
".",
"for",
"output_dir",
"either",
"string",
"or",
"None",
"and",
"for",
"macros",
"and",
"include_dirs",
"either",
"list",
"or",
"None",
"."
def _fix_compile_args(self, output_dir, macros, include_dirs):
    """Typecheck and fix-up some of the arguments to the 'compile()'
    method, and return fixed-up values.  Specifically: if 'output_dir'
    is None, replaces it with 'self.output_dir'; ensures that 'macros'
    is a list, and augments it with 'self.macros'; ensures that
    'include_dirs' is a list, and augments it with 'self.include_dirs'.
    Guarantees that the returned values are of the correct type,
    i.e. for 'output_dir' either string or None, and for 'macros' and
    'include_dirs' either list or None.

    NOTE: this module targets Python 2 (old-style ``raise X, "msg"``
    statements below).
    """
    # output_dir: default to the compiler-wide setting; otherwise must
    # be a string.
    if output_dir is None:
        output_dir = self.output_dir
    elif not isinstance(output_dir, str):
        raise TypeError, "'output_dir' must be a string or None"

    # macros: caller-supplied macros are prepended to the compiler-wide
    # list (so per-call definitions take precedence in most compilers).
    if macros is None:
        macros = self.macros
    elif isinstance(macros, list):
        macros = macros + (self.macros or [])
    else:
        raise TypeError, "'macros' (if supplied) must be a list of tuples"

    # include_dirs: a tuple is accepted but normalized to a list, again
    # with caller-supplied dirs searched before the compiler-wide ones.
    if include_dirs is None:
        include_dirs = self.include_dirs
    elif isinstance(include_dirs, (list, tuple)):
        include_dirs = list (include_dirs) + (self.include_dirs or [])
    else:
        raise TypeError, \
              "'include_dirs' (if supplied) must be a list of strings"

    return output_dir, macros, include_dirs
return output_dir, macros, include_dirs | [
"def",
"_fix_compile_args",
"(",
"self",
",",
"output_dir",
",",
"macros",
",",
"include_dirs",
")",
":",
"if",
"output_dir",
"is",
"None",
":",
"output_dir",
"=",
"self",
".",
"output_dir",
"elif",
"not",
"isinstance",
"(",
"output_dir",
",",
"str",
")",
":",
"raise",
"TypeError",
",",
"\"'output_dir' must be a string or None\"",
"if",
"macros",
"is",
"None",
":",
"macros",
"=",
"self",
".",
"macros",
"elif",
"isinstance",
"(",
"macros",
",",
"list",
")",
":",
"macros",
"=",
"macros",
"+",
"(",
"self",
".",
"macros",
"or",
"[",
"]",
")",
"else",
":",
"raise",
"TypeError",
",",
"\"'macros' (if supplied) must be a list of tuples\"",
"if",
"include_dirs",
"is",
"None",
":",
"include_dirs",
"=",
"self",
".",
"include_dirs",
"elif",
"isinstance",
"(",
"include_dirs",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"include_dirs",
"=",
"list",
"(",
"include_dirs",
")",
"+",
"(",
"self",
".",
"include_dirs",
"or",
"[",
"]",
")",
"else",
":",
"raise",
"TypeError",
",",
"\"'include_dirs' (if supplied) must be a list of strings\"",
"return",
"output_dir",
",",
"macros",
",",
"include_dirs"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/distutils/ccompiler.py#L376-L406 | |
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/model/coordinates.py | python | Point.localCoordinates | (self) | return self._localCoordinates[:] | Returns the coordinates of this point in its parent Frame | Returns the coordinates of this point in its parent Frame | [
"Returns",
"the",
"coordinates",
"of",
"this",
"point",
"in",
"its",
"parent",
"Frame"
def localCoordinates(self):
    """Return a copy of this point's coordinates in its parent Frame.

    A shallow slice copy is returned so callers cannot mutate the
    point's internal state.
    """
    coords = self._localCoordinates
    return coords[:]
"def",
"localCoordinates",
"(",
"self",
")",
":",
"return",
"self",
".",
"_localCoordinates",
"[",
":",
"]"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/model/coordinates.py#L137-L139 | |
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/incubate/fleet/utils/fleet_util.py | python | FleetUtil.get_global_metrics | (self,
scope=fluid.global_scope(),
stat_pos_name="_generated_var_2",
stat_neg_name="_generated_var_3",
sqrerr_name="sqrerr",
abserr_name="abserr",
prob_name="prob",
q_name="q",
pos_ins_num_name="pos",
total_ins_num_name="total") | return [
auc, bucket_error, mae, rmse, return_actual_ctr, predicted_ctr,
copc, mean_predict_qvalue, int(total_ins_num)
] | r"""
get global metrics, including auc, bucket_error, mae, rmse,
actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num.
Args:
scope(Scope): Scope object, default is fluid.global_scope()
stat_pos_name(str): name of auc pos bucket Variable
stat_neg_name(str): name of auc neg bucket Variable
sqrerr_name(str): name of sqrerr Variable
abserr_name(str): name of abserr Variable
prob_name(str): name of prob Variable
q_name(str): name of q Variable
pos_ins_num_name(str): name of pos ins num Variable
total_ins_num_name(str): name of total ins num Variable
Returns:
[auc, bucket_error, mae, rmse, actual_ctr, predicted_ctr, copc,
mean_predict_qvalue, total_ins_num]
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
metric_list = fleet_util.get_global_metrics(myscope,
stat_pos.name,
stat_neg.name,
local_sqrerr.name,
local_abserr.name,
local_prob.name,
local_q.name,
local_pos_ins.name,
local_total_ins.name)
# below is part of example model
label = fluid.layers.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0, append_batch_size=False)
emb = my_slot_net(slots, label) # emb can be fc layer of size 1
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
stat_neg] = fluid.layers.auc(input=binary_predict,\
label=label, curve='ROC',\
num_thresholds=4096)
local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins,\
local_total_ins = fluid.contrib.layers.ctr_metric_bundle(\
similarity_norm, label) | r"""
get global metrics, including auc, bucket_error, mae, rmse,
actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num. | [
"r",
"get",
"global",
"metrics",
"including",
"auc",
"bucket_error",
"mae",
"rmse",
"actual_ctr",
"predicted_ctr",
"copc",
"mean_predict_qvalue",
"total_ins_num",
"."
] | def get_global_metrics(self,
scope=fluid.global_scope(),
stat_pos_name="_generated_var_2",
stat_neg_name="_generated_var_3",
sqrerr_name="sqrerr",
abserr_name="abserr",
prob_name="prob",
q_name="q",
pos_ins_num_name="pos",
total_ins_num_name="total"):
r"""
get global metrics, including auc, bucket_error, mae, rmse,
actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num.
Args:
scope(Scope): Scope object, default is fluid.global_scope()
stat_pos_name(str): name of auc pos bucket Variable
stat_neg_name(str): name of auc neg bucket Variable
sqrerr_name(str): name of sqrerr Variable
abserr_name(str): name of abserr Variable
prob_name(str): name of prob Variable
q_name(str): name of q Variable
pos_ins_num_name(str): name of pos ins num Variable
total_ins_num_name(str): name of total ins num Variable
Returns:
[auc, bucket_error, mae, rmse, actual_ctr, predicted_ctr, copc,
mean_predict_qvalue, total_ins_num]
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
metric_list = fleet_util.get_global_metrics(myscope,
stat_pos.name,
stat_neg.name,
local_sqrerr.name,
local_abserr.name,
local_prob.name,
local_q.name,
local_pos_ins.name,
local_total_ins.name)
# below is part of example model
label = fluid.layers.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0, append_batch_size=False)
emb = my_slot_net(slots, label) # emb can be fc layer of size 1
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
stat_neg] = fluid.layers.auc(input=binary_predict,\
label=label, curve='ROC',\
num_thresholds=4096)
local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins,\
local_total_ins = fluid.contrib.layers.ctr_metric_bundle(\
similarity_norm, label)
"""
if scope.find_var(stat_pos_name) is None or \
scope.find_var(stat_neg_name) is None:
self.rank0_print("not found auc bucket")
return [None] * 9
elif scope.find_var(sqrerr_name) is None:
self.rank0_print("not found sqrerr_name=%s" % sqrerr_name)
return [None] * 9
elif scope.find_var(abserr_name) is None:
self.rank0_print("not found abserr_name=%s" % abserr_name)
return [None] * 9
elif scope.find_var(prob_name) is None:
self.rank0_print("not found prob_name=%s" % prob_name)
return [None] * 9
elif scope.find_var(q_name) is None:
self.rank0_print("not found q_name=%s" % q_name)
return [None] * 9
elif scope.find_var(pos_ins_num_name) is None:
self.rank0_print("not found pos_ins_num_name=%s" % pos_ins_num_name)
return [None] * 9
elif scope.find_var(total_ins_num_name) is None:
self.rank0_print("not found total_ins_num_name=%s" % \
total_ins_num_name)
return [None] * 9
# barrier worker to ensure all workers finished training
fleet._role_maker._barrier_worker()
# get auc
auc = self.get_global_auc(scope, stat_pos_name, stat_neg_name)
pos = np.array(scope.find_var(stat_pos_name).get_tensor())
# auc pos bucket shape
old_pos_shape = np.array(pos.shape)
# reshape to one dim
pos = pos.reshape(-1)
global_pos = np.copy(pos) * 0
# mpi allreduce
fleet._role_maker._all_reduce(pos, global_pos)
# reshape to its original shape
global_pos = global_pos.reshape(old_pos_shape)
# auc neg bucket
neg = np.array(scope.find_var(stat_neg_name).get_tensor())
old_neg_shape = np.array(neg.shape)
neg = neg.reshape(-1)
global_neg = np.copy(neg) * 0
fleet._role_maker._all_reduce(neg, global_neg)
global_neg = global_neg.reshape(old_neg_shape)
num_bucket = len(global_pos[0])
def get_metric(name):
metric = np.array(scope.find_var(name).get_tensor())
old_metric_shape = np.array(metric.shape)
metric = metric.reshape(-1)
global_metric = np.copy(metric) * 0
fleet._role_maker._all_reduce(metric, global_metric)
global_metric = global_metric.reshape(old_metric_shape)
return global_metric[0]
global_sqrerr = get_metric(sqrerr_name)
global_abserr = get_metric(abserr_name)
global_prob = get_metric(prob_name)
global_q_value = get_metric(q_name)
# note: get ins_num from auc bucket is not actual value,
# so get it from metric op
pos_ins_num = get_metric(pos_ins_num_name)
total_ins_num = get_metric(total_ins_num_name)
neg_ins_num = total_ins_num - pos_ins_num
mae = global_abserr / total_ins_num
rmse = math.sqrt(global_sqrerr / total_ins_num)
return_actual_ctr = pos_ins_num / total_ins_num
predicted_ctr = global_prob / total_ins_num
mean_predict_qvalue = global_q_value / total_ins_num
copc = 0.0
if abs(predicted_ctr > 1e-6):
copc = return_actual_ctr / predicted_ctr
# calculate bucket error
last_ctr = -1.0
impression_sum = 0.0
ctr_sum = 0.0
click_sum = 0.0
error_sum = 0.0
error_count = 0.0
click = 0.0
show = 0.0
ctr = 0.0
adjust_ctr = 0.0
relative_error = 0.0
actual_ctr = 0.0
relative_ctr_error = 0.0
k_max_span = 0.01
k_relative_error_bound = 0.05
for i in range(num_bucket):
click = global_pos[0][i]
show = global_pos[0][i] + global_neg[0][i]
ctr = float(i) / num_bucket
if abs(ctr - last_ctr) > k_max_span:
last_ctr = ctr
impression_sum = 0.0
ctr_sum = 0.0
click_sum = 0.0
impression_sum += show
ctr_sum += ctr * show
click_sum += click
if impression_sum == 0:
continue
adjust_ctr = ctr_sum / impression_sum
if adjust_ctr == 0:
continue
relative_error = \
math.sqrt((1 - adjust_ctr) / (adjust_ctr * impression_sum))
if relative_error < k_relative_error_bound:
actual_ctr = click_sum / impression_sum
relative_ctr_error = abs(actual_ctr / adjust_ctr - 1)
error_sum += relative_ctr_error * impression_sum
error_count += impression_sum
last_ctr = -1
bucket_error = error_sum / error_count if error_count > 0 else 0.0
return [
auc, bucket_error, mae, rmse, return_actual_ctr, predicted_ctr,
copc, mean_predict_qvalue, int(total_ins_num)
] | [
"def",
"get_global_metrics",
"(",
"self",
",",
"scope",
"=",
"fluid",
".",
"global_scope",
"(",
")",
",",
"stat_pos_name",
"=",
"\"_generated_var_2\"",
",",
"stat_neg_name",
"=",
"\"_generated_var_3\"",
",",
"sqrerr_name",
"=",
"\"sqrerr\"",
",",
"abserr_name",
"=",
"\"abserr\"",
",",
"prob_name",
"=",
"\"prob\"",
",",
"q_name",
"=",
"\"q\"",
",",
"pos_ins_num_name",
"=",
"\"pos\"",
",",
"total_ins_num_name",
"=",
"\"total\"",
")",
":",
"if",
"scope",
".",
"find_var",
"(",
"stat_pos_name",
")",
"is",
"None",
"or",
"scope",
".",
"find_var",
"(",
"stat_neg_name",
")",
"is",
"None",
":",
"self",
".",
"rank0_print",
"(",
"\"not found auc bucket\"",
")",
"return",
"[",
"None",
"]",
"*",
"9",
"elif",
"scope",
".",
"find_var",
"(",
"sqrerr_name",
")",
"is",
"None",
":",
"self",
".",
"rank0_print",
"(",
"\"not found sqrerr_name=%s\"",
"%",
"sqrerr_name",
")",
"return",
"[",
"None",
"]",
"*",
"9",
"elif",
"scope",
".",
"find_var",
"(",
"abserr_name",
")",
"is",
"None",
":",
"self",
".",
"rank0_print",
"(",
"\"not found abserr_name=%s\"",
"%",
"abserr_name",
")",
"return",
"[",
"None",
"]",
"*",
"9",
"elif",
"scope",
".",
"find_var",
"(",
"prob_name",
")",
"is",
"None",
":",
"self",
".",
"rank0_print",
"(",
"\"not found prob_name=%s\"",
"%",
"prob_name",
")",
"return",
"[",
"None",
"]",
"*",
"9",
"elif",
"scope",
".",
"find_var",
"(",
"q_name",
")",
"is",
"None",
":",
"self",
".",
"rank0_print",
"(",
"\"not found q_name=%s\"",
"%",
"q_name",
")",
"return",
"[",
"None",
"]",
"*",
"9",
"elif",
"scope",
".",
"find_var",
"(",
"pos_ins_num_name",
")",
"is",
"None",
":",
"self",
".",
"rank0_print",
"(",
"\"not found pos_ins_num_name=%s\"",
"%",
"pos_ins_num_name",
")",
"return",
"[",
"None",
"]",
"*",
"9",
"elif",
"scope",
".",
"find_var",
"(",
"total_ins_num_name",
")",
"is",
"None",
":",
"self",
".",
"rank0_print",
"(",
"\"not found total_ins_num_name=%s\"",
"%",
"total_ins_num_name",
")",
"return",
"[",
"None",
"]",
"*",
"9",
"# barrier worker to ensure all workers finished training",
"fleet",
".",
"_role_maker",
".",
"_barrier_worker",
"(",
")",
"# get auc",
"auc",
"=",
"self",
".",
"get_global_auc",
"(",
"scope",
",",
"stat_pos_name",
",",
"stat_neg_name",
")",
"pos",
"=",
"np",
".",
"array",
"(",
"scope",
".",
"find_var",
"(",
"stat_pos_name",
")",
".",
"get_tensor",
"(",
")",
")",
"# auc pos bucket shape",
"old_pos_shape",
"=",
"np",
".",
"array",
"(",
"pos",
".",
"shape",
")",
"# reshape to one dim",
"pos",
"=",
"pos",
".",
"reshape",
"(",
"-",
"1",
")",
"global_pos",
"=",
"np",
".",
"copy",
"(",
"pos",
")",
"*",
"0",
"# mpi allreduce",
"fleet",
".",
"_role_maker",
".",
"_all_reduce",
"(",
"pos",
",",
"global_pos",
")",
"# reshape to its original shape",
"global_pos",
"=",
"global_pos",
".",
"reshape",
"(",
"old_pos_shape",
")",
"# auc neg bucket",
"neg",
"=",
"np",
".",
"array",
"(",
"scope",
".",
"find_var",
"(",
"stat_neg_name",
")",
".",
"get_tensor",
"(",
")",
")",
"old_neg_shape",
"=",
"np",
".",
"array",
"(",
"neg",
".",
"shape",
")",
"neg",
"=",
"neg",
".",
"reshape",
"(",
"-",
"1",
")",
"global_neg",
"=",
"np",
".",
"copy",
"(",
"neg",
")",
"*",
"0",
"fleet",
".",
"_role_maker",
".",
"_all_reduce",
"(",
"neg",
",",
"global_neg",
")",
"global_neg",
"=",
"global_neg",
".",
"reshape",
"(",
"old_neg_shape",
")",
"num_bucket",
"=",
"len",
"(",
"global_pos",
"[",
"0",
"]",
")",
"def",
"get_metric",
"(",
"name",
")",
":",
"metric",
"=",
"np",
".",
"array",
"(",
"scope",
".",
"find_var",
"(",
"name",
")",
".",
"get_tensor",
"(",
")",
")",
"old_metric_shape",
"=",
"np",
".",
"array",
"(",
"metric",
".",
"shape",
")",
"metric",
"=",
"metric",
".",
"reshape",
"(",
"-",
"1",
")",
"global_metric",
"=",
"np",
".",
"copy",
"(",
"metric",
")",
"*",
"0",
"fleet",
".",
"_role_maker",
".",
"_all_reduce",
"(",
"metric",
",",
"global_metric",
")",
"global_metric",
"=",
"global_metric",
".",
"reshape",
"(",
"old_metric_shape",
")",
"return",
"global_metric",
"[",
"0",
"]",
"global_sqrerr",
"=",
"get_metric",
"(",
"sqrerr_name",
")",
"global_abserr",
"=",
"get_metric",
"(",
"abserr_name",
")",
"global_prob",
"=",
"get_metric",
"(",
"prob_name",
")",
"global_q_value",
"=",
"get_metric",
"(",
"q_name",
")",
"# note: get ins_num from auc bucket is not actual value,",
"# so get it from metric op",
"pos_ins_num",
"=",
"get_metric",
"(",
"pos_ins_num_name",
")",
"total_ins_num",
"=",
"get_metric",
"(",
"total_ins_num_name",
")",
"neg_ins_num",
"=",
"total_ins_num",
"-",
"pos_ins_num",
"mae",
"=",
"global_abserr",
"/",
"total_ins_num",
"rmse",
"=",
"math",
".",
"sqrt",
"(",
"global_sqrerr",
"/",
"total_ins_num",
")",
"return_actual_ctr",
"=",
"pos_ins_num",
"/",
"total_ins_num",
"predicted_ctr",
"=",
"global_prob",
"/",
"total_ins_num",
"mean_predict_qvalue",
"=",
"global_q_value",
"/",
"total_ins_num",
"copc",
"=",
"0.0",
"if",
"abs",
"(",
"predicted_ctr",
">",
"1e-6",
")",
":",
"copc",
"=",
"return_actual_ctr",
"/",
"predicted_ctr",
"# calculate bucket error",
"last_ctr",
"=",
"-",
"1.0",
"impression_sum",
"=",
"0.0",
"ctr_sum",
"=",
"0.0",
"click_sum",
"=",
"0.0",
"error_sum",
"=",
"0.0",
"error_count",
"=",
"0.0",
"click",
"=",
"0.0",
"show",
"=",
"0.0",
"ctr",
"=",
"0.0",
"adjust_ctr",
"=",
"0.0",
"relative_error",
"=",
"0.0",
"actual_ctr",
"=",
"0.0",
"relative_ctr_error",
"=",
"0.0",
"k_max_span",
"=",
"0.01",
"k_relative_error_bound",
"=",
"0.05",
"for",
"i",
"in",
"range",
"(",
"num_bucket",
")",
":",
"click",
"=",
"global_pos",
"[",
"0",
"]",
"[",
"i",
"]",
"show",
"=",
"global_pos",
"[",
"0",
"]",
"[",
"i",
"]",
"+",
"global_neg",
"[",
"0",
"]",
"[",
"i",
"]",
"ctr",
"=",
"float",
"(",
"i",
")",
"/",
"num_bucket",
"if",
"abs",
"(",
"ctr",
"-",
"last_ctr",
")",
">",
"k_max_span",
":",
"last_ctr",
"=",
"ctr",
"impression_sum",
"=",
"0.0",
"ctr_sum",
"=",
"0.0",
"click_sum",
"=",
"0.0",
"impression_sum",
"+=",
"show",
"ctr_sum",
"+=",
"ctr",
"*",
"show",
"click_sum",
"+=",
"click",
"if",
"impression_sum",
"==",
"0",
":",
"continue",
"adjust_ctr",
"=",
"ctr_sum",
"/",
"impression_sum",
"if",
"adjust_ctr",
"==",
"0",
":",
"continue",
"relative_error",
"=",
"math",
".",
"sqrt",
"(",
"(",
"1",
"-",
"adjust_ctr",
")",
"/",
"(",
"adjust_ctr",
"*",
"impression_sum",
")",
")",
"if",
"relative_error",
"<",
"k_relative_error_bound",
":",
"actual_ctr",
"=",
"click_sum",
"/",
"impression_sum",
"relative_ctr_error",
"=",
"abs",
"(",
"actual_ctr",
"/",
"adjust_ctr",
"-",
"1",
")",
"error_sum",
"+=",
"relative_ctr_error",
"*",
"impression_sum",
"error_count",
"+=",
"impression_sum",
"last_ctr",
"=",
"-",
"1",
"bucket_error",
"=",
"error_sum",
"/",
"error_count",
"if",
"error_count",
">",
"0",
"else",
"0.0",
"return",
"[",
"auc",
",",
"bucket_error",
",",
"mae",
",",
"rmse",
",",
"return_actual_ctr",
",",
"predicted_ctr",
",",
"copc",
",",
"mean_predict_qvalue",
",",
"int",
"(",
"total_ins_num",
")",
"]"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/incubate/fleet/utils/fleet_util.py#L1248-L1435 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/fftpack/pseudo_diffs.py | python | sc_diff | (x, a, b, period=None, _cache=_cache) | return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) | Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
y_0 = 0
Parameters
----------
x : array_like
Input array.
a,b : float
Defines the parameters of the sinh/cosh pseudo-differential
operator.
period : float, optional
The period of the sequence x. Default is 2*pi.
Notes
-----
``sc_diff(cs_diff(x,a,b),b,a) == x``
For even ``len(x)``, the Nyquist mode of x is taken as zero. | Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x. | [
"Return",
"(",
"a",
"b",
")",
"-",
"sinh",
"/",
"cosh",
"pseudo",
"-",
"derivative",
"of",
"a",
"periodic",
"sequence",
"x",
"."
] | def sc_diff(x, a, b, period=None, _cache=_cache):
"""
Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
y_0 = 0
Parameters
----------
x : array_like
Input array.
a,b : float
Defines the parameters of the sinh/cosh pseudo-differential
operator.
period : float, optional
The period of the sequence x. Default is 2*pi.
Notes
-----
``sc_diff(cs_diff(x,a,b),b,a) == x``
For even ``len(x)``, the Nyquist mode of x is taken as zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return sc_diff(tmp.real,a,b,period) + \
1j*sc_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,a=a,b=b):
if k:
return sinh(a*k)/cosh(b*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) | [
"def",
"sc_diff",
"(",
"x",
",",
"a",
",",
"b",
",",
"period",
"=",
"None",
",",
"_cache",
"=",
"_cache",
")",
":",
"tmp",
"=",
"asarray",
"(",
"x",
")",
"if",
"iscomplexobj",
"(",
"tmp",
")",
":",
"return",
"sc_diff",
"(",
"tmp",
".",
"real",
",",
"a",
",",
"b",
",",
"period",
")",
"+",
"1j",
"*",
"sc_diff",
"(",
"tmp",
".",
"imag",
",",
"a",
",",
"b",
",",
"period",
")",
"if",
"period",
"is",
"not",
"None",
":",
"a",
"=",
"a",
"*",
"2",
"*",
"pi",
"/",
"period",
"b",
"=",
"b",
"*",
"2",
"*",
"pi",
"/",
"period",
"n",
"=",
"len",
"(",
"x",
")",
"omega",
"=",
"_cache",
".",
"get",
"(",
"(",
"n",
",",
"a",
",",
"b",
")",
")",
"if",
"omega",
"is",
"None",
":",
"if",
"len",
"(",
"_cache",
")",
">",
"20",
":",
"while",
"_cache",
":",
"_cache",
".",
"popitem",
"(",
")",
"def",
"kernel",
"(",
"k",
",",
"a",
"=",
"a",
",",
"b",
"=",
"b",
")",
":",
"if",
"k",
":",
"return",
"sinh",
"(",
"a",
"*",
"k",
")",
"/",
"cosh",
"(",
"b",
"*",
"k",
")",
"return",
"0",
"omega",
"=",
"convolve",
".",
"init_convolution_kernel",
"(",
"n",
",",
"kernel",
",",
"d",
"=",
"1",
")",
"_cache",
"[",
"(",
"n",
",",
"a",
",",
"b",
")",
"]",
"=",
"omega",
"overwrite_x",
"=",
"_datacopied",
"(",
"tmp",
",",
"x",
")",
"return",
"convolve",
".",
"convolve",
"(",
"tmp",
",",
"omega",
",",
"swap_real_imag",
"=",
"1",
",",
"overwrite_x",
"=",
"overwrite_x",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/fftpack/pseudo_diffs.py#L342-L389 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/hmac.py | python | HMAC.digest | (self) | return h.digest() | Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function. | Return the hash value of this hashing object. | [
"Return",
"the",
"hash",
"value",
"of",
"this",
"hashing",
"object",
"."
] | def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest() | [
"def",
"digest",
"(",
"self",
")",
":",
"h",
"=",
"self",
".",
"_current",
"(",
")",
"return",
"h",
".",
"digest",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/hmac.py#L126-L134 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/req/req_uninstall.py | python | StashedUninstallPathSet.rollback | (self) | Undoes the uninstall by moving stashed files back. | Undoes the uninstall by moving stashed files back. | [
"Undoes",
"the",
"uninstall",
"by",
"moving",
"stashed",
"files",
"back",
"."
] | def rollback(self):
# type: () -> None
"""Undoes the uninstall by moving stashed files back."""
for p in self._moves:
logger.info("Moving to %s\n from %s", *p)
for new_path, path in self._moves:
try:
logger.debug('Replacing %s from %s', new_path, path)
if os.path.isfile(new_path) or os.path.islink(new_path):
os.unlink(new_path)
elif os.path.isdir(new_path):
rmtree(new_path)
renames(path, new_path)
except OSError as ex:
logger.error("Failed to restore %s", new_path)
logger.debug("Exception: %s", ex)
self.commit() | [
"def",
"rollback",
"(",
"self",
")",
":",
"# type: () -> None",
"for",
"p",
"in",
"self",
".",
"_moves",
":",
"logger",
".",
"info",
"(",
"\"Moving to %s\\n from %s\"",
",",
"*",
"p",
")",
"for",
"new_path",
",",
"path",
"in",
"self",
".",
"_moves",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"'Replacing %s from %s'",
",",
"new_path",
",",
"path",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"new_path",
")",
"or",
"os",
".",
"path",
".",
"islink",
"(",
"new_path",
")",
":",
"os",
".",
"unlink",
"(",
"new_path",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"new_path",
")",
":",
"rmtree",
"(",
"new_path",
")",
"renames",
"(",
"path",
",",
"new_path",
")",
"except",
"OSError",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"\"Failed to restore %s\"",
",",
"new_path",
")",
"logger",
".",
"debug",
"(",
"\"Exception: %s\"",
",",
"ex",
")",
"self",
".",
"commit",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/req/req_uninstall.py#L300-L318 | ||
zju3dv/clean-pvnet | 5870c509e3cc205e1bb28910a7b1a9a3c8add9a8 | lib/utils/meshrenderer/pysixd/transform.py | python | scale_matrix | (factor, origin=None, direction=None) | return M | Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct) | Return matrix to scale by factor around origin in direction. | [
"Return",
"matrix",
"to",
"scale",
"by",
"factor",
"around",
"origin",
"in",
"direction",
"."
] | def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M | [
"def",
"scale_matrix",
"(",
"factor",
",",
"origin",
"=",
"None",
",",
"direction",
"=",
"None",
")",
":",
"if",
"direction",
"is",
"None",
":",
"# uniform scaling",
"M",
"=",
"numpy",
".",
"diag",
"(",
"[",
"factor",
",",
"factor",
",",
"factor",
",",
"1.0",
"]",
")",
"if",
"origin",
"is",
"not",
"None",
":",
"M",
"[",
":",
"3",
",",
"3",
"]",
"=",
"origin",
"[",
":",
"3",
"]",
"M",
"[",
":",
"3",
",",
"3",
"]",
"*=",
"1.0",
"-",
"factor",
"else",
":",
"# nonuniform scaling",
"direction",
"=",
"unit_vector",
"(",
"direction",
"[",
":",
"3",
"]",
")",
"factor",
"=",
"1.0",
"-",
"factor",
"M",
"=",
"numpy",
".",
"identity",
"(",
"4",
")",
"M",
"[",
":",
"3",
",",
":",
"3",
"]",
"-=",
"factor",
"*",
"numpy",
".",
"outer",
"(",
"direction",
",",
"direction",
")",
"if",
"origin",
"is",
"not",
"None",
":",
"M",
"[",
":",
"3",
",",
"3",
"]",
"=",
"(",
"factor",
"*",
"numpy",
".",
"dot",
"(",
"origin",
"[",
":",
"3",
"]",
",",
"direction",
")",
")",
"*",
"direction",
"return",
"M"
] | https://github.com/zju3dv/clean-pvnet/blob/5870c509e3cc205e1bb28910a7b1a9a3c8add9a8/lib/utils/meshrenderer/pysixd/transform.py#L386-L417 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/basic_session_run_hooks.py | python | _HookTimer.last_triggered_step | (self) | Returns the last triggered time step or None if never triggered. | Returns the last triggered time step or None if never triggered. | [
"Returns",
"the",
"last",
"triggered",
"time",
"step",
"or",
"None",
"if",
"never",
"triggered",
"."
] | def last_triggered_step(self):
"""Returns the last triggered time step or None if never triggered."""
raise NotImplementedError | [
"def",
"last_triggered_step",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/basic_session_run_hooks.py#L85-L87 | ||
PixarAnimationStudios/USD | faed18ce62c8736b02413635b584a2f637156bad | pxr/usdImaging/usdviewq/settings2.py | python | StateSource._registerChildStateSource | (self, child) | Registers a child StateSource with this source object. | Registers a child StateSource with this source object. | [
"Registers",
"a",
"child",
"StateSource",
"with",
"this",
"source",
"object",
"."
] | def _registerChildStateSource(self, child):
"""Registers a child StateSource with this source object."""
self._childStateSources[child._stateSourceName] = child | [
"def",
"_registerChildStateSource",
"(",
"self",
",",
"child",
")",
":",
"self",
".",
"_childStateSources",
"[",
"child",
".",
"_stateSourceName",
"]",
"=",
"child"
] | https://github.com/PixarAnimationStudios/USD/blob/faed18ce62c8736b02413635b584a2f637156bad/pxr/usdImaging/usdviewq/settings2.py#L52-L54 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.