nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/customtreectrl.py | python | GenericTreeItem.DeleteChildren | (self, tree) | Deletes the item children.
:param `tree`: the main :class:`CustomTreeCtrl` instance. | Deletes the item children. | [
"Deletes",
"the",
"item",
"children",
"."
] | def DeleteChildren(self, tree):
"""
Deletes the item children.
:param `tree`: the main :class:`CustomTreeCtrl` instance.
"""
for child in self._children:
if tree:
tree.SendDeleteEvent(child)
child.DeleteChildren(tree)
if child == tree._select_me:
tree._select_me = None
# We have to destroy the associated window
wnd = child.GetWindow()
if wnd:
wnd.Destroy()
child._wnd = None
if child in tree._itemWithWindow:
tree._itemWithWindow.remove(child)
del child
self._children = [] | [
"def",
"DeleteChildren",
"(",
"self",
",",
"tree",
")",
":",
"for",
"child",
"in",
"self",
".",
"_children",
":",
"if",
"tree",
":",
"tree",
".",
"SendDeleteEvent",
"(",
"child",
")",
"child",
".",
"DeleteChildren",
"(",
"tree",
")",
"if",
"child",
"==",
"tree",
".",
"_select_me",
":",
"tree",
".",
"_select_me",
"=",
"None",
"# We have to destroy the associated window",
"wnd",
"=",
"child",
".",
"GetWindow",
"(",
")",
"if",
"wnd",
":",
"wnd",
".",
"Destroy",
"(",
")",
"child",
".",
"_wnd",
"=",
"None",
"if",
"child",
"in",
"tree",
".",
"_itemWithWindow",
":",
"tree",
".",
"_itemWithWindow",
".",
"remove",
"(",
"child",
")",
"del",
"child",
"self",
".",
"_children",
"=",
"[",
"]"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/customtreectrl.py#L2378-L2405 | ||
nyuwireless-unipd/ns3-mmwave | 4ff9e87e8079764e04cbeccd8e85bff15ae16fb3 | src/visualizer/visualizer/core.py | python | Node.set_label | (self, label) | !
Set a label for the node.
@param self: class object.
@param label: label to set
@return: an exception if invalid parameter. | !
Set a label for the node. | [
"!",
"Set",
"a",
"label",
"for",
"the",
"node",
"."
] | def set_label(self, label):
"""!
Set a label for the node.
@param self: class object.
@param label: label to set
@return: an exception if invalid parameter.
"""
assert isinstance(label, basestring)
self._label = label
self._update_appearance() | [
"def",
"set_label",
"(",
"self",
",",
"label",
")",
":",
"assert",
"isinstance",
"(",
"label",
",",
"basestring",
")",
"self",
".",
"_label",
"=",
"label",
"self",
".",
"_update_appearance",
"(",
")"
] | https://github.com/nyuwireless-unipd/ns3-mmwave/blob/4ff9e87e8079764e04cbeccd8e85bff15ae16fb3/src/visualizer/visualizer/core.py#L195-L206 | ||
greenheartgames/greenworks | 3ea4ab490b56676de3f0a237c74bcfdb17323e60 | deps/cpplint/cpplint.py | python | CheckForHeaderGuard | (filename, clean_lines, error) | Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found. | Checks that the file contains a header guard. | [
"Checks",
"that",
"the",
"file",
"contains",
"a",
"header",
"guard",
"."
] | def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar) | [
"def",
"CheckForHeaderGuard",
"(",
"filename",
",",
"clean_lines",
",",
"error",
")",
":",
"# Don't check for header guards if there are error suppression",
"# comments somewhere in this file.",
"#",
"# Because this is silencing a warning for a nonexistent line, we",
"# only support the very specific NOLINT(build/header_guard) syntax,",
"# and not the general NOLINT or NOLINT(*) syntax.",
"raw_lines",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"for",
"i",
"in",
"raw_lines",
":",
"if",
"Search",
"(",
"r'//\\s*NOLINT\\(build/header_guard\\)'",
",",
"i",
")",
":",
"return",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"ifndef",
"=",
"''",
"ifndef_linenum",
"=",
"0",
"define",
"=",
"''",
"endif",
"=",
"''",
"endif_linenum",
"=",
"0",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"raw_lines",
")",
":",
"linesplit",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"linesplit",
")",
">=",
"2",
":",
"# find the first occurrence of #ifndef and #define, save arg",
"if",
"not",
"ifndef",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"'#ifndef'",
":",
"# set ifndef to the header guard presented on the #ifndef line.",
"ifndef",
"=",
"linesplit",
"[",
"1",
"]",
"ifndef_linenum",
"=",
"linenum",
"if",
"not",
"define",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"'#define'",
":",
"define",
"=",
"linesplit",
"[",
"1",
"]",
"# find the last occurrence of #endif, save entire line",
"if",
"line",
".",
"startswith",
"(",
"'#endif'",
")",
":",
"endif",
"=",
"line",
"endif_linenum",
"=",
"linenum",
"if",
"not",
"ifndef",
"or",
"not",
"define",
"or",
"ifndef",
"!=",
"define",
":",
"error",
"(",
"filename",
",",
"0",
",",
"'build/header_guard'",
",",
"5",
",",
"'No #ifndef header guard found, suggested CPP variable is: %s'",
"%",
"cppvar",
")",
"return",
"# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__",
"# for backward compatibility.",
"if",
"ifndef",
"!=",
"cppvar",
":",
"error_level",
"=",
"0",
"if",
"ifndef",
"!=",
"cppvar",
"+",
"'_'",
":",
"error_level",
"=",
"5",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"ifndef_linenum",
"]",
",",
"ifndef_linenum",
",",
"error",
")",
"error",
"(",
"filename",
",",
"ifndef_linenum",
",",
"'build/header_guard'",
",",
"error_level",
",",
"'#ifndef header guard has wrong style, please use: %s'",
"%",
"cppvar",
")",
"# Check for \"//\" comments on endif line.",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"endif_linenum",
"]",
",",
"endif_linenum",
",",
"error",
")",
"match",
"=",
"Match",
"(",
"r'#endif\\s*//\\s*'",
"+",
"cppvar",
"+",
"r'(_)?\\b'",
",",
"endif",
")",
"if",
"match",
":",
"if",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'_'",
":",
"# Issue low severity warning for deprecated double trailing underscore",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"'build/header_guard'",
",",
"0",
",",
"'#endif line should be \"#endif // %s\"'",
"%",
"cppvar",
")",
"return",
"# Didn't find the corresponding \"//\" comment. If this file does not",
"# contain any \"//\" comments at all, it could be that the compiler",
"# only wants \"/**/\" comments, look for those instead.",
"no_single_line_comments",
"=",
"True",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"len",
"(",
"raw_lines",
")",
"-",
"1",
")",
":",
"line",
"=",
"raw_lines",
"[",
"i",
"]",
"if",
"Match",
"(",
"r'^(?:(?:\\'(?:\\.|[^\\'])*\\')|(?:\"(?:\\.|[^\"])*\")|[^\\'\"])*//'",
",",
"line",
")",
":",
"no_single_line_comments",
"=",
"False",
"break",
"if",
"no_single_line_comments",
":",
"match",
"=",
"Match",
"(",
"r'#endif\\s*/\\*\\s*'",
"+",
"cppvar",
"+",
"r'(_)?\\s*\\*/'",
",",
"endif",
")",
"if",
"match",
":",
"if",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'_'",
":",
"# Low severity warning for double trailing underscore",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"'build/header_guard'",
",",
"0",
",",
"'#endif line should be \"#endif /* %s */\"'",
"%",
"cppvar",
")",
"return",
"# Didn't find anything",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"'build/header_guard'",
",",
"5",
",",
"'#endif line should be \"#endif // %s\"'",
"%",
"cppvar",
")"
] | https://github.com/greenheartgames/greenworks/blob/3ea4ab490b56676de3f0a237c74bcfdb17323e60/deps/cpplint/cpplint.py#L1789-L1884 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_core.py | python | PyApp.SetMacAboutMenuItemId | (*args, **kwargs) | return _core_.PyApp_SetMacAboutMenuItemId(*args, **kwargs) | SetMacAboutMenuItemId(long val) | SetMacAboutMenuItemId(long val) | [
"SetMacAboutMenuItemId",
"(",
"long",
"val",
")"
] | def SetMacAboutMenuItemId(*args, **kwargs):
"""SetMacAboutMenuItemId(long val)"""
return _core_.PyApp_SetMacAboutMenuItemId(*args, **kwargs) | [
"def",
"SetMacAboutMenuItemId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"PyApp_SetMacAboutMenuItemId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L8170-L8172 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/build/waf-1.7.13/waflib/extras/msvs.py | python | msvs_generator.collect_projects | (self) | Fill the list self.all_projects with project objects
Fill the list of build targets | Fill the list self.all_projects with project objects
Fill the list of build targets | [
"Fill",
"the",
"list",
"self",
".",
"all_projects",
"with",
"project",
"objects",
"Fill",
"the",
"list",
"of",
"build",
"targets"
] | def collect_projects(self):
"""
Fill the list self.all_projects with project objects
Fill the list of build targets
"""
self.collect_targets()
self.add_aliases()
self.collect_dirs()
default_project = getattr(self, 'default_project', None)
def sortfun(x):
if x.name == default_project:
return ''
return getattr(x, 'path', None) and x.path.abspath() or x.name
self.all_projects.sort(key=sortfun) | [
"def",
"collect_projects",
"(",
"self",
")",
":",
"self",
".",
"collect_targets",
"(",
")",
"self",
".",
"add_aliases",
"(",
")",
"self",
".",
"collect_dirs",
"(",
")",
"default_project",
"=",
"getattr",
"(",
"self",
",",
"'default_project'",
",",
"None",
")",
"def",
"sortfun",
"(",
"x",
")",
":",
"if",
"x",
".",
"name",
"==",
"default_project",
":",
"return",
"''",
"return",
"getattr",
"(",
"x",
",",
"'path'",
",",
"None",
")",
"and",
"x",
".",
"path",
".",
"abspath",
"(",
")",
"or",
"x",
".",
"name",
"self",
".",
"all_projects",
".",
"sort",
"(",
"key",
"=",
"sortfun",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/extras/msvs.py#L743-L756 | ||
SoarGroup/Soar | a1c5e249499137a27da60533c72969eef3b8ab6b | scons/scons-local-4.1.0/SCons/Node/__init__.py | python | Node.add_ignore | (self, depend) | Adds dependencies to ignore. | Adds dependencies to ignore. | [
"Adds",
"dependencies",
"to",
"ignore",
"."
] | def add_ignore(self, depend):
"""Adds dependencies to ignore."""
try:
self._add_child(self.ignore, self.ignore_set, depend)
except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to ignore a non-Node dependency of %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e))) | [
"def",
"add_ignore",
"(",
"self",
",",
"depend",
")",
":",
"try",
":",
"self",
".",
"_add_child",
"(",
"self",
".",
"ignore",
",",
"self",
".",
"ignore_set",
",",
"depend",
")",
"except",
"TypeError",
"as",
"e",
":",
"e",
"=",
"e",
".",
"args",
"[",
"0",
"]",
"if",
"SCons",
".",
"Util",
".",
"is_List",
"(",
"e",
")",
":",
"s",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"e",
")",
")",
"else",
":",
"s",
"=",
"str",
"(",
"e",
")",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"attempted to ignore a non-Node dependency of %s:\\n\\t%s is a %s, not a Node\"",
"%",
"(",
"str",
"(",
"self",
")",
",",
"s",
",",
"type",
"(",
"e",
")",
")",
")"
] | https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Node/__init__.py#L1290-L1300 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py | python | Misc.focus_lastfor | (self) | return self._nametowidget(name) | Return the widget which would have the focus if top level
for this widget gets the focus from the window manager. | Return the widget which would have the focus if top level
for this widget gets the focus from the window manager. | [
"Return",
"the",
"widget",
"which",
"would",
"have",
"the",
"focus",
"if",
"top",
"level",
"for",
"this",
"widget",
"gets",
"the",
"focus",
"from",
"the",
"window",
"manager",
"."
] | def focus_lastfor(self):
"""Return the widget which would have the focus if top level
for this widget gets the focus from the window manager."""
name = self.tk.call('focus', '-lastfor', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name) | [
"def",
"focus_lastfor",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"tk",
".",
"call",
"(",
"'focus'",
",",
"'-lastfor'",
",",
"self",
".",
"_w",
")",
"if",
"name",
"==",
"'none'",
"or",
"not",
"name",
":",
"return",
"None",
"return",
"self",
".",
"_nametowidget",
"(",
"name",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py#L491-L496 | |
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/ops/check_ops.py | python | assert_none_equal | (
x, y, data=None, summarize=None, message=None, name=None) | Assert the condition `x != y` holds for all elements.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_none_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] != y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_none_equal".
Returns:
Op that raises `InvalidArgumentError` if `x != y` is ever False. | Assert the condition `x != y` holds for all elements. | [
"Assert",
"the",
"condition",
"x",
"!",
"=",
"y",
"holds",
"for",
"all",
"elements",
"."
] | def assert_none_equal(
x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x != y` holds for all elements.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_none_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] != y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_none_equal".
Returns:
Op that raises `InvalidArgumentError` if `x != y` is ever False.
"""
message = message or ''
with ops.name_scope(name, 'assert_none_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x != y did not hold for every single element:'
'x (%s) = ' % x.name, x,
'y (%s) = ' % y.name, y
]
condition = math_ops.reduce_all(math_ops.not_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize) | [
"def",
"assert_none_equal",
"(",
"x",
",",
"y",
",",
"data",
"=",
"None",
",",
"summarize",
"=",
"None",
",",
"message",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"message",
"=",
"message",
"or",
"''",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"'assert_none_equal'",
",",
"[",
"x",
",",
"y",
",",
"data",
"]",
")",
":",
"x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"'x'",
")",
"y",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"y",
",",
"name",
"=",
"'y'",
")",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"[",
"message",
",",
"'Condition x != y did not hold for every single element:'",
"'x (%s) = '",
"%",
"x",
".",
"name",
",",
"x",
",",
"'y (%s) = '",
"%",
"y",
".",
"name",
",",
"y",
"]",
"condition",
"=",
"math_ops",
".",
"reduce_all",
"(",
"math_ops",
".",
"not_equal",
"(",
"x",
",",
"y",
")",
")",
"return",
"control_flow_ops",
".",
"Assert",
"(",
"condition",
",",
"data",
",",
"summarize",
"=",
"summarize",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/check_ops.py#L321-L361 | ||
ceph/ceph | 959663007321a369c83218414a29bd9dbc8bda3a | qa/tasks/ceph.py | python | create_simple_monmap | (ctx, remote, conf, mons,
path=None,
mon_bind_addrvec=False) | return fsid | Writes a simple monmap based on current ceph.conf into path, or
<testdir>/monmap by default.
Assumes ceph_conf is up to date.
Assumes mon sections are named "mon.*", with the dot.
:return the FSID (as a string) of the newly created monmap | Writes a simple monmap based on current ceph.conf into path, or
<testdir>/monmap by default. | [
"Writes",
"a",
"simple",
"monmap",
"based",
"on",
"current",
"ceph",
".",
"conf",
"into",
"path",
"or",
"<testdir",
">",
"/",
"monmap",
"by",
"default",
"."
] | def create_simple_monmap(ctx, remote, conf, mons,
path=None,
mon_bind_addrvec=False):
"""
Writes a simple monmap based on current ceph.conf into path, or
<testdir>/monmap by default.
Assumes ceph_conf is up to date.
Assumes mon sections are named "mon.*", with the dot.
:return the FSID (as a string) of the newly created monmap
"""
addresses = list(mons.items())
assert addresses, "There are no monitors in config!"
log.debug('Ceph mon addresses: %s', addresses)
try:
log.debug('writing out conf {c}'.format(c=conf))
except:
log.debug('my conf logging attempt failed')
testdir = teuthology.get_testdir(ctx)
tmp_conf_path = '{tdir}/ceph.tmp.conf'.format(tdir=testdir)
conf_fp = BytesIO()
conf.write(conf_fp)
conf_fp.seek(0)
teuthology.write_file(remote, tmp_conf_path, conf_fp)
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'monmaptool',
'-c',
'{conf}'.format(conf=tmp_conf_path),
'--create',
'--clobber',
]
if mon_bind_addrvec:
args.extend(['--enable-all-features'])
for (role, addr) in addresses:
_, _, n = teuthology.split_role(role)
if mon_bind_addrvec and (',' in addr or 'v' in addr or ':' in addr):
args.extend(('--addv', n, addr))
else:
args.extend(('--add', n, addr))
if not path:
path = '{tdir}/monmap'.format(tdir=testdir)
args.extend([
'--print',
path
])
monmap_output = remote.sh(args)
fsid = re.search("generated fsid (.+)$",
monmap_output, re.MULTILINE).group(1)
teuthology.delete_file(remote, tmp_conf_path)
return fsid | [
"def",
"create_simple_monmap",
"(",
"ctx",
",",
"remote",
",",
"conf",
",",
"mons",
",",
"path",
"=",
"None",
",",
"mon_bind_addrvec",
"=",
"False",
")",
":",
"addresses",
"=",
"list",
"(",
"mons",
".",
"items",
"(",
")",
")",
"assert",
"addresses",
",",
"\"There are no monitors in config!\"",
"log",
".",
"debug",
"(",
"'Ceph mon addresses: %s'",
",",
"addresses",
")",
"try",
":",
"log",
".",
"debug",
"(",
"'writing out conf {c}'",
".",
"format",
"(",
"c",
"=",
"conf",
")",
")",
"except",
":",
"log",
".",
"debug",
"(",
"'my conf logging attempt failed'",
")",
"testdir",
"=",
"teuthology",
".",
"get_testdir",
"(",
"ctx",
")",
"tmp_conf_path",
"=",
"'{tdir}/ceph.tmp.conf'",
".",
"format",
"(",
"tdir",
"=",
"testdir",
")",
"conf_fp",
"=",
"BytesIO",
"(",
")",
"conf",
".",
"write",
"(",
"conf_fp",
")",
"conf_fp",
".",
"seek",
"(",
"0",
")",
"teuthology",
".",
"write_file",
"(",
"remote",
",",
"tmp_conf_path",
",",
"conf_fp",
")",
"args",
"=",
"[",
"'adjust-ulimits'",
",",
"'ceph-coverage'",
",",
"'{tdir}/archive/coverage'",
".",
"format",
"(",
"tdir",
"=",
"testdir",
")",
",",
"'monmaptool'",
",",
"'-c'",
",",
"'{conf}'",
".",
"format",
"(",
"conf",
"=",
"tmp_conf_path",
")",
",",
"'--create'",
",",
"'--clobber'",
",",
"]",
"if",
"mon_bind_addrvec",
":",
"args",
".",
"extend",
"(",
"[",
"'--enable-all-features'",
"]",
")",
"for",
"(",
"role",
",",
"addr",
")",
"in",
"addresses",
":",
"_",
",",
"_",
",",
"n",
"=",
"teuthology",
".",
"split_role",
"(",
"role",
")",
"if",
"mon_bind_addrvec",
"and",
"(",
"','",
"in",
"addr",
"or",
"'v'",
"in",
"addr",
"or",
"':'",
"in",
"addr",
")",
":",
"args",
".",
"extend",
"(",
"(",
"'--addv'",
",",
"n",
",",
"addr",
")",
")",
"else",
":",
"args",
".",
"extend",
"(",
"(",
"'--add'",
",",
"n",
",",
"addr",
")",
")",
"if",
"not",
"path",
":",
"path",
"=",
"'{tdir}/monmap'",
".",
"format",
"(",
"tdir",
"=",
"testdir",
")",
"args",
".",
"extend",
"(",
"[",
"'--print'",
",",
"path",
"]",
")",
"monmap_output",
"=",
"remote",
".",
"sh",
"(",
"args",
")",
"fsid",
"=",
"re",
".",
"search",
"(",
"\"generated fsid (.+)$\"",
",",
"monmap_output",
",",
"re",
".",
"MULTILINE",
")",
".",
"group",
"(",
"1",
")",
"teuthology",
".",
"delete_file",
"(",
"remote",
",",
"tmp_conf_path",
")",
"return",
"fsid"
] | https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/qa/tasks/ceph.py#L537-L594 | |
OpenLightingProject/ola | d1433a1bed73276fbe55ce18c03b1c208237decc | python/examples/rdm_snapshot.py | python | ConfigReader._HandlePersonality | (self, data) | Called when we get a DMX_PERSONALITY response. | Called when we get a DMX_PERSONALITY response. | [
"Called",
"when",
"we",
"get",
"a",
"DMX_PERSONALITY",
"response",
"."
] | def _HandlePersonality(self, data):
"""Called when we get a DMX_PERSONALITY response."""
this_device = self.data.setdefault(str(self.uid), {})
this_device['personality'] = data['current_personality']
self._NextState() | [
"def",
"_HandlePersonality",
"(",
"self",
",",
"data",
")",
":",
"this_device",
"=",
"self",
".",
"data",
".",
"setdefault",
"(",
"str",
"(",
"self",
".",
"uid",
")",
",",
"{",
"}",
")",
"this_device",
"[",
"'personality'",
"]",
"=",
"data",
"[",
"'current_personality'",
"]",
"self",
".",
"_NextState",
"(",
")"
] | https://github.com/OpenLightingProject/ola/blob/d1433a1bed73276fbe55ce18c03b1c208237decc/python/examples/rdm_snapshot.py#L130-L134 | ||
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/io/ros.py | python | listen_tf | (listener,klampt_obj,frameprefix="klampt",root="world",onerror=None) | Reads Klampt frames from the ROS tf module.
Args:
listener (tf.TransformListener): the tf listener
klampt_obj: the object to update. Can be WorldModel, RobotModel,
anything with a setTransform or setCurrentTransform method,
or None (in the latter case, a se3 object will be returned).
Note:
RobotModel configurations will not be changed, just
the link transforms.
frameprefix (str): the name of the base frame for this object
root (str): the name of the TF world frame.
onerror (str or None): either 'raise' in which case a tf exception
is raised, 'print', in which case the error is printed, or None,
in which case any exception is silently ignored. | Reads Klampt frames from the ROS tf module. | [
"Reads",
"Klampt",
"frames",
"from",
"the",
"ROS",
"tf",
"module",
"."
] | def listen_tf(listener,klampt_obj,frameprefix="klampt",root="world",onerror=None):
"""Reads Klampt frames from the ROS tf module.
Args:
listener (tf.TransformListener): the tf listener
klampt_obj: the object to update. Can be WorldModel, RobotModel,
anything with a setTransform or setCurrentTransform method,
or None (in the latter case, a se3 object will be returned).
Note:
RobotModel configurations will not be changed, just
the link transforms.
frameprefix (str): the name of the base frame for this object
root (str): the name of the TF world frame.
onerror (str or None): either 'raise' in which case a tf exception
is raised, 'print', in which case the error is printed, or None,
in which case any exception is silently ignored.
"""
from klampt import WorldModel,RobotModel
import tf
def do_lookup(frame,parent):
try:
(trans,rot) = listener.lookupTransform(frame, parent, rospy.Time(0))
return (so3.from_quaternion((rot[3],rot[0],rot[1],rot[2])),trans)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
if onerror == 'print':
print "listen_tf: Error looking up frame",frame
elif onerror == 'raise':
raise
return None
if isinstance(klampt_obj,WorldModel):
world = klampt_obj
prefix = frameprefix
for i in xrange(world.numRigidObjects()):
T = do_lookup(prefix+"/"+world.rigidObject(i).getName(),root)
if T:
world.rigidObject(i).setTransform(*T)
for i in xrange(world.numRobots()):
robot = world.robot(i)
rprefix = prefix+"/"+robot.getName()
listen_tf(listener,robot,rprefix,root,onerror)
return
elif isinstance(klampt_obj,RobotModel):
robot = klampt_obj
rprefix = frameprefix
for j in xrange(robot.numLinks()):
p = robot.link(j).getParent()
if p < 0:
T = do_lookup(rprefix+"/"+robot.link(j).getName(),root)
if T:
robot.link(j).setTransform(*T)
else:
T = do_lookup(rprefix+"/"+robot.link(j).getName(),rprefix+"/"+robot.link(p).getName())
if T:
robot.link(j).setTransform(*se3.mul(robot.link(p).getTransform(),T))
return
elif hasattr(klampt_obj,'setTransform'):
T = do_lookup(frameprefix,root)
if T:
klampt_obj.setTransform(*T)
elif hasattr(klampt_obj,'setCurrentTransform'):
T = do_lookup(frameprefix,root)
if T:
klampt_obj.setCurrentTransform(*T)
elif klampt_obj is None:
return do_lookup(frameprefix,root)
else:
raise ValueError("Invalid type given to listen_tf: ",klampt_obj.__class__.__name__) | [
"def",
"listen_tf",
"(",
"listener",
",",
"klampt_obj",
",",
"frameprefix",
"=",
"\"klampt\"",
",",
"root",
"=",
"\"world\"",
",",
"onerror",
"=",
"None",
")",
":",
"from",
"klampt",
"import",
"WorldModel",
",",
"RobotModel",
"import",
"tf",
"def",
"do_lookup",
"(",
"frame",
",",
"parent",
")",
":",
"try",
":",
"(",
"trans",
",",
"rot",
")",
"=",
"listener",
".",
"lookupTransform",
"(",
"frame",
",",
"parent",
",",
"rospy",
".",
"Time",
"(",
"0",
")",
")",
"return",
"(",
"so3",
".",
"from_quaternion",
"(",
"(",
"rot",
"[",
"3",
"]",
",",
"rot",
"[",
"0",
"]",
",",
"rot",
"[",
"1",
"]",
",",
"rot",
"[",
"2",
"]",
")",
")",
",",
"trans",
")",
"except",
"(",
"tf",
".",
"LookupException",
",",
"tf",
".",
"ConnectivityException",
",",
"tf",
".",
"ExtrapolationException",
")",
":",
"if",
"onerror",
"==",
"'print'",
":",
"print",
"\"listen_tf: Error looking up frame\"",
",",
"frame",
"elif",
"onerror",
"==",
"'raise'",
":",
"raise",
"return",
"None",
"if",
"isinstance",
"(",
"klampt_obj",
",",
"WorldModel",
")",
":",
"world",
"=",
"klampt_obj",
"prefix",
"=",
"frameprefix",
"for",
"i",
"in",
"xrange",
"(",
"world",
".",
"numRigidObjects",
"(",
")",
")",
":",
"T",
"=",
"do_lookup",
"(",
"prefix",
"+",
"\"/\"",
"+",
"world",
".",
"rigidObject",
"(",
"i",
")",
".",
"getName",
"(",
")",
",",
"root",
")",
"if",
"T",
":",
"world",
".",
"rigidObject",
"(",
"i",
")",
".",
"setTransform",
"(",
"*",
"T",
")",
"for",
"i",
"in",
"xrange",
"(",
"world",
".",
"numRobots",
"(",
")",
")",
":",
"robot",
"=",
"world",
".",
"robot",
"(",
"i",
")",
"rprefix",
"=",
"prefix",
"+",
"\"/\"",
"+",
"robot",
".",
"getName",
"(",
")",
"listen_tf",
"(",
"listener",
",",
"robot",
",",
"rprefix",
",",
"root",
",",
"onerror",
")",
"return",
"elif",
"isinstance",
"(",
"klampt_obj",
",",
"RobotModel",
")",
":",
"robot",
"=",
"klampt_obj",
"rprefix",
"=",
"frameprefix",
"for",
"j",
"in",
"xrange",
"(",
"robot",
".",
"numLinks",
"(",
")",
")",
":",
"p",
"=",
"robot",
".",
"link",
"(",
"j",
")",
".",
"getParent",
"(",
")",
"if",
"p",
"<",
"0",
":",
"T",
"=",
"do_lookup",
"(",
"rprefix",
"+",
"\"/\"",
"+",
"robot",
".",
"link",
"(",
"j",
")",
".",
"getName",
"(",
")",
",",
"root",
")",
"if",
"T",
":",
"robot",
".",
"link",
"(",
"j",
")",
".",
"setTransform",
"(",
"*",
"T",
")",
"else",
":",
"T",
"=",
"do_lookup",
"(",
"rprefix",
"+",
"\"/\"",
"+",
"robot",
".",
"link",
"(",
"j",
")",
".",
"getName",
"(",
")",
",",
"rprefix",
"+",
"\"/\"",
"+",
"robot",
".",
"link",
"(",
"p",
")",
".",
"getName",
"(",
")",
")",
"if",
"T",
":",
"robot",
".",
"link",
"(",
"j",
")",
".",
"setTransform",
"(",
"*",
"se3",
".",
"mul",
"(",
"robot",
".",
"link",
"(",
"p",
")",
".",
"getTransform",
"(",
")",
",",
"T",
")",
")",
"return",
"elif",
"hasattr",
"(",
"klampt_obj",
",",
"'setTransform'",
")",
":",
"T",
"=",
"do_lookup",
"(",
"frameprefix",
",",
"root",
")",
"if",
"T",
":",
"klampt_obj",
".",
"setTransform",
"(",
"*",
"T",
")",
"elif",
"hasattr",
"(",
"klampt_obj",
",",
"'setCurrentTransform'",
")",
":",
"T",
"=",
"do_lookup",
"(",
"frameprefix",
",",
"root",
")",
"if",
"T",
":",
"klampt_obj",
".",
"setCurrentTransform",
"(",
"*",
"T",
")",
"elif",
"klampt_obj",
"is",
"None",
":",
"return",
"do_lookup",
"(",
"frameprefix",
",",
"root",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid type given to listen_tf: \"",
",",
"klampt_obj",
".",
"__class__",
".",
"__name__",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/io/ros.py#L1093-L1162 | ||
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py | python | parserCtxt.htmlCtxtReadMemory | (self, buffer, size, URL, encoding, options) | return __tmp | parse an XML in-memory document and build a tree. This
reuses the existing @ctxt parser context | parse an XML in-memory document and build a tree. This
reuses the existing | [
"parse",
"an",
"XML",
"in",
"-",
"memory",
"document",
"and",
"build",
"a",
"tree",
".",
"This",
"reuses",
"the",
"existing"
] | def htmlCtxtReadMemory(self, buffer, size, URL, encoding, options):
"""parse an XML in-memory document and build a tree. This
reuses the existing @ctxt parser context """
ret = libxml2mod.htmlCtxtReadMemory(self._o, buffer, size, URL, encoding, options)
if ret is None:raise treeError('htmlCtxtReadMemory() failed')
__tmp = xmlDoc(_obj=ret)
return __tmp | [
"def",
"htmlCtxtReadMemory",
"(",
"self",
",",
"buffer",
",",
"size",
",",
"URL",
",",
"encoding",
",",
"options",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlCtxtReadMemory",
"(",
"self",
".",
"_o",
",",
"buffer",
",",
"size",
",",
"URL",
",",
"encoding",
",",
"options",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'htmlCtxtReadMemory() failed'",
")",
"__tmp",
"=",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py#L4193-L4199 | |
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/wsgiref/simple_server.py | python | make_server | (
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
) | return server | Create a new WSGI server listening on `host` and `port` for `app` | Create a new WSGI server listening on `host` and `port` for `app` | [
"Create",
"a",
"new",
"WSGI",
"server",
"listening",
"on",
"host",
"and",
"port",
"for",
"app"
] | def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
"""Create a new WSGI server listening on `host` and `port` for `app`"""
server = server_class((host, port), handler_class)
server.set_app(app)
return server | [
"def",
"make_server",
"(",
"host",
",",
"port",
",",
"app",
",",
"server_class",
"=",
"WSGIServer",
",",
"handler_class",
"=",
"WSGIRequestHandler",
")",
":",
"server",
"=",
"server_class",
"(",
"(",
"host",
",",
"port",
")",
",",
"handler_class",
")",
"server",
".",
"set_app",
"(",
"app",
")",
"return",
"server"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/wsgiref/simple_server.py#L177-L183 | |
xhzdeng/crpn | a5aef0f80dbe486103123f740c634fb01e6cc9a1 | lib/nms/py_cpu_nms.py | python | py_cpu_nms | (dets, thresh) | return keep | Pure Python NMS baseline. | Pure Python NMS baseline. | [
"Pure",
"Python",
"NMS",
"baseline",
"."
] | def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep | [
"def",
"py_cpu_nms",
"(",
"dets",
",",
"thresh",
")",
":",
"x1",
"=",
"dets",
"[",
":",
",",
"0",
"]",
"y1",
"=",
"dets",
"[",
":",
",",
"1",
"]",
"x2",
"=",
"dets",
"[",
":",
",",
"2",
"]",
"y2",
"=",
"dets",
"[",
":",
",",
"3",
"]",
"scores",
"=",
"dets",
"[",
":",
",",
"4",
"]",
"areas",
"=",
"(",
"x2",
"-",
"x1",
"+",
"1",
")",
"*",
"(",
"y2",
"-",
"y1",
"+",
"1",
")",
"order",
"=",
"scores",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"keep",
"=",
"[",
"]",
"while",
"order",
".",
"size",
">",
"0",
":",
"i",
"=",
"order",
"[",
"0",
"]",
"keep",
".",
"append",
"(",
"i",
")",
"xx1",
"=",
"np",
".",
"maximum",
"(",
"x1",
"[",
"i",
"]",
",",
"x1",
"[",
"order",
"[",
"1",
":",
"]",
"]",
")",
"yy1",
"=",
"np",
".",
"maximum",
"(",
"y1",
"[",
"i",
"]",
",",
"y1",
"[",
"order",
"[",
"1",
":",
"]",
"]",
")",
"xx2",
"=",
"np",
".",
"minimum",
"(",
"x2",
"[",
"i",
"]",
",",
"x2",
"[",
"order",
"[",
"1",
":",
"]",
"]",
")",
"yy2",
"=",
"np",
".",
"minimum",
"(",
"y2",
"[",
"i",
"]",
",",
"y2",
"[",
"order",
"[",
"1",
":",
"]",
"]",
")",
"w",
"=",
"np",
".",
"maximum",
"(",
"0.0",
",",
"xx2",
"-",
"xx1",
"+",
"1",
")",
"h",
"=",
"np",
".",
"maximum",
"(",
"0.0",
",",
"yy2",
"-",
"yy1",
"+",
"1",
")",
"inter",
"=",
"w",
"*",
"h",
"ovr",
"=",
"inter",
"/",
"(",
"areas",
"[",
"i",
"]",
"+",
"areas",
"[",
"order",
"[",
"1",
":",
"]",
"]",
"-",
"inter",
")",
"inds",
"=",
"np",
".",
"where",
"(",
"ovr",
"<=",
"thresh",
")",
"[",
"0",
"]",
"order",
"=",
"order",
"[",
"inds",
"+",
"1",
"]",
"return",
"keep"
] | https://github.com/xhzdeng/crpn/blob/a5aef0f80dbe486103123f740c634fb01e6cc9a1/lib/nms/py_cpu_nms.py#L10-L38 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/versioncontrol.py | python | VersionControl.get_requirement_revision | (cls, repo_dir) | return cls.get_revision(repo_dir) | Return the revision string that should be used in a requirement. | [] | def get_requirement_revision(cls, repo_dir):
# type: (str) -> str
"""
Return the revision string that should be used in a requirement.
"""
return cls.get_revision(repo_dir) | [
"def",
"get_requirement_revision",
"(",
"cls",
",",
"repo_dir",
")",
":",
"# type: (str) -> str",
"return",
"cls",
".",
"get_revision",
"(",
"repo_dir",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/versioncontrol.py#L613-L623 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/dataview.py | python | DataViewCtrl.GetCurrentItem | (*args, **kwargs) | return _dataview.DataViewCtrl_GetCurrentItem(*args, **kwargs) | GetCurrentItem(self) -> DataViewItem | GetCurrentItem(self) -> DataViewItem | [
"GetCurrentItem",
"(",
"self",
")",
"-",
">",
"DataViewItem"
] | def GetCurrentItem(*args, **kwargs):
"""GetCurrentItem(self) -> DataViewItem"""
return _dataview.DataViewCtrl_GetCurrentItem(*args, **kwargs) | [
"def",
"GetCurrentItem",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewCtrl_GetCurrentItem",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/dataview.py#L1743-L1745 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/ultimatelistctrl.py | python | UltimateListCtrl.GetFooterHeight | (self) | return self._headerWin.GetWindowHeight() | Returns the :class:`UltimateListHeaderWindow` height, in pixels. | Returns the :class:`UltimateListHeaderWindow` height, in pixels. | [
"Returns",
"the",
":",
"class",
":",
"UltimateListHeaderWindow",
"height",
"in",
"pixels",
"."
] | def GetFooterHeight(self):
""" Returns the :class:`UltimateListHeaderWindow` height, in pixels. """
if not self._footerWin:
return -1
return self._headerWin.GetWindowHeight() | [
"def",
"GetFooterHeight",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_footerWin",
":",
"return",
"-",
"1",
"return",
"self",
".",
"_headerWin",
".",
"GetWindowHeight",
"(",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ultimatelistctrl.py#L13699-L13705 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/grid.py | python | Grid.SetColSize | (*args, **kwargs) | return _grid.Grid_SetColSize(*args, **kwargs) | SetColSize(self, int col, int width) | SetColSize(self, int col, int width) | [
"SetColSize",
"(",
"self",
"int",
"col",
"int",
"width",
")"
] | def SetColSize(*args, **kwargs):
"""SetColSize(self, int col, int width)"""
return _grid.Grid_SetColSize(*args, **kwargs) | [
"def",
"SetColSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"Grid_SetColSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/grid.py#L1834-L1836 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | build/android/buildbot/bb_device_status_check.py | python | CheckForMissingDevices | (options, adb_online_devs) | Uses file of previous online devices to detect broken phones.
Args:
options: out_dir parameter of options argument is used as the base
directory to load and update the cache file.
adb_online_devs: A list of serial numbers of the currently visible
and online attached devices. | Uses file of previous online devices to detect broken phones. | [
"Uses",
"file",
"of",
"previous",
"online",
"devices",
"to",
"detect",
"broken",
"phones",
"."
] | def CheckForMissingDevices(options, adb_online_devs):
"""Uses file of previous online devices to detect broken phones.
Args:
options: out_dir parameter of options argument is used as the base
directory to load and update the cache file.
adb_online_devs: A list of serial numbers of the currently visible
and online attached devices.
"""
# TODO(navabi): remove this once the bug that causes different number
# of devices to be detected between calls is fixed.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
out_dir = os.path.abspath(options.out_dir)
def WriteDeviceList(file_name, device_list):
path = os.path.join(out_dir, file_name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(path, 'w') as f:
# Write devices currently visible plus devices previously seen.
f.write('\n'.join(set(device_list)))
last_devices_path = os.path.join(out_dir, '.last_devices')
last_devices = GetLastDevices(out_dir)
missing_devs = list(set(last_devices) - set(adb_online_devs))
all_known_devices = list(set(adb_online_devs) | set(last_devices))
WriteDeviceList('.last_devices', all_known_devices)
WriteDeviceList('.last_missing', missing_devs)
if not all_known_devices:
# This can happen if for some reason the .last_devices file is not
# present or if it was empty.
return ['No online devices. Have any devices been plugged in?']
if missing_devs:
devices_missing_msg = '%d devices not detected.' % len(missing_devs)
bb_annotations.PrintSummaryText(devices_missing_msg)
# TODO(navabi): Debug by printing both output from GetCmdOutput and
# GetAttachedDevices to compare results.
crbug_link = ('https://code.google.com/p/chromium/issues/entry?summary='
'%s&comment=%s&labels=Restrict-View-Google,OS-Android,Infra' %
(urllib.quote('Device Offline'),
urllib.quote('Buildbot: %s %s\n'
'Build: %s\n'
'(please don\'t change any labels)' %
(os.environ.get('BUILDBOT_BUILDERNAME'),
os.environ.get('BUILDBOT_SLAVENAME'),
os.environ.get('BUILDBOT_BUILDNUMBER')))))
return ['Current online devices: %s' % adb_online_devs,
'%s are no longer visible. Were they removed?\n' % missing_devs,
'SHERIFF:\n',
'@@@STEP_LINK@Click here to file a bug@%s@@@\n' % crbug_link,
'Cache file: %s\n\n' % last_devices_path,
'adb devices: %s' % GetCmdOutput(['adb', 'devices']),
'adb devices(GetAttachedDevices): %s' %
android_commands.GetAttachedDevices()]
else:
new_devs = set(adb_online_devs) - set(last_devices)
if new_devs and os.path.exists(last_devices_path):
bb_annotations.PrintWarning()
bb_annotations.PrintSummaryText(
'%d new devices detected' % len(new_devs))
print ('New devices detected %s. And now back to your '
'regularly scheduled program.' % list(new_devs)) | [
"def",
"CheckForMissingDevices",
"(",
"options",
",",
"adb_online_devs",
")",
":",
"# TODO(navabi): remove this once the bug that causes different number",
"# of devices to be detected between calls is fixed.",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"out_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"options",
".",
"out_dir",
")",
"def",
"WriteDeviceList",
"(",
"file_name",
",",
"device_list",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"file_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"out_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"out_dir",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"# Write devices currently visible plus devices previously seen.",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"set",
"(",
"device_list",
")",
")",
")",
"last_devices_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"'.last_devices'",
")",
"last_devices",
"=",
"GetLastDevices",
"(",
"out_dir",
")",
"missing_devs",
"=",
"list",
"(",
"set",
"(",
"last_devices",
")",
"-",
"set",
"(",
"adb_online_devs",
")",
")",
"all_known_devices",
"=",
"list",
"(",
"set",
"(",
"adb_online_devs",
")",
"|",
"set",
"(",
"last_devices",
")",
")",
"WriteDeviceList",
"(",
"'.last_devices'",
",",
"all_known_devices",
")",
"WriteDeviceList",
"(",
"'.last_missing'",
",",
"missing_devs",
")",
"if",
"not",
"all_known_devices",
":",
"# This can happen if for some reason the .last_devices file is not",
"# present or if it was empty.",
"return",
"[",
"'No online devices. Have any devices been plugged in?'",
"]",
"if",
"missing_devs",
":",
"devices_missing_msg",
"=",
"'%d devices not detected.'",
"%",
"len",
"(",
"missing_devs",
")",
"bb_annotations",
".",
"PrintSummaryText",
"(",
"devices_missing_msg",
")",
"# TODO(navabi): Debug by printing both output from GetCmdOutput and",
"# GetAttachedDevices to compare results.",
"crbug_link",
"=",
"(",
"'https://code.google.com/p/chromium/issues/entry?summary='",
"'%s&comment=%s&labels=Restrict-View-Google,OS-Android,Infra'",
"%",
"(",
"urllib",
".",
"quote",
"(",
"'Device Offline'",
")",
",",
"urllib",
".",
"quote",
"(",
"'Buildbot: %s %s\\n'",
"'Build: %s\\n'",
"'(please don\\'t change any labels)'",
"%",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'BUILDBOT_BUILDERNAME'",
")",
",",
"os",
".",
"environ",
".",
"get",
"(",
"'BUILDBOT_SLAVENAME'",
")",
",",
"os",
".",
"environ",
".",
"get",
"(",
"'BUILDBOT_BUILDNUMBER'",
")",
")",
")",
")",
")",
"return",
"[",
"'Current online devices: %s'",
"%",
"adb_online_devs",
",",
"'%s are no longer visible. Were they removed?\\n'",
"%",
"missing_devs",
",",
"'SHERIFF:\\n'",
",",
"'@@@STEP_LINK@Click here to file a bug@%s@@@\\n'",
"%",
"crbug_link",
",",
"'Cache file: %s\\n\\n'",
"%",
"last_devices_path",
",",
"'adb devices: %s'",
"%",
"GetCmdOutput",
"(",
"[",
"'adb'",
",",
"'devices'",
"]",
")",
",",
"'adb devices(GetAttachedDevices): %s'",
"%",
"android_commands",
".",
"GetAttachedDevices",
"(",
")",
"]",
"else",
":",
"new_devs",
"=",
"set",
"(",
"adb_online_devs",
")",
"-",
"set",
"(",
"last_devices",
")",
"if",
"new_devs",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"last_devices_path",
")",
":",
"bb_annotations",
".",
"PrintWarning",
"(",
")",
"bb_annotations",
".",
"PrintSummaryText",
"(",
"'%d new devices detected'",
"%",
"len",
"(",
"new_devs",
")",
")",
"print",
"(",
"'New devices detected %s. And now back to your '",
"'regularly scheduled program.'",
"%",
"list",
"(",
"new_devs",
")",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/build/android/buildbot/bb_device_status_check.py#L139-L205 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/platform.py | python | win32_ver | (release='',version='',csd='',ptype='') | return release,version,csd,ptype | Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms. | Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor). | [
"Get",
"additional",
"version",
"information",
"from",
"the",
"Windows",
"Registry",
"and",
"return",
"a",
"tuple",
"(",
"version",
"csd",
"ptype",
")",
"referring",
"to",
"version",
"number",
"CSD",
"level",
"(",
"service",
"pack",
")",
"and",
"OS",
"type",
"(",
"multi",
"/",
"single",
"processor",
")",
"."
] | def win32_ver(release='',version='',csd='',ptype=''):
""" Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms.
"""
# XXX Is there any way to find out the processor type on WinXX ?
# XXX Is win32 available on Windows CE ?
#
# Adapted from code posted by Karl Putland to comp.lang.python.
#
# The mappings between reg. values and release names can be found
# here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
# Import the needed APIs
try:
import win32api
from win32api import RegQueryValueEx, RegOpenKeyEx, \
RegCloseKey, GetVersionEx
from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
except ImportError:
# Emulate the win32api module using Python APIs
try:
sys.getwindowsversion
except AttributeError:
# No emulation possible, so return the defaults...
return release,version,csd,ptype
else:
# Emulation using _winreg (added in Python 2.0) and
# sys.getwindowsversion() (added in Python 2.3)
import _winreg
GetVersionEx = sys.getwindowsversion
RegQueryValueEx = _winreg.QueryValueEx
RegOpenKeyEx = _winreg.OpenKeyEx
RegCloseKey = _winreg.CloseKey
HKEY_LOCAL_MACHINE = _winreg.HKEY_LOCAL_MACHINE
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_NT_WORKSTATION = 1
VER_NT_SERVER = 3
REG_SZ = 1
# Find out the registry key and some general version infos
winver = GetVersionEx()
maj,min,buildno,plat,csd = winver
version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
if hasattr(winver, "service_pack"):
if winver.service_pack != "":
csd = 'SP%s' % winver.service_pack_major
else:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
if plat == VER_PLATFORM_WIN32_WINDOWS:
regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
# Try to guess the release name
if maj == 4:
if min == 0:
release = '95'
elif min == 10:
release = '98'
elif min == 90:
release = 'Me'
else:
release = 'postMe'
elif maj == 5:
release = '2000'
elif plat == VER_PLATFORM_WIN32_NT:
regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
if maj <= 4:
release = 'NT'
elif maj == 5:
if min == 0:
release = '2000'
elif min == 1:
release = 'XP'
elif min == 2:
release = '2003Server'
else:
release = 'post2003'
elif maj == 6:
if hasattr(winver, "product_type"):
product_type = winver.product_type
else:
product_type = VER_NT_WORKSTATION
# Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
# or help from the registry, we cannot properly identify
# non-workstation versions.
try:
key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
name, type = RegQueryValueEx(key, "ProductName")
# Discard any type that isn't REG_SZ
if type == REG_SZ and name.find("Server") != -1:
product_type = VER_NT_SERVER
except WindowsError:
# Use default of VER_NT_WORKSTATION
pass
if min == 0:
if product_type == VER_NT_WORKSTATION:
release = 'Vista'
else:
release = '2008Server'
elif min == 1:
if product_type == VER_NT_WORKSTATION:
release = '7'
else:
release = '2008ServerR2'
elif min == 2:
if product_type == VER_NT_WORKSTATION:
release = '8'
else:
release = '2012Server'
else:
release = 'post2012Server'
else:
if not release:
# E.g. Win3.1 with win32s
release = '%i.%i' % (maj,min)
return release,version,csd,ptype
# Open the registry key
try:
keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
# Get a value to make sure the key exists...
RegQueryValueEx(keyCurVer, 'SystemRoot')
except:
return release,version,csd,ptype
# Parse values
#subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
# ('',1))[0]
#if subversion:
# release = release + subversion # 95a, 95b, etc.
build = _win32_getvalue(keyCurVer,
'CurrentBuildNumber',
('',1))[0]
ptype = _win32_getvalue(keyCurVer,
'CurrentType',
(ptype,1))[0]
# Normalize version
version = _norm_version(version,build)
# Close key
RegCloseKey(keyCurVer)
return release,version,csd,ptype | [
"def",
"win32_ver",
"(",
"release",
"=",
"''",
",",
"version",
"=",
"''",
",",
"csd",
"=",
"''",
",",
"ptype",
"=",
"''",
")",
":",
"# XXX Is there any way to find out the processor type on WinXX ?",
"# XXX Is win32 available on Windows CE ?",
"#",
"# Adapted from code posted by Karl Putland to comp.lang.python.",
"#",
"# The mappings between reg. values and release names can be found",
"# here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp",
"# Import the needed APIs",
"try",
":",
"import",
"win32api",
"from",
"win32api",
"import",
"RegQueryValueEx",
",",
"RegOpenKeyEx",
",",
"RegCloseKey",
",",
"GetVersionEx",
"from",
"win32con",
"import",
"HKEY_LOCAL_MACHINE",
",",
"VER_PLATFORM_WIN32_NT",
",",
"VER_PLATFORM_WIN32_WINDOWS",
",",
"VER_NT_WORKSTATION",
"except",
"ImportError",
":",
"# Emulate the win32api module using Python APIs",
"try",
":",
"sys",
".",
"getwindowsversion",
"except",
"AttributeError",
":",
"# No emulation possible, so return the defaults...",
"return",
"release",
",",
"version",
",",
"csd",
",",
"ptype",
"else",
":",
"# Emulation using _winreg (added in Python 2.0) and",
"# sys.getwindowsversion() (added in Python 2.3)",
"import",
"_winreg",
"GetVersionEx",
"=",
"sys",
".",
"getwindowsversion",
"RegQueryValueEx",
"=",
"_winreg",
".",
"QueryValueEx",
"RegOpenKeyEx",
"=",
"_winreg",
".",
"OpenKeyEx",
"RegCloseKey",
"=",
"_winreg",
".",
"CloseKey",
"HKEY_LOCAL_MACHINE",
"=",
"_winreg",
".",
"HKEY_LOCAL_MACHINE",
"VER_PLATFORM_WIN32_WINDOWS",
"=",
"1",
"VER_PLATFORM_WIN32_NT",
"=",
"2",
"VER_NT_WORKSTATION",
"=",
"1",
"VER_NT_SERVER",
"=",
"3",
"REG_SZ",
"=",
"1",
"# Find out the registry key and some general version infos",
"winver",
"=",
"GetVersionEx",
"(",
")",
"maj",
",",
"min",
",",
"buildno",
",",
"plat",
",",
"csd",
"=",
"winver",
"version",
"=",
"'%i.%i.%i'",
"%",
"(",
"maj",
",",
"min",
",",
"buildno",
"&",
"0xFFFF",
")",
"if",
"hasattr",
"(",
"winver",
",",
"\"service_pack\"",
")",
":",
"if",
"winver",
".",
"service_pack",
"!=",
"\"\"",
":",
"csd",
"=",
"'SP%s'",
"%",
"winver",
".",
"service_pack_major",
"else",
":",
"if",
"csd",
"[",
":",
"13",
"]",
"==",
"'Service Pack '",
":",
"csd",
"=",
"'SP'",
"+",
"csd",
"[",
"13",
":",
"]",
"if",
"plat",
"==",
"VER_PLATFORM_WIN32_WINDOWS",
":",
"regkey",
"=",
"'SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion'",
"# Try to guess the release name",
"if",
"maj",
"==",
"4",
":",
"if",
"min",
"==",
"0",
":",
"release",
"=",
"'95'",
"elif",
"min",
"==",
"10",
":",
"release",
"=",
"'98'",
"elif",
"min",
"==",
"90",
":",
"release",
"=",
"'Me'",
"else",
":",
"release",
"=",
"'postMe'",
"elif",
"maj",
"==",
"5",
":",
"release",
"=",
"'2000'",
"elif",
"plat",
"==",
"VER_PLATFORM_WIN32_NT",
":",
"regkey",
"=",
"'SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion'",
"if",
"maj",
"<=",
"4",
":",
"release",
"=",
"'NT'",
"elif",
"maj",
"==",
"5",
":",
"if",
"min",
"==",
"0",
":",
"release",
"=",
"'2000'",
"elif",
"min",
"==",
"1",
":",
"release",
"=",
"'XP'",
"elif",
"min",
"==",
"2",
":",
"release",
"=",
"'2003Server'",
"else",
":",
"release",
"=",
"'post2003'",
"elif",
"maj",
"==",
"6",
":",
"if",
"hasattr",
"(",
"winver",
",",
"\"product_type\"",
")",
":",
"product_type",
"=",
"winver",
".",
"product_type",
"else",
":",
"product_type",
"=",
"VER_NT_WORKSTATION",
"# Without an OSVERSIONINFOEX capable sys.getwindowsversion(),",
"# or help from the registry, we cannot properly identify",
"# non-workstation versions.",
"try",
":",
"key",
"=",
"RegOpenKeyEx",
"(",
"HKEY_LOCAL_MACHINE",
",",
"regkey",
")",
"name",
",",
"type",
"=",
"RegQueryValueEx",
"(",
"key",
",",
"\"ProductName\"",
")",
"# Discard any type that isn't REG_SZ",
"if",
"type",
"==",
"REG_SZ",
"and",
"name",
".",
"find",
"(",
"\"Server\"",
")",
"!=",
"-",
"1",
":",
"product_type",
"=",
"VER_NT_SERVER",
"except",
"WindowsError",
":",
"# Use default of VER_NT_WORKSTATION",
"pass",
"if",
"min",
"==",
"0",
":",
"if",
"product_type",
"==",
"VER_NT_WORKSTATION",
":",
"release",
"=",
"'Vista'",
"else",
":",
"release",
"=",
"'2008Server'",
"elif",
"min",
"==",
"1",
":",
"if",
"product_type",
"==",
"VER_NT_WORKSTATION",
":",
"release",
"=",
"'7'",
"else",
":",
"release",
"=",
"'2008ServerR2'",
"elif",
"min",
"==",
"2",
":",
"if",
"product_type",
"==",
"VER_NT_WORKSTATION",
":",
"release",
"=",
"'8'",
"else",
":",
"release",
"=",
"'2012Server'",
"else",
":",
"release",
"=",
"'post2012Server'",
"else",
":",
"if",
"not",
"release",
":",
"# E.g. Win3.1 with win32s",
"release",
"=",
"'%i.%i'",
"%",
"(",
"maj",
",",
"min",
")",
"return",
"release",
",",
"version",
",",
"csd",
",",
"ptype",
"# Open the registry key",
"try",
":",
"keyCurVer",
"=",
"RegOpenKeyEx",
"(",
"HKEY_LOCAL_MACHINE",
",",
"regkey",
")",
"# Get a value to make sure the key exists...",
"RegQueryValueEx",
"(",
"keyCurVer",
",",
"'SystemRoot'",
")",
"except",
":",
"return",
"release",
",",
"version",
",",
"csd",
",",
"ptype",
"# Parse values",
"#subversion = _win32_getvalue(keyCurVer,",
"# 'SubVersionNumber',",
"# ('',1))[0]",
"#if subversion:",
"# release = release + subversion # 95a, 95b, etc.",
"build",
"=",
"_win32_getvalue",
"(",
"keyCurVer",
",",
"'CurrentBuildNumber'",
",",
"(",
"''",
",",
"1",
")",
")",
"[",
"0",
"]",
"ptype",
"=",
"_win32_getvalue",
"(",
"keyCurVer",
",",
"'CurrentType'",
",",
"(",
"ptype",
",",
"1",
")",
")",
"[",
"0",
"]",
"# Normalize version",
"version",
"=",
"_norm_version",
"(",
"version",
",",
"build",
")",
"# Close key",
"RegCloseKey",
"(",
"keyCurVer",
")",
"return",
"release",
",",
"version",
",",
"csd",
",",
"ptype"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/platform.py#L553-L716 | |
netket/netket | 0d534e54ecbf25b677ea72af6b85947979420652 | netket/graph/graph.py | python | Graph.to_networkx | (self) | return self._igraph.to_networkx() | Returns a copy of this graph as an igraph.Graph instance.
This method requires networkx to be installed. | Returns a copy of this graph as an igraph.Graph instance.
This method requires networkx to be installed. | [
"Returns",
"a",
"copy",
"of",
"this",
"graph",
"as",
"an",
"igraph",
".",
"Graph",
"instance",
".",
"This",
"method",
"requires",
"networkx",
"to",
"be",
"installed",
"."
] | def to_networkx(self):
"""
Returns a copy of this graph as an igraph.Graph instance.
This method requires networkx to be installed.
"""
return self._igraph.to_networkx() | [
"def",
"to_networkx",
"(",
"self",
")",
":",
"return",
"self",
".",
"_igraph",
".",
"to_networkx",
"(",
")"
] | https://github.com/netket/netket/blob/0d534e54ecbf25b677ea72af6b85947979420652/netket/graph/graph.py#L121-L126 | |
epiqc/ScaffCC | 66a79944ee4cd116b27bc1a69137276885461db8 | clang/tools/scan-build-py/libscanbuild/clang.py | python | is_active | (checkers) | return predicate | Returns a method, which classifies the checker active or not,
based on the received checker name list. | Returns a method, which classifies the checker active or not,
based on the received checker name list. | [
"Returns",
"a",
"method",
"which",
"classifies",
"the",
"checker",
"active",
"or",
"not",
"based",
"on",
"the",
"received",
"checker",
"name",
"list",
"."
] | def is_active(checkers):
""" Returns a method, which classifies the checker active or not,
based on the received checker name list. """
def predicate(checker):
""" Returns True if the given checker is active. """
return any(pattern.match(checker) for pattern in predicate.patterns)
predicate.patterns = [re.compile(r'^' + a + r'(\.|$)') for a in checkers]
return predicate | [
"def",
"is_active",
"(",
"checkers",
")",
":",
"def",
"predicate",
"(",
"checker",
")",
":",
"\"\"\" Returns True if the given checker is active. \"\"\"",
"return",
"any",
"(",
"pattern",
".",
"match",
"(",
"checker",
")",
"for",
"pattern",
"in",
"predicate",
".",
"patterns",
")",
"predicate",
".",
"patterns",
"=",
"[",
"re",
".",
"compile",
"(",
"r'^'",
"+",
"a",
"+",
"r'(\\.|$)'",
")",
"for",
"a",
"in",
"checkers",
"]",
"return",
"predicate"
] | https://github.com/epiqc/ScaffCC/blob/66a79944ee4cd116b27bc1a69137276885461db8/clang/tools/scan-build-py/libscanbuild/clang.py#L87-L97 | |
qboticslabs/mastering_ros | d83e78f30acc45b0f18522c1d5fae3a7f52974b9 | chapter_3_codes/seven_dof_arm_gazebo/scripts/pick_and_place_pick_working.py | python | CokeCanPickAndPlace._create_place_goal | (self, group, target, places) | return goal | Create a MoveIt! PlaceGoal | Create a MoveIt! PlaceGoal | [
"Create",
"a",
"MoveIt!",
"PlaceGoal"
] | def _create_place_goal(self, group, target, places):
"""
Create a MoveIt! PlaceGoal
"""
# Create goal:
goal = PlaceGoal()
goal.group_name = group
goal.attached_object_name = target
goal.place_locations.extend(places)
# Configure goal planning options:
goal.allowed_planning_time = 5.0
goal.planning_options.planning_scene_diff.is_diff = True
goal.planning_options.planning_scene_diff.robot_state.is_diff = True
goal.planning_options.plan_only = False
goal.planning_options.replan = True
goal.planning_options.replan_attempts = 10
return goal | [
"def",
"_create_place_goal",
"(",
"self",
",",
"group",
",",
"target",
",",
"places",
")",
":",
"# Create goal:",
"goal",
"=",
"PlaceGoal",
"(",
")",
"goal",
".",
"group_name",
"=",
"group",
"goal",
".",
"attached_object_name",
"=",
"target",
"goal",
".",
"place_locations",
".",
"extend",
"(",
"places",
")",
"# Configure goal planning options:",
"goal",
".",
"allowed_planning_time",
"=",
"5.0",
"goal",
".",
"planning_options",
".",
"planning_scene_diff",
".",
"is_diff",
"=",
"True",
"goal",
".",
"planning_options",
".",
"planning_scene_diff",
".",
"robot_state",
".",
"is_diff",
"=",
"True",
"goal",
".",
"planning_options",
".",
"plan_only",
"=",
"False",
"goal",
".",
"planning_options",
".",
"replan",
"=",
"True",
"goal",
".",
"planning_options",
".",
"replan_attempts",
"=",
"10",
"return",
"goal"
] | https://github.com/qboticslabs/mastering_ros/blob/d83e78f30acc45b0f18522c1d5fae3a7f52974b9/chapter_3_codes/seven_dof_arm_gazebo/scripts/pick_and_place_pick_working.py#L273-L295 | |
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/estimator.py | python | BaseEstimator.fit | (self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None) | return self | See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`. | See `Trainable`. | [
"See",
"Trainable",
"."
] | def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
input_fn, feed_fn = _get_input_fn(x, y, input_fn, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
loss = self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors,
max_steps=max_steps)
logging.info('Loss for final step: %s.', loss)
return self | [
"def",
"fit",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"input_fn",
"=",
"None",
",",
"steps",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"monitors",
"=",
"None",
",",
"max_steps",
"=",
"None",
")",
":",
"# pylint: disable=g-doc-args,g-doc-return-or-yield",
"if",
"(",
"steps",
"is",
"not",
"None",
")",
"and",
"(",
"max_steps",
"is",
"not",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Can not provide both steps and max_steps.'",
")",
"input_fn",
",",
"feed_fn",
"=",
"_get_input_fn",
"(",
"x",
",",
"y",
",",
"input_fn",
",",
"feed_fn",
"=",
"None",
",",
"batch_size",
"=",
"batch_size",
",",
"shuffle",
"=",
"True",
",",
"epochs",
"=",
"None",
")",
"loss",
"=",
"self",
".",
"_train_model",
"(",
"input_fn",
"=",
"input_fn",
",",
"feed_fn",
"=",
"feed_fn",
",",
"steps",
"=",
"steps",
",",
"monitors",
"=",
"monitors",
",",
"max_steps",
"=",
"max_steps",
")",
"logging",
".",
"info",
"(",
"'Loss for final step: %s.'",
",",
"loss",
")",
"return",
"self"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/estimator.py#L200-L221 | |
martinrotter/textosaurus | 4e2ad75abaf5b7e6a823766a2aa8a30f0c965cb8 | src/libtextosaurus/3rd-party/scintilla/scripts/FileGenerator.py | python | UpdateFile | (filename, updated) | If the file contents are different to updated then copy updated into the
file else leave alone so Mercurial and make don't treat it as modified. | If the file contents are different to updated then copy updated into the
file else leave alone so Mercurial and make don't treat it as modified. | [
"If",
"the",
"file",
"contents",
"are",
"different",
"to",
"updated",
"then",
"copy",
"updated",
"into",
"the",
"file",
"else",
"leave",
"alone",
"so",
"Mercurial",
"and",
"make",
"don",
"t",
"treat",
"it",
"as",
"modified",
"."
] | def UpdateFile(filename, updated):
""" If the file contents are different to updated then copy updated into the
file else leave alone so Mercurial and make don't treat it as modified. """
newOrChanged = "Changed"
try:
with codecs.open(filename, "r", "utf-8") as infile:
original = infile.read()
if updated == original:
# Same as before so don't write
return
os.unlink(filename)
except IOError: # File is not there yet
newOrChanged = "New"
with codecs.open(filename, "w", "utf-8") as outfile:
outfile.write(updated)
print("%s %s" % (newOrChanged, filename)) | [
"def",
"UpdateFile",
"(",
"filename",
",",
"updated",
")",
":",
"newOrChanged",
"=",
"\"Changed\"",
"try",
":",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"infile",
":",
"original",
"=",
"infile",
".",
"read",
"(",
")",
"if",
"updated",
"==",
"original",
":",
"# Same as before so don't write",
"return",
"os",
".",
"unlink",
"(",
"filename",
")",
"except",
"IOError",
":",
"# File is not there yet",
"newOrChanged",
"=",
"\"New\"",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"\"w\"",
",",
"\"utf-8\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"updated",
")",
"print",
"(",
"\"%s %s\"",
"%",
"(",
"newOrChanged",
",",
"filename",
")",
")"
] | https://github.com/martinrotter/textosaurus/blob/4e2ad75abaf5b7e6a823766a2aa8a30f0c965cb8/src/libtextosaurus/3rd-party/scintilla/scripts/FileGenerator.py#L20-L35 | ||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/utils/generic_utils.py | python | custom_object_scope | (*args) | return CustomObjectScope(*args) | Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`. | Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. | [
"Provides",
"a",
"scope",
"that",
"changes",
"to",
"_GLOBAL_CUSTOM_OBJECTS",
"cannot",
"escape",
"."
] | def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args) | [
"def",
"custom_object_scope",
"(",
"*",
"args",
")",
":",
"return",
"CustomObjectScope",
"(",
"*",
"args",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/utils/generic_utils.py#L77-L104 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/aui.py | python | AuiNotebook.SetTabCtrlHeight | (*args, **kwargs) | return _aui.AuiNotebook_SetTabCtrlHeight(*args, **kwargs) | SetTabCtrlHeight(self, int height) | SetTabCtrlHeight(self, int height) | [
"SetTabCtrlHeight",
"(",
"self",
"int",
"height",
")"
] | def SetTabCtrlHeight(*args, **kwargs):
"""SetTabCtrlHeight(self, int height)"""
return _aui.AuiNotebook_SetTabCtrlHeight(*args, **kwargs) | [
"def",
"SetTabCtrlHeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiNotebook_SetTabCtrlHeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/aui.py#L1317-L1319 | |
klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/build/fuchsia/binary_sizes.py | python | GetBinarySizesAndBlobs | (args, sizes_config) | return package_sizes, package_blobs | Get binary size data and contained blobs for packages specified in args.
If "total_size_name" is set, then computes a synthetic package size which is
the aggregated sizes across all packages. | Get binary size data and contained blobs for packages specified in args. | [
"Get",
"binary",
"size",
"data",
"and",
"contained",
"blobs",
"for",
"packages",
"specified",
"in",
"args",
"."
] | def GetBinarySizesAndBlobs(args, sizes_config):
"""Get binary size data and contained blobs for packages specified in args.
If "total_size_name" is set, then computes a synthetic package size which is
the aggregated sizes across all packages."""
# Calculate compressed and uncompressed package sizes.
package_blobs = GetPackageBlobs(sizes_config['far_files'], args.build_out_dir)
package_sizes = GetPackageSizes(package_blobs)
# Optionally calculate total compressed and uncompressed package sizes.
if 'far_total_name' in sizes_config:
compressed = sum([a.compressed for a in package_sizes.values()])
uncompressed = sum([a.uncompressed for a in package_sizes.values()])
package_sizes[sizes_config['far_total_name']] = PackageSizes(
compressed, uncompressed)
for name, size in package_sizes.items():
print('%s: compressed size %d, uncompressed size %d' %
(name, size.compressed, size.uncompressed))
return package_sizes, package_blobs | [
"def",
"GetBinarySizesAndBlobs",
"(",
"args",
",",
"sizes_config",
")",
":",
"# Calculate compressed and uncompressed package sizes.",
"package_blobs",
"=",
"GetPackageBlobs",
"(",
"sizes_config",
"[",
"'far_files'",
"]",
",",
"args",
".",
"build_out_dir",
")",
"package_sizes",
"=",
"GetPackageSizes",
"(",
"package_blobs",
")",
"# Optionally calculate total compressed and uncompressed package sizes.",
"if",
"'far_total_name'",
"in",
"sizes_config",
":",
"compressed",
"=",
"sum",
"(",
"[",
"a",
".",
"compressed",
"for",
"a",
"in",
"package_sizes",
".",
"values",
"(",
")",
"]",
")",
"uncompressed",
"=",
"sum",
"(",
"[",
"a",
".",
"uncompressed",
"for",
"a",
"in",
"package_sizes",
".",
"values",
"(",
")",
"]",
")",
"package_sizes",
"[",
"sizes_config",
"[",
"'far_total_name'",
"]",
"]",
"=",
"PackageSizes",
"(",
"compressed",
",",
"uncompressed",
")",
"for",
"name",
",",
"size",
"in",
"package_sizes",
".",
"items",
"(",
")",
":",
"print",
"(",
"'%s: compressed size %d, uncompressed size %d'",
"%",
"(",
"name",
",",
"size",
".",
"compressed",
",",
"size",
".",
"uncompressed",
")",
")",
"return",
"package_sizes",
",",
"package_blobs"
] | https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/fuchsia/binary_sizes.py#L454-L475 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/feature_column/feature_column.py | python | bucketized_column | (source_column, boundaries) | return _BucketizedColumn(source_column, tuple(boundaries)) | Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input tensor = [[-5, 10000]
[150, 10]
[5, 100]]
```
then the output will be
```python
output = [[0, 3]
[3, 2]
[1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
columns = [price_x_keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `_BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple. | Represents discretized dense input. | [
"Represents",
"discretized",
"dense",
"input",
"."
] | def bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input tensor = [[-5, 10000]
[150, 10]
[5, 100]]
```
then the output will be
```python
output = [[0, 3]
[3, 2]
[1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
columns = [price_x_keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `_BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, _NumericColumn):
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if (not boundaries or
not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):
raise ValueError('boundaries must be a sorted list.')
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return _BucketizedColumn(source_column, tuple(boundaries)) | [
"def",
"bucketized_column",
"(",
"source_column",
",",
"boundaries",
")",
":",
"if",
"not",
"isinstance",
"(",
"source_column",
",",
"_NumericColumn",
")",
":",
"raise",
"ValueError",
"(",
"'source_column must be a column generated with numeric_column(). '",
"'Given: {}'",
".",
"format",
"(",
"source_column",
")",
")",
"if",
"len",
"(",
"source_column",
".",
"shape",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'source_column must be one-dimensional column. '",
"'Given: {}'",
".",
"format",
"(",
"source_column",
")",
")",
"if",
"(",
"not",
"boundaries",
"or",
"not",
"(",
"isinstance",
"(",
"boundaries",
",",
"list",
")",
"or",
"isinstance",
"(",
"boundaries",
",",
"tuple",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'boundaries must be a sorted list.'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"boundaries",
")",
"-",
"1",
")",
":",
"if",
"boundaries",
"[",
"i",
"]",
">=",
"boundaries",
"[",
"i",
"+",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'boundaries must be a sorted list.'",
")",
"return",
"_BucketizedColumn",
"(",
"source_column",
",",
"tuple",
"(",
"boundaries",
")",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/feature_column/feature_column.py#L634-L714 | |
gsl-lite/gsl-lite | 4b5e9ab7474841fc2d7efc2e0064ef81785535d1 | script/create-vcpkg.py | python | createPortfile | ( args ) | Create vcpkg portfile | Create vcpkg portfile | [
"Create",
"vcpkg",
"portfile"
] | def createPortfile( args ):
"""Create vcpkg portfile"""
output = tpl_vcpkg_portfile.format(
usr=args.user, prj=args.project, ref=to_ref(args.version), sha=args.sha, lic=cfg_license )
if args.verbose:
print( "Creating portfile '{f}':".format( f=portfile_path( args ) ) )
if args.verbose > 1:
print( output )
os.makedirs( os.path.dirname( portfile_path( args ) ), exist_ok=True )
with open( portfile_path( args ), 'w') as f:
print( output, file=f ) | [
"def",
"createPortfile",
"(",
"args",
")",
":",
"output",
"=",
"tpl_vcpkg_portfile",
".",
"format",
"(",
"usr",
"=",
"args",
".",
"user",
",",
"prj",
"=",
"args",
".",
"project",
",",
"ref",
"=",
"to_ref",
"(",
"args",
".",
"version",
")",
",",
"sha",
"=",
"args",
".",
"sha",
",",
"lic",
"=",
"cfg_license",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"Creating portfile '{f}':\"",
".",
"format",
"(",
"f",
"=",
"portfile_path",
"(",
"args",
")",
")",
")",
"if",
"args",
".",
"verbose",
">",
"1",
":",
"print",
"(",
"output",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"portfile_path",
"(",
"args",
")",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"portfile_path",
"(",
"args",
")",
",",
"'w'",
")",
"as",
"f",
":",
"print",
"(",
"output",
",",
"file",
"=",
"f",
")"
] | https://github.com/gsl-lite/gsl-lite/blob/4b5e9ab7474841fc2d7efc2e0064ef81785535d1/script/create-vcpkg.py#L114-L124 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/dateutil/dateutil/tz/tz.py | python | resolve_imaginary | (dt) | return dt | Given a datetime that may be imaginary, return an existing datetime.
This function assumes that an imaginary datetime represents what the
wall time would be in a zone had the offset transition not occurred, so
it will always fall forward by the transition's change in offset.
.. doctest::
>>> from dateutil import tz
>>> from datetime import datetime
>>> NYC = tz.gettz('America/New_York')
>>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC)))
2017-03-12 03:30:00-04:00
>>> KIR = tz.gettz('Pacific/Kiritimati')
>>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR)))
1995-01-02 12:30:00+14:00
As a note, :func:`datetime.astimezone` is guaranteed to produce a valid,
existing datetime, so a round-trip to and from UTC is sufficient to get
an extant datetime, however, this generally "falls back" to an earlier time
rather than falling forward to the STD side (though no guarantees are made
about this behavior).
:param dt:
A :class:`datetime.datetime` which may or may not exist.
:return:
Returns an existing :class:`datetime.datetime`. If ``dt`` was not
imaginary, the datetime returned is guaranteed to be the same object
passed to the function.
.. versionadded:: 2.7.0 | Given a datetime that may be imaginary, return an existing datetime. | [
"Given",
"a",
"datetime",
"that",
"may",
"be",
"imaginary",
"return",
"an",
"existing",
"datetime",
"."
] | def resolve_imaginary(dt):
"""
Given a datetime that may be imaginary, return an existing datetime.
This function assumes that an imaginary datetime represents what the
wall time would be in a zone had the offset transition not occurred, so
it will always fall forward by the transition's change in offset.
.. doctest::
>>> from dateutil import tz
>>> from datetime import datetime
>>> NYC = tz.gettz('America/New_York')
>>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC)))
2017-03-12 03:30:00-04:00
>>> KIR = tz.gettz('Pacific/Kiritimati')
>>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR)))
1995-01-02 12:30:00+14:00
As a note, :func:`datetime.astimezone` is guaranteed to produce a valid,
existing datetime, so a round-trip to and from UTC is sufficient to get
an extant datetime, however, this generally "falls back" to an earlier time
rather than falling forward to the STD side (though no guarantees are made
about this behavior).
:param dt:
A :class:`datetime.datetime` which may or may not exist.
:return:
Returns an existing :class:`datetime.datetime`. If ``dt`` was not
imaginary, the datetime returned is guaranteed to be the same object
passed to the function.
.. versionadded:: 2.7.0
"""
if dt.tzinfo is not None and not datetime_exists(dt):
curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset()
old_offset = (dt - datetime.timedelta(hours=24)).utcoffset()
dt += curr_offset - old_offset
return dt | [
"def",
"resolve_imaginary",
"(",
"dt",
")",
":",
"if",
"dt",
".",
"tzinfo",
"is",
"not",
"None",
"and",
"not",
"datetime_exists",
"(",
"dt",
")",
":",
"curr_offset",
"=",
"(",
"dt",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"24",
")",
")",
".",
"utcoffset",
"(",
")",
"old_offset",
"=",
"(",
"dt",
"-",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"24",
")",
")",
".",
"utcoffset",
"(",
")",
"dt",
"+=",
"curr_offset",
"-",
"old_offset",
"return",
"dt"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/dateutil/dateutil/tz/tz.py#L1763-L1806 | |
s9xie/hed | 94fb22f10cbfec8d84fbc0642b224022014b6bd6 | scripts/cpp_lint.py | python | CheckIncludeLine | (filename, clean_lines, linenum, include_state, error) | Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found. | Check rules that are applicable to #include lines. | [
"Check",
"rules",
"that",
"are",
"applicable",
"to",
"#include",
"lines",
"."
] | def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include_dir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.') | [
"def",
"CheckIncludeLine",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"include_state",
",",
"error",
")",
":",
"fileinfo",
"=",
"FileInfo",
"(",
"filename",
")",
"line",
"=",
"clean_lines",
".",
"lines",
"[",
"linenum",
"]",
"# \"include\" should use the new style \"foo/bar.h\" instead of just \"bar.h\"",
"if",
"_RE_PATTERN_INCLUDE_NEW_STYLE",
".",
"search",
"(",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/include_dir'",
",",
"4",
",",
"'Include the directory when naming .h files'",
")",
"# we shouldn't include a file more than once. actually, there are a",
"# handful of instances where doing so is okay, but in general it's",
"# not.",
"match",
"=",
"_RE_PATTERN_INCLUDE",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"include",
"=",
"match",
".",
"group",
"(",
"2",
")",
"is_system",
"=",
"(",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'<'",
")",
"if",
"include",
"in",
"include_state",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/include'",
",",
"4",
",",
"'\"%s\" already included at %s:%s'",
"%",
"(",
"include",
",",
"filename",
",",
"include_state",
"[",
"include",
"]",
")",
")",
"else",
":",
"include_state",
"[",
"include",
"]",
"=",
"linenum",
"# We want to ensure that headers appear in the right order:",
"# 1) for foo.cc, foo.h (preferred location)",
"# 2) c system files",
"# 3) cpp system files",
"# 4) for foo.cc, foo.h (deprecated location)",
"# 5) other google headers",
"#",
"# We classify each include statement as one of those 5 types",
"# using a number of techniques. The include_state object keeps",
"# track of the highest type seen, and complains if we see a",
"# lower type after that.",
"error_message",
"=",
"include_state",
".",
"CheckNextIncludeOrder",
"(",
"_ClassifyInclude",
"(",
"fileinfo",
",",
"include",
",",
"is_system",
")",
")",
"if",
"error_message",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/include_order'",
",",
"4",
",",
"'%s. Should be: %s.h, c system, c++ system, other.'",
"%",
"(",
"error_message",
",",
"fileinfo",
".",
"BaseName",
"(",
")",
")",
")",
"canonical_include",
"=",
"include_state",
".",
"CanonicalizeAlphabeticalOrder",
"(",
"include",
")",
"if",
"not",
"include_state",
".",
"IsInAlphabeticalOrder",
"(",
"clean_lines",
",",
"linenum",
",",
"canonical_include",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/include_alpha'",
",",
"4",
",",
"'Include \"%s\" not in alphabetical order'",
"%",
"include",
")",
"include_state",
".",
"SetLastHeader",
"(",
"canonical_include",
")",
"# Look for any of the stream classes that are part of standard C++.",
"match",
"=",
"_RE_PATTERN_INCLUDE",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"include",
"=",
"match",
".",
"group",
"(",
"2",
")",
"if",
"Match",
"(",
"r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$'",
",",
"include",
")",
":",
"# Many unit tests use cout, so we exempt them.",
"if",
"not",
"_IsTestFilename",
"(",
"filename",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/streams'",
",",
"3",
",",
"'Streams are highly discouraged.'",
")"
] | https://github.com/s9xie/hed/blob/94fb22f10cbfec8d84fbc0642b224022014b6bd6/scripts/cpp_lint.py#L3680-L3749 | ||
scribusproject/scribus | 41ec7c775a060912cf251682a8b1437f753f80f4 | scribus/plugins/scriptplugin_py2x/scripts/CalendarWizard.py | python | ScEventCalendar.printMonth | (self, cal, month, week) | Print the month name(s) | Print the month name(s) | [
"Print",
"the",
"month",
"name",
"(",
"s",
")"
] | def printMonth(self, cal, month, week):
""" Print the month name(s) """
if week[6].day < 7:
if (week == cal[len(cal)-1]):
self.createHeader(localization[self.lang][0][month] + self.sepMonths + localization[self.lang][0][(month+1)%12])
elif ((month-1) not in self.months):
self.createHeader(localization[self.lang][0][(month-1)%12] + self.sepMonths + localization[self.lang][0][month])
else:
self.createHeader(localization[self.lang][0][month]) | [
"def",
"printMonth",
"(",
"self",
",",
"cal",
",",
"month",
",",
"week",
")",
":",
"if",
"week",
"[",
"6",
"]",
".",
"day",
"<",
"7",
":",
"if",
"(",
"week",
"==",
"cal",
"[",
"len",
"(",
"cal",
")",
"-",
"1",
"]",
")",
":",
"self",
".",
"createHeader",
"(",
"localization",
"[",
"self",
".",
"lang",
"]",
"[",
"0",
"]",
"[",
"month",
"]",
"+",
"self",
".",
"sepMonths",
"+",
"localization",
"[",
"self",
".",
"lang",
"]",
"[",
"0",
"]",
"[",
"(",
"month",
"+",
"1",
")",
"%",
"12",
"]",
")",
"elif",
"(",
"(",
"month",
"-",
"1",
")",
"not",
"in",
"self",
".",
"months",
")",
":",
"self",
".",
"createHeader",
"(",
"localization",
"[",
"self",
".",
"lang",
"]",
"[",
"0",
"]",
"[",
"(",
"month",
"-",
"1",
")",
"%",
"12",
"]",
"+",
"self",
".",
"sepMonths",
"+",
"localization",
"[",
"self",
".",
"lang",
"]",
"[",
"0",
"]",
"[",
"month",
"]",
")",
"else",
":",
"self",
".",
"createHeader",
"(",
"localization",
"[",
"self",
".",
"lang",
"]",
"[",
"0",
"]",
"[",
"month",
"]",
")"
] | https://github.com/scribusproject/scribus/blob/41ec7c775a060912cf251682a8b1437f753f80f4/scribus/plugins/scriptplugin_py2x/scripts/CalendarWizard.py#L309-L317 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/boto/boto/rds2/__init__.py | python | connect_to_region | (region_name, **kw_params) | return None | Given a valid region name, return a
:class:`boto.rds2.layer1.RDSConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given | Given a valid region name, return a
:class:`boto.rds2.layer1.RDSConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object. | [
"Given",
"a",
"valid",
"region",
"name",
"return",
"a",
":",
"class",
":",
"boto",
".",
"rds2",
".",
"layer1",
".",
"RDSConnection",
".",
"Any",
"additional",
"parameters",
"after",
"the",
"region_name",
"are",
"passed",
"on",
"to",
"the",
"connect",
"method",
"of",
"the",
"region",
"object",
"."
] | def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.rds2.layer1.RDSConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None | [
"def",
"connect_to_region",
"(",
"region_name",
",",
"*",
"*",
"kw_params",
")",
":",
"for",
"region",
"in",
"regions",
"(",
")",
":",
"if",
"region",
".",
"name",
"==",
"region_name",
":",
"return",
"region",
".",
"connect",
"(",
"*",
"*",
"kw_params",
")",
"return",
"None"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/rds2/__init__.py#L36-L53 | |
protocolbuffers/protobuf | b5ab0b7a18b7336c60130f4ddb2d97c51792f896 | python/google/protobuf/internal/python_message.py | python | _BytesForNonRepeatedElement | (value, field_number, field_type) | Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor. | Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value. | [
"Returns",
"the",
"number",
"of",
"bytes",
"needed",
"to",
"serialize",
"a",
"non",
"-",
"repeated",
"element",
".",
"The",
"returned",
"byte",
"count",
"includes",
"space",
"for",
"tag",
"information",
"and",
"any",
"other",
"additional",
"space",
"associated",
"with",
"serializing",
"value",
"."
] | def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) | [
"def",
"_BytesForNonRepeatedElement",
"(",
"value",
",",
"field_number",
",",
"field_type",
")",
":",
"try",
":",
"fn",
"=",
"type_checkers",
".",
"TYPE_TO_BYTE_SIZE_FN",
"[",
"field_type",
"]",
"return",
"fn",
"(",
"field_number",
",",
"value",
")",
"except",
"KeyError",
":",
"raise",
"message_mod",
".",
"EncodeError",
"(",
"'Unrecognized field type: %d'",
"%",
"field_type",
")"
] | https://github.com/protocolbuffers/protobuf/blob/b5ab0b7a18b7336c60130f4ddb2d97c51792f896/python/google/protobuf/internal/python_message.py#L1030-L1047 | ||
asLody/whale | 6a661b27cc4cf83b7b5a3b02451597ee1ac7f264 | whale/cpplint.py | python | PrintCategories | () | Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter. | Prints a list of all the error-categories used by error messages. | [
"Prints",
"a",
"list",
"of",
"all",
"the",
"error",
"-",
"categories",
"used",
"by",
"error",
"messages",
"."
] | def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0) | [
"def",
"PrintCategories",
"(",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"''",
".",
"join",
"(",
"' %s\\n'",
"%",
"cat",
"for",
"cat",
"in",
"_ERROR_CATEGORIES",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | https://github.com/asLody/whale/blob/6a661b27cc4cf83b7b5a3b02451597ee1ac7f264/whale/cpplint.py#L6137-L6143 | ||
devpack/android-python27 | d42dd67565e104cf7b0b50eb473f615db3e69901 | python-build-with-qt/PyQt-x11-gpl-4.8/pyuic/uic/driver.py | python | Driver._generate | (self) | Generate the Python code. | Generate the Python code. | [
"Generate",
"the",
"Python",
"code",
"."
] | def _generate(self):
""" Generate the Python code. """
if self._opts.output == "-":
pyfile = sys.stdout
elif sys.hexversion >= 0x03000000:
pyfile = open(self._opts.output, 'wt', encoding='utf8')
else:
pyfile = open(self._opts.output, 'wt')
compileUi(self._ui_file, pyfile, self._opts.execute, self._opts.indent,
self._opts.pyqt3_wrapper, self._opts.from_imports) | [
"def",
"_generate",
"(",
"self",
")",
":",
"if",
"self",
".",
"_opts",
".",
"output",
"==",
"\"-\"",
":",
"pyfile",
"=",
"sys",
".",
"stdout",
"elif",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"pyfile",
"=",
"open",
"(",
"self",
".",
"_opts",
".",
"output",
",",
"'wt'",
",",
"encoding",
"=",
"'utf8'",
")",
"else",
":",
"pyfile",
"=",
"open",
"(",
"self",
".",
"_opts",
".",
"output",
",",
"'wt'",
")",
"compileUi",
"(",
"self",
".",
"_ui_file",
",",
"pyfile",
",",
"self",
".",
"_opts",
".",
"execute",
",",
"self",
".",
"_opts",
".",
"indent",
",",
"self",
".",
"_opts",
".",
"pyqt3_wrapper",
",",
"self",
".",
"_opts",
".",
"from_imports",
")"
] | https://github.com/devpack/android-python27/blob/d42dd67565e104cf7b0b50eb473f615db3e69901/python-build-with-qt/PyQt-x11-gpl-4.8/pyuic/uic/driver.py#L57-L68 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/nntplib.py | python | NNTP.last | (self) | return self._statcmd('LAST') | Process a LAST command. No arguments. Return as for STAT. | Process a LAST command. No arguments. Return as for STAT. | [
"Process",
"a",
"LAST",
"command",
".",
"No",
"arguments",
".",
"Return",
"as",
"for",
"STAT",
"."
] | def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST') | [
"def",
"last",
"(",
"self",
")",
":",
"return",
"self",
".",
"_statcmd",
"(",
"'LAST'",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/nntplib.py#L749-L751 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py2/sklearn/datasets/kddcup99.py | python | fetch_kddcup99 | (subset=None, shuffle=False, random_state=None,
percent10=True, download_if_missing=True) | return Bunch(data=data, target=target) | Load and return the kddcup 99 dataset (classification).
The KDD Cup '99 dataset was created by processing the tcpdump portions
of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
created by MIT Lincoln Lab [1] . The artificial data was generated using
a closed network and hand-injected attacks to produce a large number of
different types of attack with normal activity in the background.
As the initial goal was to produce a large training set for supervised
learning algorithms, there is a large proportion (80.1%) of abnormal
data which is unrealistic in real world, and inappropriate for unsupervised
anomaly detection which aims at detecting 'abnormal' data, ie
1) qualitatively different from normal data.
2) in large minority among the observations.
We thus transform the KDD Data set into two different data sets: SA and SF.
- SA is obtained by simply selecting all the normal data, and a small
proportion of abnormal data to gives an anomaly proportion of 1%.
- SF is obtained as in [2]
by simply picking up the data whose attribute logged_in is positive, thus
focusing on the intrusion attack, which gives a proportion of 0.3% of
attack.
- http and smtp are two subsets of SF corresponding with third feature
equal to 'http' (resp. to 'smtp')
General KDD structure :
================ ==========================================
Samples total 4898431
Dimensionality 41
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
SA structure :
================ ==========================================
Samples total 976158
Dimensionality 41
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
SF structure :
================ ==========================================
Samples total 699691
Dimensionality 4
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
http structure :
================ ==========================================
Samples total 619052
Dimensionality 3
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
smtp structure :
================ ==========================================
Samples total 95373
Dimensionality 3
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
.. versionadded:: 0.18
Parameters
----------
subset : None, 'SA', 'SF', 'http', 'smtp'
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
percent10 : bool, default=False
Whether to load only 10 percent of the data.
download_if_missing : bool, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
References
----------
.. [1] Analysis and Results of the 1999 DARPA Off-Line Intrusion
Detection Evaluation Richard Lippmann, Joshua W. Haines,
David J. Fried, Jonathan Korba, Kumar Das
.. [2] A Geometric Framework for Unsupervised Anomaly Detection: Detecting
Intrusions in Unlabeled Data (2002) by Eleazar Eskin, Andrew Arnold,
Michael Prerau, Leonid Portnoy, Sal Stolfo | Load and return the kddcup 99 dataset (classification). | [
"Load",
"and",
"return",
"the",
"kddcup",
"99",
"dataset",
"(",
"classification",
")",
"."
] | def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
percent10=True, download_if_missing=True):
"""Load and return the kddcup 99 dataset (classification).
The KDD Cup '99 dataset was created by processing the tcpdump portions
of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
created by MIT Lincoln Lab [1] . The artificial data was generated using
a closed network and hand-injected attacks to produce a large number of
different types of attack with normal activity in the background.
As the initial goal was to produce a large training set for supervised
learning algorithms, there is a large proportion (80.1%) of abnormal
data which is unrealistic in real world, and inappropriate for unsupervised
anomaly detection which aims at detecting 'abnormal' data, ie
1) qualitatively different from normal data.
2) in large minority among the observations.
We thus transform the KDD Data set into two different data sets: SA and SF.
- SA is obtained by simply selecting all the normal data, and a small
proportion of abnormal data to gives an anomaly proportion of 1%.
- SF is obtained as in [2]
by simply picking up the data whose attribute logged_in is positive, thus
focusing on the intrusion attack, which gives a proportion of 0.3% of
attack.
- http and smtp are two subsets of SF corresponding with third feature
equal to 'http' (resp. to 'smtp')
General KDD structure :
================ ==========================================
Samples total 4898431
Dimensionality 41
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
SA structure :
================ ==========================================
Samples total 976158
Dimensionality 41
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
SF structure :
================ ==========================================
Samples total 699691
Dimensionality 4
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
http structure :
================ ==========================================
Samples total 619052
Dimensionality 3
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
smtp structure :
================ ==========================================
Samples total 95373
Dimensionality 3
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
.. versionadded:: 0.18
Parameters
----------
subset : None, 'SA', 'SF', 'http', 'smtp'
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
percent10 : bool, default=False
Whether to load only 10 percent of the data.
download_if_missing : bool, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
References
----------
.. [1] Analysis and Results of the 1999 DARPA Off-Line Intrusion
Detection Evaluation Richard Lippmann, Joshua W. Haines,
David J. Fried, Jonathan Korba, Kumar Das
.. [2] A Geometric Framework for Unsupervised Anomaly Detection: Detecting
Intrusions in Unlabeled Data (2002) by Eleazar Eskin, Andrew Arnold,
Michael Prerau, Leonid Portnoy, Sal Stolfo
"""
kddcup99 = _fetch_brute_kddcup99(shuffle=shuffle, percent10=percent10,
download_if_missing=download_if_missing)
data = kddcup99.data
target = kddcup99.target
if subset == 'SA':
s = target == b'normal.'
t = np.logical_not(s)
normal_samples = data[s, :]
normal_targets = target[s]
abnormal_samples = data[t, :]
abnormal_targets = target[t]
n_samples_abnormal = abnormal_samples.shape[0]
# selected abnormal samples:
random_state = check_random_state(random_state)
r = random_state.randint(0, n_samples_abnormal, 3377)
abnormal_samples = abnormal_samples[r]
abnormal_targets = abnormal_targets[r]
data = np.r_[normal_samples, abnormal_samples]
target = np.r_[normal_targets, abnormal_targets]
if subset == 'SF' or subset == 'http' or subset == 'smtp':
# select all samples with positive logged_in attribute:
s = data[:, 11] == 1
data = np.c_[data[s, :11], data[s, 12:]]
target = target[s]
data[:, 0] = np.log((data[:, 0] + 0.1).astype(float))
data[:, 4] = np.log((data[:, 4] + 0.1).astype(float))
data[:, 5] = np.log((data[:, 5] + 0.1).astype(float))
if subset == 'http':
s = data[:, 2] == b'http'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
if subset == 'smtp':
s = data[:, 2] == b'smtp'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
if subset == 'SF':
data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
return Bunch(data=data, target=target) | [
"def",
"fetch_kddcup99",
"(",
"subset",
"=",
"None",
",",
"shuffle",
"=",
"False",
",",
"random_state",
"=",
"None",
",",
"percent10",
"=",
"True",
",",
"download_if_missing",
"=",
"True",
")",
":",
"kddcup99",
"=",
"_fetch_brute_kddcup99",
"(",
"shuffle",
"=",
"shuffle",
",",
"percent10",
"=",
"percent10",
",",
"download_if_missing",
"=",
"download_if_missing",
")",
"data",
"=",
"kddcup99",
".",
"data",
"target",
"=",
"kddcup99",
".",
"target",
"if",
"subset",
"==",
"'SA'",
":",
"s",
"=",
"target",
"==",
"b'normal.'",
"t",
"=",
"np",
".",
"logical_not",
"(",
"s",
")",
"normal_samples",
"=",
"data",
"[",
"s",
",",
":",
"]",
"normal_targets",
"=",
"target",
"[",
"s",
"]",
"abnormal_samples",
"=",
"data",
"[",
"t",
",",
":",
"]",
"abnormal_targets",
"=",
"target",
"[",
"t",
"]",
"n_samples_abnormal",
"=",
"abnormal_samples",
".",
"shape",
"[",
"0",
"]",
"# selected abnormal samples:",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"r",
"=",
"random_state",
".",
"randint",
"(",
"0",
",",
"n_samples_abnormal",
",",
"3377",
")",
"abnormal_samples",
"=",
"abnormal_samples",
"[",
"r",
"]",
"abnormal_targets",
"=",
"abnormal_targets",
"[",
"r",
"]",
"data",
"=",
"np",
".",
"r_",
"[",
"normal_samples",
",",
"abnormal_samples",
"]",
"target",
"=",
"np",
".",
"r_",
"[",
"normal_targets",
",",
"abnormal_targets",
"]",
"if",
"subset",
"==",
"'SF'",
"or",
"subset",
"==",
"'http'",
"or",
"subset",
"==",
"'smtp'",
":",
"# select all samples with positive logged_in attribute:",
"s",
"=",
"data",
"[",
":",
",",
"11",
"]",
"==",
"1",
"data",
"=",
"np",
".",
"c_",
"[",
"data",
"[",
"s",
",",
":",
"11",
"]",
",",
"data",
"[",
"s",
",",
"12",
":",
"]",
"]",
"target",
"=",
"target",
"[",
"s",
"]",
"data",
"[",
":",
",",
"0",
"]",
"=",
"np",
".",
"log",
"(",
"(",
"data",
"[",
":",
",",
"0",
"]",
"+",
"0.1",
")",
".",
"astype",
"(",
"float",
")",
")",
"data",
"[",
":",
",",
"4",
"]",
"=",
"np",
".",
"log",
"(",
"(",
"data",
"[",
":",
",",
"4",
"]",
"+",
"0.1",
")",
".",
"astype",
"(",
"float",
")",
")",
"data",
"[",
":",
",",
"5",
"]",
"=",
"np",
".",
"log",
"(",
"(",
"data",
"[",
":",
",",
"5",
"]",
"+",
"0.1",
")",
".",
"astype",
"(",
"float",
")",
")",
"if",
"subset",
"==",
"'http'",
":",
"s",
"=",
"data",
"[",
":",
",",
"2",
"]",
"==",
"b'http'",
"data",
"=",
"data",
"[",
"s",
"]",
"target",
"=",
"target",
"[",
"s",
"]",
"data",
"=",
"np",
".",
"c_",
"[",
"data",
"[",
":",
",",
"0",
"]",
",",
"data",
"[",
":",
",",
"4",
"]",
",",
"data",
"[",
":",
",",
"5",
"]",
"]",
"if",
"subset",
"==",
"'smtp'",
":",
"s",
"=",
"data",
"[",
":",
",",
"2",
"]",
"==",
"b'smtp'",
"data",
"=",
"data",
"[",
"s",
"]",
"target",
"=",
"target",
"[",
"s",
"]",
"data",
"=",
"np",
".",
"c_",
"[",
"data",
"[",
":",
",",
"0",
"]",
",",
"data",
"[",
":",
",",
"4",
"]",
",",
"data",
"[",
":",
",",
"5",
"]",
"]",
"if",
"subset",
"==",
"'SF'",
":",
"data",
"=",
"np",
".",
"c_",
"[",
"data",
"[",
":",
",",
"0",
"]",
",",
"data",
"[",
":",
",",
"2",
"]",
",",
"data",
"[",
":",
",",
"4",
"]",
",",
"data",
"[",
":",
",",
"5",
"]",
"]",
"return",
"Bunch",
"(",
"data",
"=",
"data",
",",
"target",
"=",
"target",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/datasets/kddcup99.py#L42-L212 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/telemetry/telemetry/core/platform/profiler/__init__.py | python | Profiler.WillCloseBrowser | (cls, browser_backend, platform_backend) | Called before the browser is stopped. | Called before the browser is stopped. | [
"Called",
"before",
"the",
"browser",
"is",
"stopped",
"."
] | def WillCloseBrowser(cls, browser_backend, platform_backend):
"""Called before the browser is stopped."""
pass | [
"def",
"WillCloseBrowser",
"(",
"cls",
",",
"browser_backend",
",",
"platform_backend",
")",
":",
"pass"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/telemetry/core/platform/profiler/__init__.py#L40-L42 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mimetypes.py | python | guess_type | (url, strict=True) | return _db.guess_type(url, strict) | Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types. | Guess the type of a file based on its URL. | [
"Guess",
"the",
"type",
"of",
"a",
"file",
"based",
"on",
"its",
"URL",
"."
] | def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
if _db is None:
init()
return _db.guess_type(url, strict) | [
"def",
"guess_type",
"(",
"url",
",",
"strict",
"=",
"True",
")",
":",
"if",
"_db",
"is",
"None",
":",
"init",
"(",
")",
"return",
"_db",
".",
"guess_type",
"(",
"url",
",",
"strict",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mimetypes.py#L272-L292 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/http/client.py | python | HTTPResponse.getheader | (self, name, default=None) | Returns the value of the header matching *name*.
If there are multiple matching headers, the values are
combined into a single string separated by commas and spaces.
If no matching header is found, returns *default* or None if
the *default* is not specified.
If the headers are unknown, raises http.client.ResponseNotReady. | Returns the value of the header matching *name*. | [
"Returns",
"the",
"value",
"of",
"the",
"header",
"matching",
"*",
"name",
"*",
"."
] | def getheader(self, name, default=None):
'''Returns the value of the header matching *name*.
If there are multiple matching headers, the values are
combined into a single string separated by commas and spaces.
If no matching header is found, returns *default* or None if
the *default* is not specified.
If the headers are unknown, raises http.client.ResponseNotReady.
'''
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers) | [
"def",
"getheader",
"(",
"self",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"if",
"self",
".",
"headers",
"is",
"None",
":",
"raise",
"ResponseNotReady",
"(",
")",
"headers",
"=",
"self",
".",
"headers",
".",
"get_all",
"(",
"name",
")",
"or",
"default",
"if",
"isinstance",
"(",
"headers",
",",
"str",
")",
"or",
"not",
"hasattr",
"(",
"headers",
",",
"'__iter__'",
")",
":",
"return",
"headers",
"else",
":",
"return",
"', '",
".",
"join",
"(",
"headers",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/http/client.py#L718-L736 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/cython/Cython/Utils.py | python | build_hex_version | (version_string) | return '0x%08X' % hexversion | Parse and translate '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX). | Parse and translate '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX). | [
"Parse",
"and",
"translate",
"4",
".",
"3a1",
"into",
"the",
"readable",
"hex",
"representation",
"0x040300A1",
"(",
"like",
"PY_VERSION_HEX",
")",
"."
] | def build_hex_version(version_string):
"""
Parse and translate '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX).
"""
# First, parse '4.12a1' into [4, 12, 0, 0xA01].
digits = []
release_status = 0xF0
for digit in re.split('([.abrc]+)', version_string):
if digit in ('a', 'b', 'rc'):
release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[digit]
digits = (digits + [0, 0])[:3] # 1.2a1 -> 1.2.0a1
elif digit != '.':
digits.append(int(digit))
digits = (digits + [0] * 3)[:4]
digits[3] += release_status
# Then, build a single hex value, two hex digits per version part.
hexversion = 0
for digit in digits:
hexversion = (hexversion << 8) + digit
return '0x%08X' % hexversion | [
"def",
"build_hex_version",
"(",
"version_string",
")",
":",
"# First, parse '4.12a1' into [4, 12, 0, 0xA01].",
"digits",
"=",
"[",
"]",
"release_status",
"=",
"0xF0",
"for",
"digit",
"in",
"re",
".",
"split",
"(",
"'([.abrc]+)'",
",",
"version_string",
")",
":",
"if",
"digit",
"in",
"(",
"'a'",
",",
"'b'",
",",
"'rc'",
")",
":",
"release_status",
"=",
"{",
"'a'",
":",
"0xA0",
",",
"'b'",
":",
"0xB0",
",",
"'rc'",
":",
"0xC0",
"}",
"[",
"digit",
"]",
"digits",
"=",
"(",
"digits",
"+",
"[",
"0",
",",
"0",
"]",
")",
"[",
":",
"3",
"]",
"# 1.2a1 -> 1.2.0a1",
"elif",
"digit",
"!=",
"'.'",
":",
"digits",
".",
"append",
"(",
"int",
"(",
"digit",
")",
")",
"digits",
"=",
"(",
"digits",
"+",
"[",
"0",
"]",
"*",
"3",
")",
"[",
":",
"4",
"]",
"digits",
"[",
"3",
"]",
"+=",
"release_status",
"# Then, build a single hex value, two hex digits per version part.",
"hexversion",
"=",
"0",
"for",
"digit",
"in",
"digits",
":",
"hexversion",
"=",
"(",
"hexversion",
"<<",
"8",
")",
"+",
"digit",
"return",
"'0x%08X'",
"%",
"hexversion"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/cython/Cython/Utils.py#L428-L449 | |
tcpexmachina/remy | 687b5db29b81df7ae8737889c78b47e7f9788297 | scripts/plot.py | python | BaseRemyCCPerformancePlotGenerator.parse_senderrunner_output | (cls, result) | return norm_score, sender_data, link_ppt_prior | Parses the output of sender-runner to extract the normalized score, and
sender throughputs and delays. Returns a 3-tuple. The first element is the
normalized score from the sender-runner script. The second element is a list
of lists, one list for each sender, each inner list having two elements,
[throughput, delay]. The third element is a list [low, high], being
the link rate range under "prior assumptions". | Parses the output of sender-runner to extract the normalized score, and
sender throughputs and delays. Returns a 3-tuple. The first element is the
normalized score from the sender-runner script. The second element is a list
of lists, one list for each sender, each inner list having two elements,
[throughput, delay]. The third element is a list [low, high], being
the link rate range under "prior assumptions". | [
"Parses",
"the",
"output",
"of",
"sender",
"-",
"runner",
"to",
"extract",
"the",
"normalized",
"score",
"and",
"sender",
"throughputs",
"and",
"delays",
".",
"Returns",
"a",
"3",
"-",
"tuple",
".",
"The",
"first",
"element",
"is",
"the",
"normalized",
"score",
"from",
"the",
"sender",
"-",
"runner",
"script",
".",
"The",
"second",
"element",
"is",
"a",
"list",
"of",
"lists",
"one",
"list",
"for",
"each",
"sender",
"each",
"inner",
"list",
"having",
"two",
"elements",
"[",
"throughput",
"delay",
"]",
".",
"The",
"third",
"element",
"is",
"a",
"list",
"[",
"low",
"high",
"]",
"being",
"the",
"link",
"rate",
"range",
"under",
"prior",
"assumptions",
"."
] | def parse_senderrunner_output(cls, result):
"""Parses the output of sender-runner to extract the normalized score, and
sender throughputs and delays. Returns a 3-tuple. The first element is the
normalized score from the sender-runner script. The second element is a list
of lists, one list for each sender, each inner list having two elements,
[throughput, delay]. The third element is a list [low, high], being
the link rate range under "prior assumptions"."""
norm_matches = cls.NORM_SCORE_REGEX.findall(result)
if len(norm_matches) != 1:
print(result)
raise RuntimeError("Found no or duplicate normalized scores in this output.")
norm_score = float(norm_matches[0])
sender_matches = cls.SENDER_REGEX.findall(result)
sender_data = [map(float, x) for x in sender_matches] # [[throughput, delay], [throughput, delay], ...]
if len(sender_data) == 0:
print(result)
warn("No senders found in this output.")
link_ppt_prior_matches = cls.LINK_PPT_PRIOR_REGEX.findall(result)
if len(link_ppt_prior_matches) != 1:
print(result)
raise RuntimeError("Found no or duplicate link packets per ms prior assumptions in this output.")
link_ppt_prior = tuple(map(float, link_ppt_prior_matches[0]))
# Divide norm_score the number of senders (sender-runner returns the sum)
norm_score /= len(sender_data)
return norm_score, sender_data, link_ppt_prior | [
"def",
"parse_senderrunner_output",
"(",
"cls",
",",
"result",
")",
":",
"norm_matches",
"=",
"cls",
".",
"NORM_SCORE_REGEX",
".",
"findall",
"(",
"result",
")",
"if",
"len",
"(",
"norm_matches",
")",
"!=",
"1",
":",
"print",
"(",
"result",
")",
"raise",
"RuntimeError",
"(",
"\"Found no or duplicate normalized scores in this output.\"",
")",
"norm_score",
"=",
"float",
"(",
"norm_matches",
"[",
"0",
"]",
")",
"sender_matches",
"=",
"cls",
".",
"SENDER_REGEX",
".",
"findall",
"(",
"result",
")",
"sender_data",
"=",
"[",
"map",
"(",
"float",
",",
"x",
")",
"for",
"x",
"in",
"sender_matches",
"]",
"# [[throughput, delay], [throughput, delay], ...]",
"if",
"len",
"(",
"sender_data",
")",
"==",
"0",
":",
"print",
"(",
"result",
")",
"warn",
"(",
"\"No senders found in this output.\"",
")",
"link_ppt_prior_matches",
"=",
"cls",
".",
"LINK_PPT_PRIOR_REGEX",
".",
"findall",
"(",
"result",
")",
"if",
"len",
"(",
"link_ppt_prior_matches",
")",
"!=",
"1",
":",
"print",
"(",
"result",
")",
"raise",
"RuntimeError",
"(",
"\"Found no or duplicate link packets per ms prior assumptions in this output.\"",
")",
"link_ppt_prior",
"=",
"tuple",
"(",
"map",
"(",
"float",
",",
"link_ppt_prior_matches",
"[",
"0",
"]",
")",
")",
"# Divide norm_score the number of senders (sender-runner returns the sum)",
"norm_score",
"/=",
"len",
"(",
"sender_data",
")",
"return",
"norm_score",
",",
"sender_data",
",",
"link_ppt_prior"
] | https://github.com/tcpexmachina/remy/blob/687b5db29b81df7ae8737889c78b47e7f9788297/scripts/plot.py#L116-L145 | |
mapeditor/tiled | b7abf3c9606aa53442bab8fc6a44a1b2797226e0 | src/plugins/python/scripts/pk2.py | python | PK2MAPLAYER.findBounds | (self) | return mx, my, mw, mh | find bounding box for coords that have tiles | find bounding box for coords that have tiles | [
"find",
"bounding",
"box",
"for",
"coords",
"that",
"have",
"tiles"
] | def findBounds(self):
"find bounding box for coords that have tiles"
mx,my,mw,mh = None,None,10,10
for y in range(self.ly, self.ly+self.height()):
for x in range(self.lx, self.lx+self.width()):
if self.layer[x + y * self.MAXW] != 255:
if not my: my = y
if not mx or x < mx: mx = x
if x > mw: mw = x
if y > mh: mh = y
if not mx: mx = 0
if not my: my = 0
return mx, my, mw, mh | [
"def",
"findBounds",
"(",
"self",
")",
":",
"mx",
",",
"my",
",",
"mw",
",",
"mh",
"=",
"None",
",",
"None",
",",
"10",
",",
"10",
"for",
"y",
"in",
"range",
"(",
"self",
".",
"ly",
",",
"self",
".",
"ly",
"+",
"self",
".",
"height",
"(",
")",
")",
":",
"for",
"x",
"in",
"range",
"(",
"self",
".",
"lx",
",",
"self",
".",
"lx",
"+",
"self",
".",
"width",
"(",
")",
")",
":",
"if",
"self",
".",
"layer",
"[",
"x",
"+",
"y",
"*",
"self",
".",
"MAXW",
"]",
"!=",
"255",
":",
"if",
"not",
"my",
":",
"my",
"=",
"y",
"if",
"not",
"mx",
"or",
"x",
"<",
"mx",
":",
"mx",
"=",
"x",
"if",
"x",
">",
"mw",
":",
"mw",
"=",
"x",
"if",
"y",
">",
"mh",
":",
"mh",
"=",
"y",
"if",
"not",
"mx",
":",
"mx",
"=",
"0",
"if",
"not",
"my",
":",
"my",
"=",
"0",
"return",
"mx",
",",
"my",
",",
"mw",
",",
"mh"
] | https://github.com/mapeditor/tiled/blob/b7abf3c9606aa53442bab8fc6a44a1b2797226e0/src/plugins/python/scripts/pk2.py#L209-L223 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/generate_stubs/generate_stubs.py | python | PosixStubWriter.UninitializeModuleName | (cls, module_name) | return 'Uninitialize%s' % PosixStubWriter.CStyleIdentifier(module_name) | Gets the name of the function that uninitializes this module.
The name is in the format UninitializeModule. Where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the uninitialization function. | Gets the name of the function that uninitializes this module. | [
"Gets",
"the",
"name",
"of",
"the",
"function",
"that",
"uninitializes",
"this",
"module",
"."
] | def UninitializeModuleName(cls, module_name):
"""Gets the name of the function that uninitializes this module.
The name is in the format UninitializeModule. Where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the uninitialization function.
"""
return 'Uninitialize%s' % PosixStubWriter.CStyleIdentifier(module_name) | [
"def",
"UninitializeModuleName",
"(",
"cls",
",",
"module_name",
")",
":",
"return",
"'Uninitialize%s'",
"%",
"PosixStubWriter",
".",
"CStyleIdentifier",
"(",
"module_name",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/generate_stubs/generate_stubs.py#L601-L613 | |
apiaryio/drafter | 4634ebd07f6c6f257cc656598ccd535492fdfb55 | tools/gyp/pylib/gyp/MSVSUtil.py | python | ShardTargets | (target_list, target_dicts) | return (new_target_list, new_target_dicts) | Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs. | Shard some targets apart to work around the linkers limits. | [
"Shard",
"some",
"targets",
"apart",
"to",
"work",
"around",
"the",
"linkers",
"limits",
"."
] | def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in sorted(new_target_dicts):
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts) | [
"def",
"ShardTargets",
"(",
"target_list",
",",
"target_dicts",
")",
":",
"# Gather the targets to shard, and how many pieces.",
"targets_to_shard",
"=",
"{",
"}",
"for",
"t",
"in",
"target_dicts",
":",
"shards",
"=",
"int",
"(",
"target_dicts",
"[",
"t",
"]",
".",
"get",
"(",
"'msvs_shard'",
",",
"0",
")",
")",
"if",
"shards",
":",
"targets_to_shard",
"[",
"t",
"]",
"=",
"shards",
"# Shard target_list.",
"new_target_list",
"=",
"[",
"]",
"for",
"t",
"in",
"target_list",
":",
"if",
"t",
"in",
"targets_to_shard",
":",
"for",
"i",
"in",
"range",
"(",
"targets_to_shard",
"[",
"t",
"]",
")",
":",
"new_target_list",
".",
"append",
"(",
"_ShardName",
"(",
"t",
",",
"i",
")",
")",
"else",
":",
"new_target_list",
".",
"append",
"(",
"t",
")",
"# Shard target_dict.",
"new_target_dicts",
"=",
"{",
"}",
"for",
"t",
"in",
"target_dicts",
":",
"if",
"t",
"in",
"targets_to_shard",
":",
"for",
"i",
"in",
"range",
"(",
"targets_to_shard",
"[",
"t",
"]",
")",
":",
"name",
"=",
"_ShardName",
"(",
"t",
",",
"i",
")",
"new_target_dicts",
"[",
"name",
"]",
"=",
"copy",
".",
"copy",
"(",
"target_dicts",
"[",
"t",
"]",
")",
"new_target_dicts",
"[",
"name",
"]",
"[",
"'target_name'",
"]",
"=",
"_ShardName",
"(",
"new_target_dicts",
"[",
"name",
"]",
"[",
"'target_name'",
"]",
",",
"i",
")",
"sources",
"=",
"new_target_dicts",
"[",
"name",
"]",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
"new_sources",
"=",
"[",
"]",
"for",
"pos",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"sources",
")",
",",
"targets_to_shard",
"[",
"t",
"]",
")",
":",
"new_sources",
".",
"append",
"(",
"sources",
"[",
"pos",
"]",
")",
"new_target_dicts",
"[",
"name",
"]",
"[",
"'sources'",
"]",
"=",
"new_sources",
"else",
":",
"new_target_dicts",
"[",
"t",
"]",
"=",
"target_dicts",
"[",
"t",
"]",
"# Shard dependencies.",
"for",
"t",
"in",
"sorted",
"(",
"new_target_dicts",
")",
":",
"for",
"deptype",
"in",
"(",
"'dependencies'",
",",
"'dependencies_original'",
")",
":",
"dependencies",
"=",
"copy",
".",
"copy",
"(",
"new_target_dicts",
"[",
"t",
"]",
".",
"get",
"(",
"deptype",
",",
"[",
"]",
")",
")",
"new_dependencies",
"=",
"[",
"]",
"for",
"d",
"in",
"dependencies",
":",
"if",
"d",
"in",
"targets_to_shard",
":",
"for",
"i",
"in",
"range",
"(",
"targets_to_shard",
"[",
"d",
"]",
")",
":",
"new_dependencies",
".",
"append",
"(",
"_ShardName",
"(",
"d",
",",
"i",
")",
")",
"else",
":",
"new_dependencies",
".",
"append",
"(",
"d",
")",
"new_target_dicts",
"[",
"t",
"]",
"[",
"deptype",
"]",
"=",
"new_dependencies",
"return",
"(",
"new_target_list",
",",
"new_target_dicts",
")"
] | https://github.com/apiaryio/drafter/blob/4634ebd07f6c6f257cc656598ccd535492fdfb55/tools/gyp/pylib/gyp/MSVSUtil.py#L73-L125 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/aui.py | python | AuiPaneInfo.Direction | (*args, **kwargs) | return _aui.AuiPaneInfo_Direction(*args, **kwargs) | Direction(self, int direction) -> AuiPaneInfo | Direction(self, int direction) -> AuiPaneInfo | [
"Direction",
"(",
"self",
"int",
"direction",
")",
"-",
">",
"AuiPaneInfo"
] | def Direction(*args, **kwargs):
"""Direction(self, int direction) -> AuiPaneInfo"""
return _aui.AuiPaneInfo_Direction(*args, **kwargs) | [
"def",
"Direction",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiPaneInfo_Direction",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/aui.py#L373-L375 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/pathlib.py | python | PurePath.is_absolute | (self) | return not self._flavour.has_drv or bool(self._drv) | True if the path is absolute (has both a root and, if applicable,
a drive). | True if the path is absolute (has both a root and, if applicable,
a drive). | [
"True",
"if",
"the",
"path",
"is",
"absolute",
"(",
"has",
"both",
"a",
"root",
"and",
"if",
"applicable",
"a",
"drive",
")",
"."
] | def is_absolute(self):
"""True if the path is absolute (has both a root and, if applicable,
a drive)."""
if not self._root:
return False
return not self._flavour.has_drv or bool(self._drv) | [
"def",
"is_absolute",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_root",
":",
"return",
"False",
"return",
"not",
"self",
".",
"_flavour",
".",
"has_drv",
"or",
"bool",
"(",
"self",
".",
"_drv",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/pathlib.py#L1001-L1006 | |
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/contrib/distributions/python/ops/categorical.py | python | Categorical.allow_nan_stats | (self) | return self._allow_nan_stats | Boolean describing behavior when a stat is undefined for batch member. | Boolean describing behavior when a stat is undefined for batch member. | [
"Boolean",
"describing",
"behavior",
"when",
"a",
"stat",
"is",
"undefined",
"for",
"batch",
"member",
"."
] | def allow_nan_stats(self):
"""Boolean describing behavior when a stat is undefined for batch member."""
return self._allow_nan_stats | [
"def",
"allow_nan_stats",
"(",
"self",
")",
":",
"return",
"self",
".",
"_allow_nan_stats"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/distributions/python/ops/categorical.py#L74-L76 | |
PrincetonUniversity/athena-public-version | 9c266692b9423743d8e23509b3ab266a232a92d2 | tst/style/cpplint.py | python | _VerboseLevel | () | return _cpplint_state.verbose_level | Returns the module's verbosity setting. | Returns the module's verbosity setting. | [
"Returns",
"the",
"module",
"s",
"verbosity",
"setting",
"."
] | def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level | [
"def",
"_VerboseLevel",
"(",
")",
":",
"return",
"_cpplint_state",
".",
"verbose_level"
] | https://github.com/PrincetonUniversity/athena-public-version/blob/9c266692b9423743d8e23509b3ab266a232a92d2/tst/style/cpplint.py#L1190-L1192 | |
Tencent/mars | 54969ba56b402a622db123e780a4f760b38c5c36 | mars/lint/cpplint.py | python | CheckAltTokens | (filename, clean_lines, linenum, error) | Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Check alternative keywords being used in boolean expressions. | [
"Check",
"alternative",
"keywords",
"being",
"used",
"in",
"boolean",
"expressions",
"."
] | def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) | [
"def",
"CheckAltTokens",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# Avoid preprocessor lines",
"if",
"Match",
"(",
"r'^\\s*#'",
",",
"line",
")",
":",
"return",
"# Last ditch effort to avoid multi-line comments. This will not help",
"# if the comment started before the current line or ended after the",
"# current line, but it catches most of the false positives. At least,",
"# it provides a way to workaround this warning for people who use",
"# multi-line comments in preprocessor macros.",
"#",
"# TODO(unknown): remove this once cpplint has better support for",
"# multi-line comments.",
"if",
"line",
".",
"find",
"(",
"'/*'",
")",
">=",
"0",
"or",
"line",
".",
"find",
"(",
"'*/'",
")",
">=",
"0",
":",
"return",
"for",
"match",
"in",
"_ALT_TOKEN_REPLACEMENT_PATTERN",
".",
"finditer",
"(",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/alt_tokens'",
",",
"2",
",",
"'Use operator %s instead of %s'",
"%",
"(",
"_ALT_TOKEN_REPLACEMENT",
"[",
"match",
".",
"group",
"(",
"1",
")",
"]",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
")"
] | https://github.com/Tencent/mars/blob/54969ba56b402a622db123e780a4f760b38c5c36/mars/lint/cpplint.py#L4324-L4353 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/fractions.py | python | Fraction.__neg__ | (a) | return Fraction(-a._numerator, a._denominator) | -a | -a | [
"-",
"a"
] | def __neg__(a):
"""-a"""
return Fraction(-a._numerator, a._denominator) | [
"def",
"__neg__",
"(",
"a",
")",
":",
"return",
"Fraction",
"(",
"-",
"a",
".",
"_numerator",
",",
"a",
".",
"_denominator",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/fractions.py#L493-L495 | |
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Source/ThirdParty/CEF3/cef_source/tools/cef_parser.py | python | obj_analysis.is_result_map_multi | (self) | return (self.result_type == 'multimap') | Returns true if this is a multi map type. | Returns true if this is a multi map type. | [
"Returns",
"true",
"if",
"this",
"is",
"a",
"multi",
"map",
"type",
"."
] | def is_result_map_multi(self):
""" Returns true if this is a multi map type. """
return (self.result_type == 'multimap') | [
"def",
"is_result_map_multi",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"result_type",
"==",
"'multimap'",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/cef_source/tools/cef_parser.py#L1966-L1968 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/protobuf/python/google/protobuf/internal/well_known_types.py | python | Duration.FromMilliseconds | (self, millis) | Converts milliseconds to Duration. | Converts milliseconds to Duration. | [
"Converts",
"milliseconds",
"to",
"Duration",
"."
] | def FromMilliseconds(self, millis):
"""Converts milliseconds to Duration."""
self._NormalizeDuration(
millis // _MILLIS_PER_SECOND,
(millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) | [
"def",
"FromMilliseconds",
"(",
"self",
",",
"millis",
")",
":",
"self",
".",
"_NormalizeDuration",
"(",
"millis",
"//",
"_MILLIS_PER_SECOND",
",",
"(",
"millis",
"%",
"_MILLIS_PER_SECOND",
")",
"*",
"_NANOS_PER_MILLISECOND",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/protobuf/python/google/protobuf/internal/well_known_types.py#L330-L334 | ||
runtimejs/runtime | 0a6e84c30823d35a4548d6634166784260ae7b74 | deps/v8/tools/stats-viewer.py | python | ChromeCounter.Name | (self) | return result | Return the ascii name of this counter. | Return the ascii name of this counter. | [
"Return",
"the",
"ascii",
"name",
"of",
"this",
"counter",
"."
] | def Name(self):
"""Return the ascii name of this counter."""
result = ""
index = self.name_offset
current = self.data.ByteAt(index)
while current:
result += chr(current)
index += 1
current = self.data.ByteAt(index)
return result | [
"def",
"Name",
"(",
"self",
")",
":",
"result",
"=",
"\"\"",
"index",
"=",
"self",
".",
"name_offset",
"current",
"=",
"self",
".",
"data",
".",
"ByteAt",
"(",
"index",
")",
"while",
"current",
":",
"result",
"+=",
"chr",
"(",
"current",
")",
"index",
"+=",
"1",
"current",
"=",
"self",
".",
"data",
".",
"ByteAt",
"(",
"index",
")",
"return",
"result"
] | https://github.com/runtimejs/runtime/blob/0a6e84c30823d35a4548d6634166784260ae7b74/deps/v8/tools/stats-viewer.py#L402-L411 | |
pristineio/webrtc-mirror | 7a5bcdffaab90a05bc1146b2b1ea71c004e54d71 | tools_webrtc/valgrind/common.py | python | NormalizeWindowsPath | (path) | If we're using Cygwin Python, turn the path into a Windows path.
Don't turn forward slashes into backslashes for easier copy-pasting and
escaping.
TODO(rnk): If we ever want to cut out the subprocess invocation, we can use
_winreg to get the root Cygwin directory from the registry key:
HKEY_LOCAL_MACHINE\SOFTWARE\Cygwin\setup\rootdir. | If we're using Cygwin Python, turn the path into a Windows path. | [
"If",
"we",
"re",
"using",
"Cygwin",
"Python",
"turn",
"the",
"path",
"into",
"a",
"Windows",
"path",
"."
] | def NormalizeWindowsPath(path):
"""If we're using Cygwin Python, turn the path into a Windows path.
Don't turn forward slashes into backslashes for easier copy-pasting and
escaping.
TODO(rnk): If we ever want to cut out the subprocess invocation, we can use
_winreg to get the root Cygwin directory from the registry key:
HKEY_LOCAL_MACHINE\SOFTWARE\Cygwin\setup\rootdir.
"""
if sys.platform.startswith("cygwin"):
p = subprocess.Popen(["cygpath", "-m", path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
if err:
logging.warning("WARNING: cygpath error: %s", err)
return out.strip()
else:
return path | [
"def",
"NormalizeWindowsPath",
"(",
"path",
")",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"cygwin\"",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"cygpath\"",
",",
"\"-m\"",
",",
"path",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"(",
"out",
",",
"err",
")",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"err",
":",
"logging",
".",
"warning",
"(",
"\"WARNING: cygpath error: %s\"",
",",
"err",
")",
"return",
"out",
".",
"strip",
"(",
")",
"else",
":",
"return",
"path"
] | https://github.com/pristineio/webrtc-mirror/blob/7a5bcdffaab90a05bc1146b2b1ea71c004e54d71/tools_webrtc/valgrind/common.py#L214-L233 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_misc.py | python | FileTypeInfo.SetIcon | (*args, **kwargs) | return _misc_.FileTypeInfo_SetIcon(*args, **kwargs) | SetIcon(self, String iconFile, int iconIndex=0) | SetIcon(self, String iconFile, int iconIndex=0) | [
"SetIcon",
"(",
"self",
"String",
"iconFile",
"int",
"iconIndex",
"=",
"0",
")"
] | def SetIcon(*args, **kwargs):
"""SetIcon(self, String iconFile, int iconIndex=0)"""
return _misc_.FileTypeInfo_SetIcon(*args, **kwargs) | [
"def",
"SetIcon",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"FileTypeInfo_SetIcon",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L2507-L2509 | |
asLody/whale | 6a661b27cc4cf83b7b5a3b02451597ee1ac7f264 | whale/cpplint.py | python | NestingState.InNamespaceBody | (self) | return self.stack and isinstance(self.stack[-1], _NamespaceInfo) | Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise. | Check if we are currently one level inside a namespace body. | [
"Check",
"if",
"we",
"are",
"currently",
"one",
"level",
"inside",
"a",
"namespace",
"body",
"."
] | def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo) | [
"def",
"InNamespaceBody",
"(",
"self",
")",
":",
"return",
"self",
".",
"stack",
"and",
"isinstance",
"(",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
",",
"_NamespaceInfo",
")"
] | https://github.com/asLody/whale/blob/6a661b27cc4cf83b7b5a3b02451597ee1ac7f264/whale/cpplint.py#L2441-L2447 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/cr/cr/actions/debugger.py | python | Debugger.Attach | (self, context, targets, arguments) | Attach a debugger to a running program. | Attach a debugger to a running program. | [
"Attach",
"a",
"debugger",
"to",
"a",
"running",
"program",
"."
] | def Attach(self, context, targets, arguments):
"""Attach a debugger to a running program."""
raise NotImplementedError('Must be overridden.') | [
"def",
"Attach",
"(",
"self",
",",
"context",
",",
"targets",
",",
"arguments",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Must be overridden.'",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/cr/cr/actions/debugger.py#L49-L51 | ||
llvm-dcpu16/llvm-dcpu16 | ae6b01fecd03219677e391d4421df5d966d80dcf | utils/lint/common_lint.py | python | VerifyLineLength | (filename, lines, max_length) | return lint | Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found. | Checks to make sure the file has no lines with lines exceeding the length
limit. | [
"Checks",
"to",
"make",
"sure",
"the",
"file",
"has",
"no",
"lines",
"with",
"lines",
"exceeding",
"the",
"length",
"limit",
"."
] | def VerifyLineLength(filename, lines, max_length):
"""Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
line_num = 1
for line in lines:
length = len(line.rstrip('\n'))
if length > max_length:
lint.append((filename, line_num,
'Line exceeds %d chars (%d)' % (max_length, length)))
line_num += 1
return lint | [
"def",
"VerifyLineLength",
"(",
"filename",
",",
"lines",
",",
"max_length",
")",
":",
"lint",
"=",
"[",
"]",
"line_num",
"=",
"1",
"for",
"line",
"in",
"lines",
":",
"length",
"=",
"len",
"(",
"line",
".",
"rstrip",
"(",
"'\\n'",
")",
")",
"if",
"length",
">",
"max_length",
":",
"lint",
".",
"append",
"(",
"(",
"filename",
",",
"line_num",
",",
"'Line exceeds %d chars (%d)'",
"%",
"(",
"max_length",
",",
"length",
")",
")",
")",
"line_num",
"+=",
"1",
"return",
"lint"
] | https://github.com/llvm-dcpu16/llvm-dcpu16/blob/ae6b01fecd03219677e391d4421df5d966d80dcf/utils/lint/common_lint.py#L7-L28 | |
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/python/turicreate/data_structures/sarray.py | python | SArray.sort | (self, ascending=True) | return sf.sort("a", ascending)["a"] | Sort all values in this SArray.
Sort only works for sarray of type str, int and float, otherwise TypeError
will be raised. Creates a new, sorted SArray.
Parameters
----------
ascending: boolean, optional
If true, the sarray values are sorted in ascending order, otherwise,
descending order.
Returns
-------
out: SArray
Examples
--------
>>> sa = SArray([3,2,1])
>>> sa.sort()
dtype: int
Rows: 3
[1, 2, 3] | Sort all values in this SArray. | [
"Sort",
"all",
"values",
"in",
"this",
"SArray",
"."
] | def sort(self, ascending=True):
"""
Sort all values in this SArray.
Sort only works for sarray of type str, int and float, otherwise TypeError
will be raised. Creates a new, sorted SArray.
Parameters
----------
ascending: boolean, optional
If true, the sarray values are sorted in ascending order, otherwise,
descending order.
Returns
-------
out: SArray
Examples
--------
>>> sa = SArray([3,2,1])
>>> sa.sort()
dtype: int
Rows: 3
[1, 2, 3]
"""
from .sframe import SFrame as _SFrame
if self.dtype not in (int, float, str, datetime.datetime):
raise TypeError(
"Only sarray with type (int, float, str, datetime.datetime) can be sorted"
)
sf = _SFrame()
sf["a"] = self
return sf.sort("a", ascending)["a"] | [
"def",
"sort",
"(",
"self",
",",
"ascending",
"=",
"True",
")",
":",
"from",
".",
"sframe",
"import",
"SFrame",
"as",
"_SFrame",
"if",
"self",
".",
"dtype",
"not",
"in",
"(",
"int",
",",
"float",
",",
"str",
",",
"datetime",
".",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"\"Only sarray with type (int, float, str, datetime.datetime) can be sorted\"",
")",
"sf",
"=",
"_SFrame",
"(",
")",
"sf",
"[",
"\"a\"",
"]",
"=",
"self",
"return",
"sf",
".",
"sort",
"(",
"\"a\"",
",",
"ascending",
")",
"[",
"\"a\"",
"]"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/data_structures/sarray.py#L3634-L3667 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/stc.py | python | StyledTextCtrl.GetModify | (*args, **kwargs) | return _stc.StyledTextCtrl_GetModify(*args, **kwargs) | GetModify(self) -> bool
Is the document different from when it was last saved? | GetModify(self) -> bool | [
"GetModify",
"(",
"self",
")",
"-",
">",
"bool"
] | def GetModify(*args, **kwargs):
"""
GetModify(self) -> bool
Is the document different from when it was last saved?
"""
return _stc.StyledTextCtrl_GetModify(*args, **kwargs) | [
"def",
"GetModify",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_GetModify",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L3569-L3575 | |
CleverRaven/Cataclysm-DDA | 03e7363df0835ec1b39da973ea29f26f27833b38 | tools/gfx_tools/compose.py | python | write_to_json | (
pathname: str,
data: Union[dict, list],
format_json: bool = False,
) | Write data to a JSON file | Write data to a JSON file | [
"Write",
"data",
"to",
"a",
"JSON",
"file"
] | def write_to_json(
pathname: str,
data: Union[dict, list],
format_json: bool = False,
) -> None:
'''
Write data to a JSON file
'''
kwargs = {
'ensure_ascii': False,
}
if format_json:
kwargs['indent'] = 2
with open(pathname, 'w', encoding="utf-8") as file:
json.dump(data, file, **kwargs)
if not format_json:
return
json_formatter = Path('tools/format/json_formatter.cgi')
if json_formatter.is_file():
cmd = [json_formatter, pathname]
subprocess.call(cmd)
else:
log.warning(
'%s not found, Python built-in formatter was used.',
json_formatter) | [
"def",
"write_to_json",
"(",
"pathname",
":",
"str",
",",
"data",
":",
"Union",
"[",
"dict",
",",
"list",
"]",
",",
"format_json",
":",
"bool",
"=",
"False",
",",
")",
"->",
"None",
":",
"kwargs",
"=",
"{",
"'ensure_ascii'",
":",
"False",
",",
"}",
"if",
"format_json",
":",
"kwargs",
"[",
"'indent'",
"]",
"=",
"2",
"with",
"open",
"(",
"pathname",
",",
"'w'",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"file",
":",
"json",
".",
"dump",
"(",
"data",
",",
"file",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"format_json",
":",
"return",
"json_formatter",
"=",
"Path",
"(",
"'tools/format/json_formatter.cgi'",
")",
"if",
"json_formatter",
".",
"is_file",
"(",
")",
":",
"cmd",
"=",
"[",
"json_formatter",
",",
"pathname",
"]",
"subprocess",
".",
"call",
"(",
"cmd",
")",
"else",
":",
"log",
".",
"warning",
"(",
"'%s not found, Python built-in formatter was used.'",
",",
"json_formatter",
")"
] | https://github.com/CleverRaven/Cataclysm-DDA/blob/03e7363df0835ec1b39da973ea29f26f27833b38/tools/gfx_tools/compose.py#L157-L184 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/lib2to3/fixes/fix_metaclass.py | python | fixup_simple_stmt | (parent, i, stmt_node) | if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
everything after the semi-colon into its own simple_stmt node | if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
everything after the semi-colon into its own simple_stmt node | [
"if",
"there",
"is",
"a",
"semi",
"-",
"colon",
"all",
"the",
"parts",
"count",
"as",
"part",
"of",
"the",
"same",
"simple_stmt",
".",
"We",
"just",
"want",
"the",
"__metaclass__",
"part",
"so",
"we",
"move",
"everything",
"after",
"the",
"semi",
"-",
"colon",
"into",
"its",
"own",
"simple_stmt",
"node"
] | def fixup_simple_stmt(parent, i, stmt_node):
""" if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
everything after the semi-colon into its own simple_stmt node
"""
for semi_ind, node in enumerate(stmt_node.children):
if node.type == token.SEMI: # *sigh*
break
else:
return
node.remove() # kill the semicolon
new_expr = Node(syms.expr_stmt, [])
new_stmt = Node(syms.simple_stmt, [new_expr])
while stmt_node.children[semi_ind:]:
move_node = stmt_node.children[semi_ind]
new_expr.append_child(move_node.clone())
move_node.remove()
parent.insert_child(i, new_stmt)
new_leaf1 = new_stmt.children[0].children[0]
old_leaf1 = stmt_node.children[0].children[0]
new_leaf1.prefix = old_leaf1.prefix | [
"def",
"fixup_simple_stmt",
"(",
"parent",
",",
"i",
",",
"stmt_node",
")",
":",
"for",
"semi_ind",
",",
"node",
"in",
"enumerate",
"(",
"stmt_node",
".",
"children",
")",
":",
"if",
"node",
".",
"type",
"==",
"token",
".",
"SEMI",
":",
"# *sigh*",
"break",
"else",
":",
"return",
"node",
".",
"remove",
"(",
")",
"# kill the semicolon",
"new_expr",
"=",
"Node",
"(",
"syms",
".",
"expr_stmt",
",",
"[",
"]",
")",
"new_stmt",
"=",
"Node",
"(",
"syms",
".",
"simple_stmt",
",",
"[",
"new_expr",
"]",
")",
"while",
"stmt_node",
".",
"children",
"[",
"semi_ind",
":",
"]",
":",
"move_node",
"=",
"stmt_node",
".",
"children",
"[",
"semi_ind",
"]",
"new_expr",
".",
"append_child",
"(",
"move_node",
".",
"clone",
"(",
")",
")",
"move_node",
".",
"remove",
"(",
")",
"parent",
".",
"insert_child",
"(",
"i",
",",
"new_stmt",
")",
"new_leaf1",
"=",
"new_stmt",
".",
"children",
"[",
"0",
"]",
".",
"children",
"[",
"0",
"]",
"old_leaf1",
"=",
"stmt_node",
".",
"children",
"[",
"0",
"]",
".",
"children",
"[",
"0",
"]",
"new_leaf1",
".",
"prefix",
"=",
"old_leaf1",
".",
"prefix"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/lib2to3/fixes/fix_metaclass.py#L71-L92 | ||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/eager/graph_only_ops.py | python | graph_placeholder | (dtype, shape, name=None) | return result | Graph-only version of tf.placeholder(), for internal use only. | Graph-only version of tf.placeholder(), for internal use only. | [
"Graph",
"-",
"only",
"version",
"of",
"tf",
".",
"placeholder",
"()",
"for",
"internal",
"use",
"only",
"."
] | def graph_placeholder(dtype, shape, name=None):
"""Graph-only version of tf.placeholder(), for internal use only."""
dtype = dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
if isinstance(shape, (list, tuple)):
shape = tensor_shape.TensorShape(shape)
assert isinstance(shape, tensor_shape.TensorShape)
shape = attr_value_pb2.AttrValue(shape=shape.as_proto())
g = ops.get_default_graph()
with ops.name_scope(name, "placeholder", []) as name:
op = g.create_op("Placeholder", [], [dtype], input_types=[],
attrs={"dtype": dtype_value, "shape": shape}, name=name)
result, = op.outputs
return result | [
"def",
"graph_placeholder",
"(",
"dtype",
",",
"shape",
",",
"name",
"=",
"None",
")",
":",
"dtype",
"=",
"dtype",
".",
"base_dtype",
"dtype_value",
"=",
"attr_value_pb2",
".",
"AttrValue",
"(",
"type",
"=",
"dtype",
".",
"as_datatype_enum",
")",
"if",
"isinstance",
"(",
"shape",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"shape",
"=",
"tensor_shape",
".",
"TensorShape",
"(",
"shape",
")",
"assert",
"isinstance",
"(",
"shape",
",",
"tensor_shape",
".",
"TensorShape",
")",
"shape",
"=",
"attr_value_pb2",
".",
"AttrValue",
"(",
"shape",
"=",
"shape",
".",
"as_proto",
"(",
")",
")",
"g",
"=",
"ops",
".",
"get_default_graph",
"(",
")",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"placeholder\"",
",",
"[",
"]",
")",
"as",
"name",
":",
"op",
"=",
"g",
".",
"create_op",
"(",
"\"Placeholder\"",
",",
"[",
"]",
",",
"[",
"dtype",
"]",
",",
"input_types",
"=",
"[",
"]",
",",
"attrs",
"=",
"{",
"\"dtype\"",
":",
"dtype_value",
",",
"\"shape\"",
":",
"shape",
"}",
",",
"name",
"=",
"name",
")",
"result",
",",
"=",
"op",
".",
"outputs",
"return",
"result"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/eager/graph_only_ops.py#L41-L54 | |
InsightSoftwareConsortium/ITK | 87acfce9a93d928311c38bc371b666b515b9f19d | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/declarations_matchers.py | python | operator_matcher_t.__init__ | (
self,
name=None,
symbol=None,
return_type=None,
arg_types=None,
decl_type=None,
header_dir=None,
header_file=None) | :param symbol: operator symbol
:type symbol: str | :param symbol: operator symbol
:type symbol: str | [
":",
"param",
"symbol",
":",
"operator",
"symbol",
":",
"type",
"symbol",
":",
"str"
] | def __init__(
self,
name=None,
symbol=None,
return_type=None,
arg_types=None,
decl_type=None,
header_dir=None,
header_file=None):
"""
:param symbol: operator symbol
:type symbol: str
"""
if None is decl_type:
decl_type = calldef_members.operator_t
calldef_matcher_t.__init__(
self,
name=name,
return_type=return_type,
arg_types=arg_types,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file)
self.symbol = symbol | [
"def",
"__init__",
"(",
"self",
",",
"name",
"=",
"None",
",",
"symbol",
"=",
"None",
",",
"return_type",
"=",
"None",
",",
"arg_types",
"=",
"None",
",",
"decl_type",
"=",
"None",
",",
"header_dir",
"=",
"None",
",",
"header_file",
"=",
"None",
")",
":",
"if",
"None",
"is",
"decl_type",
":",
"decl_type",
"=",
"calldef_members",
".",
"operator_t",
"calldef_matcher_t",
".",
"__init__",
"(",
"self",
",",
"name",
"=",
"name",
",",
"return_type",
"=",
"return_type",
",",
"arg_types",
"=",
"arg_types",
",",
"decl_type",
"=",
"decl_type",
",",
"header_dir",
"=",
"header_dir",
",",
"header_file",
"=",
"header_file",
")",
"self",
".",
"symbol",
"=",
"symbol"
] | https://github.com/InsightSoftwareConsortium/ITK/blob/87acfce9a93d928311c38bc371b666b515b9f19d/Modules/ThirdParty/pygccxml/src/pygccxml/declarations/declarations_matchers.py#L359-L382 | ||
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | python | RepeatedScalarFieldContainer.__setitem__ | (self, key, value) | Sets the item on the specified position. | Sets the item on the specified position. | [
"Sets",
"the",
"item",
"on",
"the",
"specified",
"position",
"."
] | def __setitem__(self, key, value):
"""Sets the item on the specified position."""
if isinstance(key, slice): # PY3
if key.step is not None:
raise ValueError('Extended slices not supported')
self.__setslice__(key.start, key.stop, value)
else:
self._values[key] = self._type_checker.CheckValue(value)
self._message_listener.Modified() | [
"def",
"__setitem__",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"slice",
")",
":",
"# PY3",
"if",
"key",
".",
"step",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Extended slices not supported'",
")",
"self",
".",
"__setslice__",
"(",
"key",
".",
"start",
",",
"key",
".",
"stop",
",",
"value",
")",
"else",
":",
"self",
".",
"_values",
"[",
"key",
"]",
"=",
"self",
".",
"_type_checker",
".",
"CheckValue",
"(",
"value",
")",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L298-L306 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | library/python/hnsw/hnsw/hnsw.py | python | OnlineHnsw.get_nearest_and_add_item | (self, query) | return self._online_index._get_nearest_neighbors_and_add_item(query) | Get approximate nearest neighbors for query from index and add item to index
Parameters
----------
query : list or numpy.ndarray
Vector for which nearest neighbors should be found.
Vector which should be added in index.
Returns
-------
neighbors : list of tuples (id, distance) with length = search_neighborhood_size | Get approximate nearest neighbors for query from index and add item to index | [
"Get",
"approximate",
"nearest",
"neighbors",
"for",
"query",
"from",
"index",
"and",
"add",
"item",
"to",
"index"
] | def get_nearest_and_add_item(self, query):
"""
Get approximate nearest neighbors for query from index and add item to index
Parameters
----------
query : list or numpy.ndarray
Vector for which nearest neighbors should be found.
Vector which should be added in index.
Returns
-------
neighbors : list of tuples (id, distance) with length = search_neighborhood_size
"""
return self._online_index._get_nearest_neighbors_and_add_item(query) | [
"def",
"get_nearest_and_add_item",
"(",
"self",
",",
"query",
")",
":",
"return",
"self",
".",
"_online_index",
".",
"_get_nearest_neighbors_and_add_item",
"(",
"query",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/library/python/hnsw/hnsw/hnsw.py#L587-L601 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | ToolBarToolBase.SetDropdownMenu | (*args, **kwargs) | return _controls_.ToolBarToolBase_SetDropdownMenu(*args, **kwargs) | SetDropdownMenu(self, Menu menu) | SetDropdownMenu(self, Menu menu) | [
"SetDropdownMenu",
"(",
"self",
"Menu",
"menu",
")"
] | def SetDropdownMenu(*args, **kwargs):
"""SetDropdownMenu(self, Menu menu)"""
return _controls_.ToolBarToolBase_SetDropdownMenu(*args, **kwargs) | [
"def",
"SetDropdownMenu",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ToolBarToolBase_SetDropdownMenu",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L3561-L3563 | |
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/packager.py | python | Distro.name | (self) | return self.dname | Return name. | Return name. | [
"Return",
"name",
"."
] | def name(self):
"""Return name."""
return self.dname | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"dname"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/packager.py#L180-L182 | |
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | media/webrtc/trunk/build/android/pylib/android_commands.py | python | _GetFilesFromRecursiveLsOutput | (path, ls_output, re_file, utc_offset=None) | return files | Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC. | Gets a list of files from `ls` command output. | [
"Gets",
"a",
"list",
"of",
"files",
"from",
"ls",
"command",
"output",
"."
] | def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir)+1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files | [
"def",
"_GetFilesFromRecursiveLsOutput",
"(",
"path",
",",
"ls_output",
",",
"re_file",
",",
"utc_offset",
"=",
"None",
")",
":",
"re_directory",
"=",
"re",
".",
"compile",
"(",
"'^%s/(?P<dir>[^:]+):$'",
"%",
"re",
".",
"escape",
"(",
"path",
")",
")",
"path_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"current_dir",
"=",
"''",
"files",
"=",
"{",
"}",
"for",
"line",
"in",
"ls_output",
":",
"directory_match",
"=",
"re_directory",
".",
"match",
"(",
"line",
")",
"if",
"directory_match",
":",
"current_dir",
"=",
"directory_match",
".",
"group",
"(",
"'dir'",
")",
"continue",
"file_match",
"=",
"re_file",
".",
"match",
"(",
"line",
")",
"if",
"file_match",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_dir",
",",
"file_match",
".",
"group",
"(",
"'filename'",
")",
")",
"if",
"filename",
".",
"startswith",
"(",
"path_dir",
")",
":",
"filename",
"=",
"filename",
"[",
"len",
"(",
"path_dir",
")",
"+",
"1",
":",
"]",
"lastmod",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"file_match",
".",
"group",
"(",
"'date'",
")",
"+",
"' '",
"+",
"file_match",
".",
"group",
"(",
"'time'",
")",
"[",
":",
"5",
"]",
",",
"'%Y-%m-%d %H:%M'",
")",
"if",
"not",
"utc_offset",
"and",
"'timezone'",
"in",
"re_file",
".",
"groupindex",
":",
"utc_offset",
"=",
"file_match",
".",
"group",
"(",
"'timezone'",
")",
"if",
"isinstance",
"(",
"utc_offset",
",",
"str",
")",
"and",
"len",
"(",
"utc_offset",
")",
"==",
"5",
":",
"utc_delta",
"=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"int",
"(",
"utc_offset",
"[",
"1",
":",
"3",
"]",
")",
",",
"minutes",
"=",
"int",
"(",
"utc_offset",
"[",
"3",
":",
"5",
"]",
")",
")",
"if",
"utc_offset",
"[",
"0",
":",
"1",
"]",
"==",
"'-'",
":",
"utc_delta",
"=",
"-",
"utc_delta",
"lastmod",
"-=",
"utc_delta",
"files",
"[",
"filename",
"]",
"=",
"(",
"int",
"(",
"file_match",
".",
"group",
"(",
"'size'",
")",
")",
",",
"lastmod",
")",
"return",
"files"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/media/webrtc/trunk/build/android/pylib/android_commands.py#L110-L160 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/combo.py | python | BitmapComboBox.Append | (*args, **kwargs) | return _combo.BitmapComboBox_Append(*args, **kwargs) | Append(self, String item, Bitmap bitmap=wxNullBitmap, PyObject clientData=None) -> int
Adds the item to the control, associating the given data with the item
if not None. The return value is the index of the newly added item. | Append(self, String item, Bitmap bitmap=wxNullBitmap, PyObject clientData=None) -> int | [
"Append",
"(",
"self",
"String",
"item",
"Bitmap",
"bitmap",
"=",
"wxNullBitmap",
"PyObject",
"clientData",
"=",
"None",
")",
"-",
">",
"int"
] | def Append(*args, **kwargs):
"""
Append(self, String item, Bitmap bitmap=wxNullBitmap, PyObject clientData=None) -> int
Adds the item to the control, associating the given data with the item
if not None. The return value is the index of the newly added item.
"""
return _combo.BitmapComboBox_Append(*args, **kwargs) | [
"def",
"Append",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_combo",
".",
"BitmapComboBox_Append",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/combo.py#L971-L978 | |
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/training/summary_io.py | python | SummaryWriter.__init__ | (self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None) | Creates a `SummaryWriter` and an event file.
This class is deprecated, and should be replaced with tf.summary.FileWriter.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
The other arguments to the constructor control the asynchronous writes to
the event file:
* `flush_secs`: How often, in seconds, to flush the added summaries
and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
written to disk before one of the 'add' calls block.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead. | Creates a `SummaryWriter` and an event file. | [
"Creates",
"a",
"SummaryWriter",
"and",
"an",
"event",
"file",
"."
] | def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None):
"""Creates a `SummaryWriter` and an event file.
This class is deprecated, and should be replaced with tf.summary.FileWriter.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
The other arguments to the constructor control the asynchronous writes to
the event file:
* `flush_secs`: How often, in seconds, to flush the added summaries
and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
written to disk before one of the 'add' calls block.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
super(SummaryWriter, self).__init__(logdir, graph, max_queue, flush_secs,
graph_def) | [
"def",
"__init__",
"(",
"self",
",",
"logdir",
",",
"graph",
"=",
"None",
",",
"max_queue",
"=",
"10",
",",
"flush_secs",
"=",
"120",
",",
"graph_def",
"=",
"None",
")",
":",
"super",
"(",
"SummaryWriter",
",",
"self",
")",
".",
"__init__",
"(",
"logdir",
",",
"graph",
",",
"max_queue",
",",
"flush_secs",
",",
"graph_def",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/training/summary_io.py#L34-L81 | ||
GoldenCheetah/GoldenCheetah | 919a418895040144be5579884446ed6cd701bf0d | util/gh-downloads.py | python | download_stats | (user=None, repo=None, tag=None, latest=False, token=None, quiet=False) | return stats | Get download statistics from GitHub API.
:param user: GitHub repository owner username. If empty, user will be prompted for input.
:param repo: GitHub repository name. If empty, user will be prompted for input.
:param tag: Release tag name. If empty, get stats for all releases.
:param latest: If True, ignore "tag" parameter and get stats for the latest release.
:param token: GitHub OAuth token. If empty, API request limit will be reduced.
:param quiet: If True, print nothing.
:return: Statistics on downloads.
:raises GithubRepoError: When accessing nonexistent GitHub username, repository or release tag.
:raises GithubLimitError: When GitHub API request limit is exceeded.
:raises GithubTokenError: When trying to pass invalid GitHub API OAuth token.
:raises ConnectionError: On connection error. | Get download statistics from GitHub API.
:param user: GitHub repository owner username. If empty, user will be prompted for input.
:param repo: GitHub repository name. If empty, user will be prompted for input.
:param tag: Release tag name. If empty, get stats for all releases.
:param latest: If True, ignore "tag" parameter and get stats for the latest release.
:param token: GitHub OAuth token. If empty, API request limit will be reduced.
:param quiet: If True, print nothing.
:return: Statistics on downloads.
:raises GithubRepoError: When accessing nonexistent GitHub username, repository or release tag.
:raises GithubLimitError: When GitHub API request limit is exceeded.
:raises GithubTokenError: When trying to pass invalid GitHub API OAuth token.
:raises ConnectionError: On connection error. | [
"Get",
"download",
"statistics",
"from",
"GitHub",
"API",
".",
":",
"param",
"user",
":",
"GitHub",
"repository",
"owner",
"username",
".",
"If",
"empty",
"user",
"will",
"be",
"prompted",
"for",
"input",
".",
":",
"param",
"repo",
":",
"GitHub",
"repository",
"name",
".",
"If",
"empty",
"user",
"will",
"be",
"prompted",
"for",
"input",
".",
":",
"param",
"tag",
":",
"Release",
"tag",
"name",
".",
"If",
"empty",
"get",
"stats",
"for",
"all",
"releases",
".",
":",
"param",
"latest",
":",
"If",
"True",
"ignore",
"tag",
"parameter",
"and",
"get",
"stats",
"for",
"the",
"latest",
"release",
".",
":",
"param",
"token",
":",
"GitHub",
"OAuth",
"token",
".",
"If",
"empty",
"API",
"request",
"limit",
"will",
"be",
"reduced",
".",
":",
"param",
"quiet",
":",
"If",
"True",
"print",
"nothing",
".",
":",
"return",
":",
"Statistics",
"on",
"downloads",
".",
":",
"raises",
"GithubRepoError",
":",
"When",
"accessing",
"nonexistent",
"GitHub",
"username",
"repository",
"or",
"release",
"tag",
".",
":",
"raises",
"GithubLimitError",
":",
"When",
"GitHub",
"API",
"request",
"limit",
"is",
"exceeded",
".",
":",
"raises",
"GithubTokenError",
":",
"When",
"trying",
"to",
"pass",
"invalid",
"GitHub",
"API",
"OAuth",
"token",
".",
":",
"raises",
"ConnectionError",
":",
"On",
"connection",
"error",
"."
] | def download_stats(user=None, repo=None, tag=None, latest=False, token=None, quiet=False):
"""
Get download statistics from GitHub API.
:param user: GitHub repository owner username. If empty, user will be prompted for input.
:param repo: GitHub repository name. If empty, user will be prompted for input.
:param tag: Release tag name. If empty, get stats for all releases.
:param latest: If True, ignore "tag" parameter and get stats for the latest release.
:param token: GitHub OAuth token. If empty, API request limit will be reduced.
:param quiet: If True, print nothing.
:return: Statistics on downloads.
:raises GithubRepoError: When accessing nonexistent GitHub username, repository or release tag.
:raises GithubLimitError: When GitHub API request limit is exceeded.
:raises GithubTokenError: When trying to pass invalid GitHub API OAuth token.
:raises ConnectionError: On connection error.
"""
if not user:
user = input("GitHub Username: ")
if not repo:
repo = input("GitHub Repository: ")
if not quiet:
print("Downloading {0}/{1} stats...".format(user, repo))
url = "https://api.github.com/repos/{0}/{1}/releases".format(user, repo)
url += ("" if not tag else "/tags/" + tag) if not latest else "/latest"
headers = {} if not token else {"Authorization": "token " + token}
request = urllib2.Request(url, headers=headers)
start = time.time()
try:
response = urllib2.urlopen(request).read().decode("utf-8")
except urllib2.HTTPError as e:
if e.code == 404:
raise GithubRepoError() # Invalid GitHub username, repository or release tag.
elif e.code == 403:
raise GithubLimitError() # GitHub API request limit exceeded.
elif e.code == 401:
raise GithubTokenError() # Invalid GitHub OAuth token.
else:
raise GithubError(e.code) # Generic GitHub API exception.
except urllib2.URLError as e:
raise ConnectionError(e.reason)
stats = json.loads(response)
if not quiet:
end = time.time()
print("Downloaded in {0:.3f}s".format(end - start))
return stats | [
"def",
"download_stats",
"(",
"user",
"=",
"None",
",",
"repo",
"=",
"None",
",",
"tag",
"=",
"None",
",",
"latest",
"=",
"False",
",",
"token",
"=",
"None",
",",
"quiet",
"=",
"False",
")",
":",
"if",
"not",
"user",
":",
"user",
"=",
"input",
"(",
"\"GitHub Username: \"",
")",
"if",
"not",
"repo",
":",
"repo",
"=",
"input",
"(",
"\"GitHub Repository: \"",
")",
"if",
"not",
"quiet",
":",
"print",
"(",
"\"Downloading {0}/{1} stats...\"",
".",
"format",
"(",
"user",
",",
"repo",
")",
")",
"url",
"=",
"\"https://api.github.com/repos/{0}/{1}/releases\"",
".",
"format",
"(",
"user",
",",
"repo",
")",
"url",
"+=",
"(",
"\"\"",
"if",
"not",
"tag",
"else",
"\"/tags/\"",
"+",
"tag",
")",
"if",
"not",
"latest",
"else",
"\"/latest\"",
"headers",
"=",
"{",
"}",
"if",
"not",
"token",
"else",
"{",
"\"Authorization\"",
":",
"\"token \"",
"+",
"token",
"}",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"request",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"except",
"urllib2",
".",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"404",
":",
"raise",
"GithubRepoError",
"(",
")",
"# Invalid GitHub username, repository or release tag.",
"elif",
"e",
".",
"code",
"==",
"403",
":",
"raise",
"GithubLimitError",
"(",
")",
"# GitHub API request limit exceeded.",
"elif",
"e",
".",
"code",
"==",
"401",
":",
"raise",
"GithubTokenError",
"(",
")",
"# Invalid GitHub OAuth token.",
"else",
":",
"raise",
"GithubError",
"(",
"e",
".",
"code",
")",
"# Generic GitHub API exception.",
"except",
"urllib2",
".",
"URLError",
"as",
"e",
":",
"raise",
"ConnectionError",
"(",
"e",
".",
"reason",
")",
"stats",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"if",
"not",
"quiet",
":",
"end",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"\"Downloaded in {0:.3f}s\"",
".",
"format",
"(",
"end",
"-",
"start",
")",
")",
"return",
"stats"
] | https://github.com/GoldenCheetah/GoldenCheetah/blob/919a418895040144be5579884446ed6cd701bf0d/util/gh-downloads.py#L171-L214 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_windows.py | python | Printer.PrintDialog | (*args, **kwargs) | return _windows_.Printer_PrintDialog(*args, **kwargs) | PrintDialog(self, Window parent) -> DC | PrintDialog(self, Window parent) -> DC | [
"PrintDialog",
"(",
"self",
"Window",
"parent",
")",
"-",
">",
"DC"
] | def PrintDialog(*args, **kwargs):
"""PrintDialog(self, Window parent) -> DC"""
return _windows_.Printer_PrintDialog(*args, **kwargs) | [
"def",
"PrintDialog",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"Printer_PrintDialog",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L5231-L5233 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | PreDirFilterListCtrl | (*args, **kwargs) | return val | PreDirFilterListCtrl() -> DirFilterListCtrl | PreDirFilterListCtrl() -> DirFilterListCtrl | [
"PreDirFilterListCtrl",
"()",
"-",
">",
"DirFilterListCtrl"
] | def PreDirFilterListCtrl(*args, **kwargs):
"""PreDirFilterListCtrl() -> DirFilterListCtrl"""
val = _controls_.new_PreDirFilterListCtrl(*args, **kwargs)
return val | [
"def",
"PreDirFilterListCtrl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"val",
"=",
"_controls_",
".",
"new_PreDirFilterListCtrl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"val"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L5813-L5816 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/coverage/coverage/results.py | python | Analysis.has_arcs | (self) | return self.data.has_arcs() | Were arcs measured in this result? | Were arcs measured in this result? | [
"Were",
"arcs",
"measured",
"in",
"this",
"result?"
] | def has_arcs(self):
"""Were arcs measured in this result?"""
return self.data.has_arcs() | [
"def",
"has_arcs",
"(",
"self",
")",
":",
"return",
"self",
".",
"data",
".",
"has_arcs",
"(",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/coverage/coverage/results.py#L61-L63 | |
esphome/esphome | 40e06c9819f17409615d4f4eec5cfe4dc9a3776d | esphome/yaml_util.py | python | _find_files | (directory, pattern) | Recursively load files in a directory. | Recursively load files in a directory. | [
"Recursively",
"load",
"files",
"in",
"a",
"directory",
"."
] | def _find_files(directory, pattern):
"""Recursively load files in a directory."""
for root, dirs, files in os.walk(directory, topdown=True):
dirs[:] = [d for d in dirs if _is_file_valid(d)]
for basename in files:
if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename | [
"def",
"_find_files",
"(",
"directory",
",",
"pattern",
")",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"directory",
",",
"topdown",
"=",
"True",
")",
":",
"dirs",
"[",
":",
"]",
"=",
"[",
"d",
"for",
"d",
"in",
"dirs",
"if",
"_is_file_valid",
"(",
"d",
")",
"]",
"for",
"basename",
"in",
"files",
":",
"if",
"_is_file_valid",
"(",
"basename",
")",
"and",
"fnmatch",
".",
"fnmatch",
"(",
"basename",
",",
"pattern",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"basename",
")",
"yield",
"filename"
] | https://github.com/esphome/esphome/blob/40e06c9819f17409615d4f4eec5cfe4dc9a3776d/esphome/yaml_util.py#L363-L370 | ||
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | example/ssd/tools/caffe_converter/convert_symbol.py | python | _parse_proto | (prototxt_fname) | return symbol_string, output_name, input_dim | Parse Caffe prototxt into symbol string | Parse Caffe prototxt into symbol string | [
"Parse",
"Caffe",
"prototxt",
"into",
"symbol",
"string"
] | def _parse_proto(prototxt_fname):
"""Parse Caffe prototxt into symbol string
"""
proto = caffe_parser.read_prototxt(prototxt_fname)
# process data layer
input_name, input_dim, layers = _get_input(proto)
# only support single input, so always use `data` as the input data
mapping = {input_name: 'data'}
need_flatten = {input_name: False}
symbol_string = "import mxnet as mx\ndata = mx.symbol.Variable(name='data')\n"
flatten_count = 0
output_name = ""
prev_name = None
# convert reset layers one by one
for i, layer in enumerate(layers):
type_string = ''
param_string = ''
skip_layer = False
bottom_order = []
name = re.sub('[-/]', '_', layer.name)
if layer.type == 'Convolution' or layer.type == 4:
type_string = 'mx.symbol.Convolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Deconvolution' or layer.type == 39:
type_string = 'mx.symbol.Deconvolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Pooling' or layer.type == 17:
type_string = 'mx.symbol.Pooling'
param_string = _convert_pooling_param(layer.pooling_param)
need_flatten[name] = True
if layer.type == 'ReLU' or layer.type == 18:
type_string = 'mx.symbol.Activation'
param_string = "act_type='relu'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'TanH' or layer.type == 23:
type_string = 'mx.symbol.Activation'
param_string = "act_type='tanh'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Sigmoid' or layer.type == 19:
type_string = 'mx.symbol.Activation'
param_string = "act_type='sigmoid'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'LRN' or layer.type == 15:
type_string = 'mx.symbol.LRN'
param = layer.lrn_param
param_string = "alpha=%f, beta=%f, knorm=%f, nsize=%d" % (
param.alpha, param.beta, param.k, param.local_size)
need_flatten[name] = True
if layer.type == 'InnerProduct' or layer.type == 14:
type_string = 'mx.symbol.FullyConnected'
param = layer.inner_product_param
param_string = "num_hidden=%d, no_bias=%s" % (
param.num_output, not param.bias_term)
need_flatten[name] = False
if layer.type == 'Dropout' or layer.type == 6:
type_string = 'mx.symbol.Dropout'
param = layer.dropout_param
param_string = "p=%f" % param.dropout_ratio
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Softmax' or layer.type == 20:
if layer.softmax_param.axis == 2:
symbol_string += "%s = mx.symbol.transpose(%s, axes=(0,2,1))\n" %\
(mapping[layer.bottom[0]], mapping[layer.bottom[0]])
type_string = 'mx.symbol.SoftmaxActivation'
param_string = "mode='channel'"
need_flatten[name] = False
else:
type_string = 'mx.symbol.SoftmaxOutput'
if layer.type == 'Flatten' or layer.type == 8:
if 'softmax' in layer.bottom[0]:
prev_name = re.sub('[-/]', '_', layers[i-1].name)
skip_layer = True
else:
type_string = 'mx.symbol.Flatten'
need_flatten[name] = False
if layer.type == 'Split' or layer.type == 22:
type_string = 'split' # will process later
if layer.type == 'Concat' or layer.type == 3:
type_string = 'mx.symbol.Concat'
need_flatten[name] = True
if layer.type == 'Crop':
type_string = 'mx.symbol.Crop'
need_flatten[name] = True
param_string = 'center_crop=True'
if layer.type == 'BatchNorm':
type_string = 'mx.symbol.BatchNorm'
param = layer.batch_norm_param
# CuDNN requires eps to be greater than 1e-05
# We compensate for this change in convert_model
epsilon = param.eps
if (epsilon <= 1e-05):
epsilon = 1e-04
# if next layer is scale, don't fix gamma
fix_gamma = layers[i+1].type != 'Scale'
param_string = 'use_global_stats=%s, fix_gamma=%s, eps=%f' % (
param.use_global_stats, fix_gamma, epsilon)
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Scale':
assert layers[i-1].type == 'BatchNorm'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
skip_layer = True
prev_name = re.sub('[-/]', '_', layers[i-1].name)
if layer.type == 'PReLU':
type_string = 'mx.symbol.LeakyReLU'
param = layer.prelu_param
param_string = "act_type='prelu', slope=%f" % param.filler.value
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Eltwise':
type_string = 'mx.symbol.broadcast_add'
param_string = ""
need_flatten[name] = False
if layer.type == 'Reshape':
type_string = 'mx.symbol.Reshape'
param = layer.reshape_param
param_string = 'shape=(' + ','.join([str(x) for x in list(param.shape.dim)]) + ')'
need_flatten[name] = True
if layer.type == 'AbsVal':
type_string = 'mx.symbol.abs'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Normalize':
bottom = re.sub('[-/]', '_', layer.bottom[0])
conv_layer = _find_layer(layers, bottom)
assert conv_layer is not None
param = layer.norm_param
assert not param.across_spatial and not param.channel_shared
assert param.scale_filler.type == 'constant'
if conv_layer.type == 'Convolution':
scale_name = "%s_scale" % name
symbol_string += "%s=mx.sym.Variable(name='%s', shape=(1, %d, 1, 1), init=mx.init.Constant(%f))\n" % \
(scale_name, scale_name, conv_layer.convolution_param.num_output,
param.scale_filler.value)
symbol_string += "%s=mx.symbol.L2Normalization(name='%s', data=%s, mode='channel')\n" %\
(name, name, mapping[layer.bottom[0]])
symbol_string += "%s=mx.symbol.broadcast_mul(lhs=%s, rhs=%s)\n" %\
(name, scale_name, name)
type_string = 'split'
need_flatten[name] = True
else:
raise ValueError('Unknown/Invalid normalize layer!')
if layer.type == 'Permute':
type_string = 'mx.symbol.transpose'
param_string = "axes=(%s)" % (','.join([str(x) for x in layer.permute_param.order]))
need_flatten[name] = True
from_name = ''
if layer.type == 'PriorBox':
param = layer.prior_box_param
if layer.bottom[0] == 'data':
bottom_order = [1]
else:
bottom_order = [0]
try:
import math
min_size = param.min_size[0] / input_dim[2]
max_size = math.sqrt(param.min_size[0] * param.max_size[0]) / input_dim[2]
sizes = '(%f, %f)' %(min_size, max_size)
except AttributeError:
min_size = param.min_size[0] / input_dim[2]
sizes = '(%f)' %(min_size)
ars = list(param.aspect_ratio)
ratios = [1.]
for ar in ars:
ratios.append(ar)
if param.flip:
ratios.append(1. / ar)
ratios_string = '(' + ','.join(str(x) for x in ratios) + ')'
clip = param.clip
if (param.step_h > 0 or param.step_w > 0):
step_h = param.step_h
step_w = param.step_w
elif param.step > 0:
step_h = param.step
step_w = param.step
else:
step_h = -1
step_w = -1
finput_dimh = float(input_dim[2])
finput_dimw = float(input_dim[3])
step = '(%f, %f)' % (step_h / finput_dimh, step_w / finput_dimw)
assert param.offset == 0.5, "currently only support offset = 0.5"
symbol_string += '%s = mx.contrib.symbol.MultiBoxPrior(%s, sizes=%s, ratios=%s, clip=%s, steps=%s, name="%s")\n' % \
(name, mapping[layer.bottom[0]], sizes, ratios_string, clip, step, name)
symbol_string += '%s = mx.symbol.Flatten(data=%s)\n' % (name, name)
type_string = 'split'
need_flatten[name] = False
if layer.type == 'DetectionOutput':
bottom_order = [1, 0, 2]
param = layer.detection_output_param
assert param.share_location == True
assert param.background_label_id == 0
nms_param = param.nms_param
type_string = 'mx.contrib.symbol.MultiBoxDetection'
param_string = "nms_threshold=%f, nms_topk=%d, clip=False" % \
(nms_param.nms_threshold, nms_param.top_k)
if skip_layer:
assert len(layer.bottom) == 1
symbol_string += "%s = %s\n" % (name, prev_name)
elif type_string == '':
raise ValueError('Unknown layer %s!' % layer.type)
elif type_string != 'split':
bottom = layer.bottom
if param_string != "":
param_string = ", " + param_string
if len(bottom) == 1:
# print(need_flatten)
if need_flatten[mapping[bottom[0]]] and type_string == 'mx.symbol.FullyConnected':
flatten_name = "flatten_%d" % flatten_count
symbol_string += "%s=mx.symbol.Flatten(name='%s', data=%s)\n" % (
flatten_name, flatten_name, mapping[bottom[0]])
flatten_count += 1
need_flatten[flatten_name] = False
bottom[0] = flatten_name
mapping[bottom[0]] = bottom[0]
symbol_string += "%s = %s(name='%s', data=%s %s)\n" % (
name, type_string, name, mapping[bottom[0]], param_string)
else:
if not bottom_order:
bottom_order = range(len(bottom))
symbol_string += "%s = %s(name='%s', *[%s] %s)\n" % \
(name, type_string, name, ','.join([mapping[bottom[x]] for x in bottom_order]), param_string)
if layer.type == 'Concat' and layer.concat_param.axis == 2:
symbol_string += "%s = mx.symbol.Reshape(data=%s, shape=(0, -1, 4), name='%s')\n" %\
(name, name, name)
for j in range(len(layer.top)):
mapping[layer.top[j]] = name
output_name = name
return symbol_string, output_name, input_dim | [
"def",
"_parse_proto",
"(",
"prototxt_fname",
")",
":",
"proto",
"=",
"caffe_parser",
".",
"read_prototxt",
"(",
"prototxt_fname",
")",
"# process data layer",
"input_name",
",",
"input_dim",
",",
"layers",
"=",
"_get_input",
"(",
"proto",
")",
"# only support single input, so always use `data` as the input data",
"mapping",
"=",
"{",
"input_name",
":",
"'data'",
"}",
"need_flatten",
"=",
"{",
"input_name",
":",
"False",
"}",
"symbol_string",
"=",
"\"import mxnet as mx\\ndata = mx.symbol.Variable(name='data')\\n\"",
"flatten_count",
"=",
"0",
"output_name",
"=",
"\"\"",
"prev_name",
"=",
"None",
"# convert reset layers one by one",
"for",
"i",
",",
"layer",
"in",
"enumerate",
"(",
"layers",
")",
":",
"type_string",
"=",
"''",
"param_string",
"=",
"''",
"skip_layer",
"=",
"False",
"bottom_order",
"=",
"[",
"]",
"name",
"=",
"re",
".",
"sub",
"(",
"'[-/]'",
",",
"'_'",
",",
"layer",
".",
"name",
")",
"if",
"layer",
".",
"type",
"==",
"'Convolution'",
"or",
"layer",
".",
"type",
"==",
"4",
":",
"type_string",
"=",
"'mx.symbol.Convolution'",
"param_string",
"=",
"_convert_conv_param",
"(",
"layer",
".",
"convolution_param",
")",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"if",
"layer",
".",
"type",
"==",
"'Deconvolution'",
"or",
"layer",
".",
"type",
"==",
"39",
":",
"type_string",
"=",
"'mx.symbol.Deconvolution'",
"param_string",
"=",
"_convert_conv_param",
"(",
"layer",
".",
"convolution_param",
")",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"if",
"layer",
".",
"type",
"==",
"'Pooling'",
"or",
"layer",
".",
"type",
"==",
"17",
":",
"type_string",
"=",
"'mx.symbol.Pooling'",
"param_string",
"=",
"_convert_pooling_param",
"(",
"layer",
".",
"pooling_param",
")",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"if",
"layer",
".",
"type",
"==",
"'ReLU'",
"or",
"layer",
".",
"type",
"==",
"18",
":",
"type_string",
"=",
"'mx.symbol.Activation'",
"param_string",
"=",
"\"act_type='relu'\"",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"if",
"layer",
".",
"type",
"==",
"'TanH'",
"or",
"layer",
".",
"type",
"==",
"23",
":",
"type_string",
"=",
"'mx.symbol.Activation'",
"param_string",
"=",
"\"act_type='tanh'\"",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"if",
"layer",
".",
"type",
"==",
"'Sigmoid'",
"or",
"layer",
".",
"type",
"==",
"19",
":",
"type_string",
"=",
"'mx.symbol.Activation'",
"param_string",
"=",
"\"act_type='sigmoid'\"",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"if",
"layer",
".",
"type",
"==",
"'LRN'",
"or",
"layer",
".",
"type",
"==",
"15",
":",
"type_string",
"=",
"'mx.symbol.LRN'",
"param",
"=",
"layer",
".",
"lrn_param",
"param_string",
"=",
"\"alpha=%f, beta=%f, knorm=%f, nsize=%d\"",
"%",
"(",
"param",
".",
"alpha",
",",
"param",
".",
"beta",
",",
"param",
".",
"k",
",",
"param",
".",
"local_size",
")",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"if",
"layer",
".",
"type",
"==",
"'InnerProduct'",
"or",
"layer",
".",
"type",
"==",
"14",
":",
"type_string",
"=",
"'mx.symbol.FullyConnected'",
"param",
"=",
"layer",
".",
"inner_product_param",
"param_string",
"=",
"\"num_hidden=%d, no_bias=%s\"",
"%",
"(",
"param",
".",
"num_output",
",",
"not",
"param",
".",
"bias_term",
")",
"need_flatten",
"[",
"name",
"]",
"=",
"False",
"if",
"layer",
".",
"type",
"==",
"'Dropout'",
"or",
"layer",
".",
"type",
"==",
"6",
":",
"type_string",
"=",
"'mx.symbol.Dropout'",
"param",
"=",
"layer",
".",
"dropout_param",
"param_string",
"=",
"\"p=%f\"",
"%",
"param",
".",
"dropout_ratio",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"if",
"layer",
".",
"type",
"==",
"'Softmax'",
"or",
"layer",
".",
"type",
"==",
"20",
":",
"if",
"layer",
".",
"softmax_param",
".",
"axis",
"==",
"2",
":",
"symbol_string",
"+=",
"\"%s = mx.symbol.transpose(%s, axes=(0,2,1))\\n\"",
"%",
"(",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
",",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
")",
"type_string",
"=",
"'mx.symbol.SoftmaxActivation'",
"param_string",
"=",
"\"mode='channel'\"",
"need_flatten",
"[",
"name",
"]",
"=",
"False",
"else",
":",
"type_string",
"=",
"'mx.symbol.SoftmaxOutput'",
"if",
"layer",
".",
"type",
"==",
"'Flatten'",
"or",
"layer",
".",
"type",
"==",
"8",
":",
"if",
"'softmax'",
"in",
"layer",
".",
"bottom",
"[",
"0",
"]",
":",
"prev_name",
"=",
"re",
".",
"sub",
"(",
"'[-/]'",
",",
"'_'",
",",
"layers",
"[",
"i",
"-",
"1",
"]",
".",
"name",
")",
"skip_layer",
"=",
"True",
"else",
":",
"type_string",
"=",
"'mx.symbol.Flatten'",
"need_flatten",
"[",
"name",
"]",
"=",
"False",
"if",
"layer",
".",
"type",
"==",
"'Split'",
"or",
"layer",
".",
"type",
"==",
"22",
":",
"type_string",
"=",
"'split'",
"# will process later",
"if",
"layer",
".",
"type",
"==",
"'Concat'",
"or",
"layer",
".",
"type",
"==",
"3",
":",
"type_string",
"=",
"'mx.symbol.Concat'",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"if",
"layer",
".",
"type",
"==",
"'Crop'",
":",
"type_string",
"=",
"'mx.symbol.Crop'",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"param_string",
"=",
"'center_crop=True'",
"if",
"layer",
".",
"type",
"==",
"'BatchNorm'",
":",
"type_string",
"=",
"'mx.symbol.BatchNorm'",
"param",
"=",
"layer",
".",
"batch_norm_param",
"# CuDNN requires eps to be greater than 1e-05",
"# We compensate for this change in convert_model",
"epsilon",
"=",
"param",
".",
"eps",
"if",
"(",
"epsilon",
"<=",
"1e-05",
")",
":",
"epsilon",
"=",
"1e-04",
"# if next layer is scale, don't fix gamma",
"fix_gamma",
"=",
"layers",
"[",
"i",
"+",
"1",
"]",
".",
"type",
"!=",
"'Scale'",
"param_string",
"=",
"'use_global_stats=%s, fix_gamma=%s, eps=%f'",
"%",
"(",
"param",
".",
"use_global_stats",
",",
"fix_gamma",
",",
"epsilon",
")",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"if",
"layer",
".",
"type",
"==",
"'Scale'",
":",
"assert",
"layers",
"[",
"i",
"-",
"1",
"]",
".",
"type",
"==",
"'BatchNorm'",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"skip_layer",
"=",
"True",
"prev_name",
"=",
"re",
".",
"sub",
"(",
"'[-/]'",
",",
"'_'",
",",
"layers",
"[",
"i",
"-",
"1",
"]",
".",
"name",
")",
"if",
"layer",
".",
"type",
"==",
"'PReLU'",
":",
"type_string",
"=",
"'mx.symbol.LeakyReLU'",
"param",
"=",
"layer",
".",
"prelu_param",
"param_string",
"=",
"\"act_type='prelu', slope=%f\"",
"%",
"param",
".",
"filler",
".",
"value",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"if",
"layer",
".",
"type",
"==",
"'Eltwise'",
":",
"type_string",
"=",
"'mx.symbol.broadcast_add'",
"param_string",
"=",
"\"\"",
"need_flatten",
"[",
"name",
"]",
"=",
"False",
"if",
"layer",
".",
"type",
"==",
"'Reshape'",
":",
"type_string",
"=",
"'mx.symbol.Reshape'",
"param",
"=",
"layer",
".",
"reshape_param",
"param_string",
"=",
"'shape=('",
"+",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"list",
"(",
"param",
".",
"shape",
".",
"dim",
")",
"]",
")",
"+",
"')'",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"if",
"layer",
".",
"type",
"==",
"'AbsVal'",
":",
"type_string",
"=",
"'mx.symbol.abs'",
"need_flatten",
"[",
"name",
"]",
"=",
"need_flatten",
"[",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
"]",
"if",
"layer",
".",
"type",
"==",
"'Normalize'",
":",
"bottom",
"=",
"re",
".",
"sub",
"(",
"'[-/]'",
",",
"'_'",
",",
"layer",
".",
"bottom",
"[",
"0",
"]",
")",
"conv_layer",
"=",
"_find_layer",
"(",
"layers",
",",
"bottom",
")",
"assert",
"conv_layer",
"is",
"not",
"None",
"param",
"=",
"layer",
".",
"norm_param",
"assert",
"not",
"param",
".",
"across_spatial",
"and",
"not",
"param",
".",
"channel_shared",
"assert",
"param",
".",
"scale_filler",
".",
"type",
"==",
"'constant'",
"if",
"conv_layer",
".",
"type",
"==",
"'Convolution'",
":",
"scale_name",
"=",
"\"%s_scale\"",
"%",
"name",
"symbol_string",
"+=",
"\"%s=mx.sym.Variable(name='%s', shape=(1, %d, 1, 1), init=mx.init.Constant(%f))\\n\"",
"%",
"(",
"scale_name",
",",
"scale_name",
",",
"conv_layer",
".",
"convolution_param",
".",
"num_output",
",",
"param",
".",
"scale_filler",
".",
"value",
")",
"symbol_string",
"+=",
"\"%s=mx.symbol.L2Normalization(name='%s', data=%s, mode='channel')\\n\"",
"%",
"(",
"name",
",",
"name",
",",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
")",
"symbol_string",
"+=",
"\"%s=mx.symbol.broadcast_mul(lhs=%s, rhs=%s)\\n\"",
"%",
"(",
"name",
",",
"scale_name",
",",
"name",
")",
"type_string",
"=",
"'split'",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown/Invalid normalize layer!'",
")",
"if",
"layer",
".",
"type",
"==",
"'Permute'",
":",
"type_string",
"=",
"'mx.symbol.transpose'",
"param_string",
"=",
"\"axes=(%s)\"",
"%",
"(",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"layer",
".",
"permute_param",
".",
"order",
"]",
")",
")",
"need_flatten",
"[",
"name",
"]",
"=",
"True",
"from_name",
"=",
"''",
"if",
"layer",
".",
"type",
"==",
"'PriorBox'",
":",
"param",
"=",
"layer",
".",
"prior_box_param",
"if",
"layer",
".",
"bottom",
"[",
"0",
"]",
"==",
"'data'",
":",
"bottom_order",
"=",
"[",
"1",
"]",
"else",
":",
"bottom_order",
"=",
"[",
"0",
"]",
"try",
":",
"import",
"math",
"min_size",
"=",
"param",
".",
"min_size",
"[",
"0",
"]",
"/",
"input_dim",
"[",
"2",
"]",
"max_size",
"=",
"math",
".",
"sqrt",
"(",
"param",
".",
"min_size",
"[",
"0",
"]",
"*",
"param",
".",
"max_size",
"[",
"0",
"]",
")",
"/",
"input_dim",
"[",
"2",
"]",
"sizes",
"=",
"'(%f, %f)'",
"%",
"(",
"min_size",
",",
"max_size",
")",
"except",
"AttributeError",
":",
"min_size",
"=",
"param",
".",
"min_size",
"[",
"0",
"]",
"/",
"input_dim",
"[",
"2",
"]",
"sizes",
"=",
"'(%f)'",
"%",
"(",
"min_size",
")",
"ars",
"=",
"list",
"(",
"param",
".",
"aspect_ratio",
")",
"ratios",
"=",
"[",
"1.",
"]",
"for",
"ar",
"in",
"ars",
":",
"ratios",
".",
"append",
"(",
"ar",
")",
"if",
"param",
".",
"flip",
":",
"ratios",
".",
"append",
"(",
"1.",
"/",
"ar",
")",
"ratios_string",
"=",
"'('",
"+",
"','",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"ratios",
")",
"+",
"')'",
"clip",
"=",
"param",
".",
"clip",
"if",
"(",
"param",
".",
"step_h",
">",
"0",
"or",
"param",
".",
"step_w",
">",
"0",
")",
":",
"step_h",
"=",
"param",
".",
"step_h",
"step_w",
"=",
"param",
".",
"step_w",
"elif",
"param",
".",
"step",
">",
"0",
":",
"step_h",
"=",
"param",
".",
"step",
"step_w",
"=",
"param",
".",
"step",
"else",
":",
"step_h",
"=",
"-",
"1",
"step_w",
"=",
"-",
"1",
"finput_dimh",
"=",
"float",
"(",
"input_dim",
"[",
"2",
"]",
")",
"finput_dimw",
"=",
"float",
"(",
"input_dim",
"[",
"3",
"]",
")",
"step",
"=",
"'(%f, %f)'",
"%",
"(",
"step_h",
"/",
"finput_dimh",
",",
"step_w",
"/",
"finput_dimw",
")",
"assert",
"param",
".",
"offset",
"==",
"0.5",
",",
"\"currently only support offset = 0.5\"",
"symbol_string",
"+=",
"'%s = mx.contrib.symbol.MultiBoxPrior(%s, sizes=%s, ratios=%s, clip=%s, steps=%s, name=\"%s\")\\n'",
"%",
"(",
"name",
",",
"mapping",
"[",
"layer",
".",
"bottom",
"[",
"0",
"]",
"]",
",",
"sizes",
",",
"ratios_string",
",",
"clip",
",",
"step",
",",
"name",
")",
"symbol_string",
"+=",
"'%s = mx.symbol.Flatten(data=%s)\\n'",
"%",
"(",
"name",
",",
"name",
")",
"type_string",
"=",
"'split'",
"need_flatten",
"[",
"name",
"]",
"=",
"False",
"if",
"layer",
".",
"type",
"==",
"'DetectionOutput'",
":",
"bottom_order",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
"param",
"=",
"layer",
".",
"detection_output_param",
"assert",
"param",
".",
"share_location",
"==",
"True",
"assert",
"param",
".",
"background_label_id",
"==",
"0",
"nms_param",
"=",
"param",
".",
"nms_param",
"type_string",
"=",
"'mx.contrib.symbol.MultiBoxDetection'",
"param_string",
"=",
"\"nms_threshold=%f, nms_topk=%d, clip=False\"",
"%",
"(",
"nms_param",
".",
"nms_threshold",
",",
"nms_param",
".",
"top_k",
")",
"if",
"skip_layer",
":",
"assert",
"len",
"(",
"layer",
".",
"bottom",
")",
"==",
"1",
"symbol_string",
"+=",
"\"%s = %s\\n\"",
"%",
"(",
"name",
",",
"prev_name",
")",
"elif",
"type_string",
"==",
"''",
":",
"raise",
"ValueError",
"(",
"'Unknown layer %s!'",
"%",
"layer",
".",
"type",
")",
"elif",
"type_string",
"!=",
"'split'",
":",
"bottom",
"=",
"layer",
".",
"bottom",
"if",
"param_string",
"!=",
"\"\"",
":",
"param_string",
"=",
"\", \"",
"+",
"param_string",
"if",
"len",
"(",
"bottom",
")",
"==",
"1",
":",
"# print(need_flatten)",
"if",
"need_flatten",
"[",
"mapping",
"[",
"bottom",
"[",
"0",
"]",
"]",
"]",
"and",
"type_string",
"==",
"'mx.symbol.FullyConnected'",
":",
"flatten_name",
"=",
"\"flatten_%d\"",
"%",
"flatten_count",
"symbol_string",
"+=",
"\"%s=mx.symbol.Flatten(name='%s', data=%s)\\n\"",
"%",
"(",
"flatten_name",
",",
"flatten_name",
",",
"mapping",
"[",
"bottom",
"[",
"0",
"]",
"]",
")",
"flatten_count",
"+=",
"1",
"need_flatten",
"[",
"flatten_name",
"]",
"=",
"False",
"bottom",
"[",
"0",
"]",
"=",
"flatten_name",
"mapping",
"[",
"bottom",
"[",
"0",
"]",
"]",
"=",
"bottom",
"[",
"0",
"]",
"symbol_string",
"+=",
"\"%s = %s(name='%s', data=%s %s)\\n\"",
"%",
"(",
"name",
",",
"type_string",
",",
"name",
",",
"mapping",
"[",
"bottom",
"[",
"0",
"]",
"]",
",",
"param_string",
")",
"else",
":",
"if",
"not",
"bottom_order",
":",
"bottom_order",
"=",
"range",
"(",
"len",
"(",
"bottom",
")",
")",
"symbol_string",
"+=",
"\"%s = %s(name='%s', *[%s] %s)\\n\"",
"%",
"(",
"name",
",",
"type_string",
",",
"name",
",",
"','",
".",
"join",
"(",
"[",
"mapping",
"[",
"bottom",
"[",
"x",
"]",
"]",
"for",
"x",
"in",
"bottom_order",
"]",
")",
",",
"param_string",
")",
"if",
"layer",
".",
"type",
"==",
"'Concat'",
"and",
"layer",
".",
"concat_param",
".",
"axis",
"==",
"2",
":",
"symbol_string",
"+=",
"\"%s = mx.symbol.Reshape(data=%s, shape=(0, -1, 4), name='%s')\\n\"",
"%",
"(",
"name",
",",
"name",
",",
"name",
")",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"layer",
".",
"top",
")",
")",
":",
"mapping",
"[",
"layer",
".",
"top",
"[",
"j",
"]",
"]",
"=",
"name",
"output_name",
"=",
"name",
"return",
"symbol_string",
",",
"output_name",
",",
"input_dim"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/example/ssd/tools/caffe_converter/convert_symbol.py#L129-L359 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/core/frame.py | python | DataFrame.update | (self, other, join='left', overwrite=True, filter_func=None,
errors='ignore') | Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, it's name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0 | Modify in place using non-NA values from another DataFrame. | [
"Modify",
"in",
"place",
"using",
"non",
"-",
"NA",
"values",
"from",
"another",
"DataFrame",
"."
] | def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, it's name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ['ignore', 'raise']:
raise ValueError("The parameter errors must be either "
"'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if errors == 'raise':
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that) | [
"def",
"update",
"(",
"self",
",",
"other",
",",
"join",
"=",
"'left'",
",",
"overwrite",
"=",
"True",
",",
"filter_func",
"=",
"None",
",",
"errors",
"=",
"'ignore'",
")",
":",
"import",
"pandas",
".",
"core",
".",
"computation",
".",
"expressions",
"as",
"expressions",
"# TODO: Support other joins",
"if",
"join",
"!=",
"'left'",
":",
"# pragma: no cover",
"raise",
"NotImplementedError",
"(",
"\"Only left join is supported\"",
")",
"if",
"errors",
"not",
"in",
"[",
"'ignore'",
",",
"'raise'",
"]",
":",
"raise",
"ValueError",
"(",
"\"The parameter errors must be either \"",
"\"'ignore' or 'raise'\"",
")",
"if",
"not",
"isinstance",
"(",
"other",
",",
"DataFrame",
")",
":",
"other",
"=",
"DataFrame",
"(",
"other",
")",
"other",
"=",
"other",
".",
"reindex_like",
"(",
"self",
")",
"for",
"col",
"in",
"self",
".",
"columns",
":",
"this",
"=",
"self",
"[",
"col",
"]",
".",
"values",
"that",
"=",
"other",
"[",
"col",
"]",
".",
"values",
"if",
"filter_func",
"is",
"not",
"None",
":",
"with",
"np",
".",
"errstate",
"(",
"all",
"=",
"'ignore'",
")",
":",
"mask",
"=",
"~",
"filter_func",
"(",
"this",
")",
"|",
"isna",
"(",
"that",
")",
"else",
":",
"if",
"errors",
"==",
"'raise'",
":",
"mask_this",
"=",
"notna",
"(",
"that",
")",
"mask_that",
"=",
"notna",
"(",
"this",
")",
"if",
"any",
"(",
"mask_this",
"&",
"mask_that",
")",
":",
"raise",
"ValueError",
"(",
"\"Data overlaps.\"",
")",
"if",
"overwrite",
":",
"mask",
"=",
"isna",
"(",
"that",
")",
"else",
":",
"mask",
"=",
"notna",
"(",
"this",
")",
"# don't overwrite columns unecessarily",
"if",
"mask",
".",
"all",
"(",
")",
":",
"continue",
"self",
"[",
"col",
"]",
"=",
"expressions",
".",
"where",
"(",
"mask",
",",
"this",
",",
"that",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/frame.py#L5368-L5516 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/sessions.py | python | Session.delete | (self, url, **kwargs) | return self.request('DELETE', url, **kwargs) | r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response | r"""Sends a DELETE request. Returns :class:`Response` object. | [
"r",
"Sends",
"a",
"DELETE",
"request",
".",
"Returns",
":",
"class",
":",
"Response",
"object",
"."
] | def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('DELETE', url, **kwargs) | [
"def",
"delete",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"request",
"(",
"'DELETE'",
",",
"url",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/sessions.py#L604-L612 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/misc_util.py | python | Configuration.__init__ | (self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs) | Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter. | Construct configuration instance of a package. | [
"Construct",
"configuration",
"instance",
"of",
"a",
"package",
"."
] | def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name | [
"def",
"__init__",
"(",
"self",
",",
"package_name",
"=",
"None",
",",
"parent_name",
"=",
"None",
",",
"top_path",
"=",
"None",
",",
"package_path",
"=",
"None",
",",
"caller_level",
"=",
"1",
",",
"setup_name",
"=",
"'setup.py'",
",",
"*",
"*",
"attrs",
")",
":",
"self",
".",
"name",
"=",
"dot_join",
"(",
"parent_name",
",",
"package_name",
")",
"self",
".",
"version",
"=",
"None",
"caller_frame",
"=",
"get_frame",
"(",
"caller_level",
")",
"self",
".",
"local_path",
"=",
"get_path_from_frame",
"(",
"caller_frame",
",",
"top_path",
")",
"# local_path -- directory of a file (usually setup.py) that",
"# defines a configuration() function.",
"# local_path -- directory of a file (usually setup.py) that",
"# defines a configuration() function.",
"if",
"top_path",
"is",
"None",
":",
"top_path",
"=",
"self",
".",
"local_path",
"self",
".",
"local_path",
"=",
"''",
"if",
"package_path",
"is",
"None",
":",
"package_path",
"=",
"self",
".",
"local_path",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"njoin",
"(",
"self",
".",
"local_path",
",",
"package_path",
")",
")",
":",
"package_path",
"=",
"njoin",
"(",
"self",
".",
"local_path",
",",
"package_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"package_path",
"or",
"'.'",
")",
":",
"raise",
"ValueError",
"(",
"\"%r is not a directory\"",
"%",
"(",
"package_path",
",",
")",
")",
"self",
".",
"top_path",
"=",
"top_path",
"self",
".",
"package_path",
"=",
"package_path",
"# this is the relative path in the installed package",
"self",
".",
"path_in_package",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"self",
".",
"name",
".",
"split",
"(",
"'.'",
")",
")",
"self",
".",
"list_keys",
"=",
"self",
".",
"_list_keys",
"[",
":",
"]",
"self",
".",
"dict_keys",
"=",
"self",
".",
"_dict_keys",
"[",
":",
"]",
"for",
"n",
"in",
"self",
".",
"list_keys",
":",
"v",
"=",
"copy",
".",
"copy",
"(",
"attrs",
".",
"get",
"(",
"n",
",",
"[",
"]",
")",
")",
"setattr",
"(",
"self",
",",
"n",
",",
"as_list",
"(",
"v",
")",
")",
"for",
"n",
"in",
"self",
".",
"dict_keys",
":",
"v",
"=",
"copy",
".",
"copy",
"(",
"attrs",
".",
"get",
"(",
"n",
",",
"{",
"}",
")",
")",
"setattr",
"(",
"self",
",",
"n",
",",
"v",
")",
"known_keys",
"=",
"self",
".",
"list_keys",
"+",
"self",
".",
"dict_keys",
"self",
".",
"extra_keys",
"=",
"self",
".",
"_extra_keys",
"[",
":",
"]",
"for",
"n",
"in",
"attrs",
".",
"keys",
"(",
")",
":",
"if",
"n",
"in",
"known_keys",
":",
"continue",
"a",
"=",
"attrs",
"[",
"n",
"]",
"setattr",
"(",
"self",
",",
"n",
",",
"a",
")",
"if",
"isinstance",
"(",
"a",
",",
"list",
")",
":",
"self",
".",
"list_keys",
".",
"append",
"(",
"n",
")",
"elif",
"isinstance",
"(",
"a",
",",
"dict",
")",
":",
"self",
".",
"dict_keys",
".",
"append",
"(",
"n",
")",
"else",
":",
"self",
".",
"extra_keys",
".",
"append",
"(",
"n",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"njoin",
"(",
"package_path",
",",
"'__init__.py'",
")",
")",
":",
"self",
".",
"packages",
".",
"append",
"(",
"self",
".",
"name",
")",
"self",
".",
"package_dir",
"[",
"self",
".",
"name",
"]",
"=",
"package_path",
"self",
".",
"options",
"=",
"dict",
"(",
"ignore_setup_xxx_py",
"=",
"False",
",",
"assume_default_configuration",
"=",
"False",
",",
"delegate_options_to_subpackages",
"=",
"False",
",",
"quiet",
"=",
"False",
",",
")",
"caller_instance",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"3",
")",
":",
"try",
":",
"f",
"=",
"get_frame",
"(",
"i",
")",
"except",
"ValueError",
":",
"break",
"try",
":",
"caller_instance",
"=",
"eval",
"(",
"'self'",
",",
"f",
".",
"f_globals",
",",
"f",
".",
"f_locals",
")",
"break",
"except",
"NameError",
":",
"pass",
"if",
"isinstance",
"(",
"caller_instance",
",",
"self",
".",
"__class__",
")",
":",
"if",
"caller_instance",
".",
"options",
"[",
"'delegate_options_to_subpackages'",
"]",
":",
"self",
".",
"set_options",
"(",
"*",
"*",
"caller_instance",
".",
"options",
")",
"self",
".",
"setup_name",
"=",
"setup_name"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/misc_util.py#L741-L836 | ||
moflow/moflow | 2dfb27c799c90c6caf1477508eca3eec616ef7d2 | bap/libtracewrap/libtrace/protobuf/python/google/protobuf/message.py | python | Message.__getstate__ | (self) | return dict(serialized=self.SerializePartialToString()) | Support the pickle protocol. | Support the pickle protocol. | [
"Support",
"the",
"pickle",
"protocol",
"."
] | def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString()) | [
"def",
"__getstate__",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"serialized",
"=",
"self",
".",
"SerializePartialToString",
"(",
")",
")"
] | https://github.com/moflow/moflow/blob/2dfb27c799c90c6caf1477508eca3eec616ef7d2/bap/libtracewrap/libtrace/protobuf/python/google/protobuf/message.py#L273-L275 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pathlib2/pathlib2/__init__.py | python | PurePath.anchor | (self) | return anchor | The concatenation of the drive and root, or ''. | The concatenation of the drive and root, or ''. | [
"The",
"concatenation",
"of",
"the",
"drive",
"and",
"root",
"or",
"."
] | def anchor(self):
"""The concatenation of the drive and root, or ''."""
anchor = self._drv + self._root
return anchor | [
"def",
"anchor",
"(",
"self",
")",
":",
"anchor",
"=",
"self",
".",
"_drv",
"+",
"self",
".",
"_root",
"return",
"anchor"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pathlib2/pathlib2/__init__.py#L1034-L1037 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py | python | Label.__init__ | (self, master=None, cnf={}, **kw) | Construct a label widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
height, state, width | Construct a label widget with the parent MASTER. | [
"Construct",
"a",
"label",
"widget",
"with",
"the",
"parent",
"MASTER",
"."
] | def __init__(self, master=None, cnf={}, **kw):
"""Construct a label widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
height, state, width
"""
Widget.__init__(self, master, 'label', cnf, kw) | [
"def",
"__init__",
"(",
"self",
",",
"master",
"=",
"None",
",",
"cnf",
"=",
"{",
"}",
",",
"*",
"*",
"kw",
")",
":",
"Widget",
".",
"__init__",
"(",
"self",
",",
"master",
",",
"'label'",
",",
"cnf",
",",
"kw",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L2748-L2766 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_misc.py | python | DateTime.__lt__ | (*args, **kwargs) | return _misc_.DateTime___lt__(*args, **kwargs) | __lt__(self, DateTime other) -> bool | __lt__(self, DateTime other) -> bool | [
"__lt__",
"(",
"self",
"DateTime",
"other",
")",
"-",
">",
"bool"
] | def __lt__(*args, **kwargs):
"""__lt__(self, DateTime other) -> bool"""
return _misc_.DateTime___lt__(*args, **kwargs) | [
"def",
"__lt__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"DateTime___lt__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_misc.py#L4106-L4108 | |
Rid7/Table-OCR | 26814d4d4d3a2cd9f6b0155d66dd475927a23d11 | crnn/utils/util.py | python | strLabelConverter.encode | (self, text, depth=0) | return (torch.IntTensor(text), torch.IntTensor(length)) | Support batch or single str. | Support batch or single str. | [
"Support",
"batch",
"or",
"single",
"str",
"."
] | def encode(self, text, depth=0):
"""Support batch or single str."""
if isinstance(text, str):
for char in text:
if self.alphabet.find(char) == -1:
print(char)
text = [self.dict[char] for char in text]
length = [len(text)]
elif isinstance(text, collections.Iterable):
length = [len(s) for s in text]
text = ''.join(text)
text, _ = self.encode(text)
if depth:
return text, len(text)
return (torch.IntTensor(text), torch.IntTensor(length)) | [
"def",
"encode",
"(",
"self",
",",
"text",
",",
"depth",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"for",
"char",
"in",
"text",
":",
"if",
"self",
".",
"alphabet",
".",
"find",
"(",
"char",
")",
"==",
"-",
"1",
":",
"print",
"(",
"char",
")",
"text",
"=",
"[",
"self",
".",
"dict",
"[",
"char",
"]",
"for",
"char",
"in",
"text",
"]",
"length",
"=",
"[",
"len",
"(",
"text",
")",
"]",
"elif",
"isinstance",
"(",
"text",
",",
"collections",
".",
"Iterable",
")",
":",
"length",
"=",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"text",
"]",
"text",
"=",
"''",
".",
"join",
"(",
"text",
")",
"text",
",",
"_",
"=",
"self",
".",
"encode",
"(",
"text",
")",
"if",
"depth",
":",
"return",
"text",
",",
"len",
"(",
"text",
")",
"return",
"(",
"torch",
".",
"IntTensor",
"(",
"text",
")",
",",
"torch",
".",
"IntTensor",
"(",
"length",
")",
")"
] | https://github.com/Rid7/Table-OCR/blob/26814d4d4d3a2cd9f6b0155d66dd475927a23d11/crnn/utils/util.py#L15-L30 | |
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Arch/ArchSite.py | python | _ViewProviderSite.setProperties | (self,vobj) | Give the site view provider its site view provider specific properties.
These include solar diagram and compass data, dealing the orientation
of the site, and its orientation to the sun.
You can learn more about properties here: https://wiki.freecadweb.org/property | Give the site view provider its site view provider specific properties. | [
"Give",
"the",
"site",
"view",
"provider",
"its",
"site",
"view",
"provider",
"specific",
"properties",
"."
] | def setProperties(self,vobj):
"""Give the site view provider its site view provider specific properties.
These include solar diagram and compass data, dealing the orientation
of the site, and its orientation to the sun.
You can learn more about properties here: https://wiki.freecadweb.org/property
"""
pl = vobj.PropertiesList
if not "WindRose" in pl:
vobj.addProperty("App::PropertyBool","WindRose","Site",QT_TRANSLATE_NOOP("App::Property","Show wind rose diagram or not. Uses solar diagram scale. Needs Ladybug module"))
if not "SolarDiagram" in pl:
vobj.addProperty("App::PropertyBool","SolarDiagram","Site",QT_TRANSLATE_NOOP("App::Property","Show solar diagram or not"))
if not "SolarDiagramScale" in pl:
vobj.addProperty("App::PropertyFloat","SolarDiagramScale","Site",QT_TRANSLATE_NOOP("App::Property","The scale of the solar diagram"))
vobj.SolarDiagramScale = 1
if not "SolarDiagramPosition" in pl:
vobj.addProperty("App::PropertyVector","SolarDiagramPosition","Site",QT_TRANSLATE_NOOP("App::Property","The position of the solar diagram"))
if not "SolarDiagramColor" in pl:
vobj.addProperty("App::PropertyColor","SolarDiagramColor","Site",QT_TRANSLATE_NOOP("App::Property","The color of the solar diagram"))
vobj.SolarDiagramColor = (0.16,0.16,0.25)
if not "Orientation" in pl:
vobj.addProperty("App::PropertyEnumeration", "Orientation", "Site", QT_TRANSLATE_NOOP(
"App::Property", "When set to 'True North' the whole geometry will be rotated to match the true north of this site"))
vobj.Orientation = ["Project North", "True North"]
vobj.Orientation = "Project North"
if not "Compass" in pl:
vobj.addProperty("App::PropertyBool", "Compass", "Compass", QT_TRANSLATE_NOOP("App::Property", "Show compass or not"))
if not "CompassRotation" in pl:
vobj.addProperty("App::PropertyAngle", "CompassRotation", "Compass", QT_TRANSLATE_NOOP("App::Property", "The rotation of the Compass relative to the Site"))
if not "CompassPosition" in pl:
vobj.addProperty("App::PropertyVector", "CompassPosition", "Compass", QT_TRANSLATE_NOOP("App::Property", "The position of the Compass relative to the Site placement"))
if not "UpdateDeclination" in pl:
vobj.addProperty("App::PropertyBool", "UpdateDeclination", "Compass", QT_TRANSLATE_NOOP("App::Property", "Update the Declination value based on the compass rotation")) | [
"def",
"setProperties",
"(",
"self",
",",
"vobj",
")",
":",
"pl",
"=",
"vobj",
".",
"PropertiesList",
"if",
"not",
"\"WindRose\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyBool\"",
",",
"\"WindRose\"",
",",
"\"Site\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"Show wind rose diagram or not. Uses solar diagram scale. Needs Ladybug module\"",
")",
")",
"if",
"not",
"\"SolarDiagram\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyBool\"",
",",
"\"SolarDiagram\"",
",",
"\"Site\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"Show solar diagram or not\"",
")",
")",
"if",
"not",
"\"SolarDiagramScale\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyFloat\"",
",",
"\"SolarDiagramScale\"",
",",
"\"Site\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"The scale of the solar diagram\"",
")",
")",
"vobj",
".",
"SolarDiagramScale",
"=",
"1",
"if",
"not",
"\"SolarDiagramPosition\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyVector\"",
",",
"\"SolarDiagramPosition\"",
",",
"\"Site\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"The position of the solar diagram\"",
")",
")",
"if",
"not",
"\"SolarDiagramColor\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyColor\"",
",",
"\"SolarDiagramColor\"",
",",
"\"Site\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"The color of the solar diagram\"",
")",
")",
"vobj",
".",
"SolarDiagramColor",
"=",
"(",
"0.16",
",",
"0.16",
",",
"0.25",
")",
"if",
"not",
"\"Orientation\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyEnumeration\"",
",",
"\"Orientation\"",
",",
"\"Site\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"When set to 'True North' the whole geometry will be rotated to match the true north of this site\"",
")",
")",
"vobj",
".",
"Orientation",
"=",
"[",
"\"Project North\"",
",",
"\"True North\"",
"]",
"vobj",
".",
"Orientation",
"=",
"\"Project North\"",
"if",
"not",
"\"Compass\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyBool\"",
",",
"\"Compass\"",
",",
"\"Compass\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"Show compass or not\"",
")",
")",
"if",
"not",
"\"CompassRotation\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyAngle\"",
",",
"\"CompassRotation\"",
",",
"\"Compass\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"The rotation of the Compass relative to the Site\"",
")",
")",
"if",
"not",
"\"CompassPosition\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyVector\"",
",",
"\"CompassPosition\"",
",",
"\"Compass\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"The position of the Compass relative to the Site placement\"",
")",
")",
"if",
"not",
"\"UpdateDeclination\"",
"in",
"pl",
":",
"vobj",
".",
"addProperty",
"(",
"\"App::PropertyBool\"",
",",
"\"UpdateDeclination\"",
",",
"\"Compass\"",
",",
"QT_TRANSLATE_NOOP",
"(",
"\"App::Property\"",
",",
"\"Update the Declination value based on the compass rotation\"",
")",
")"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Arch/ArchSite.py#L824-L858 | ||
epam/Indigo | 30e40b4b1eb9bae0207435a26cfcb81ddcc42be1 | api/python/indigo/__init__.py | python | IndigoObject.getMolecule | (self, index) | return self.dispatcher.IndigoObject(
self.dispatcher,
self.dispatcher._checkResult(
Indigo._lib.indigoGetMolecule(self.id, index)
),
) | Reaction method returns a molecule by index
Args:
index (int): molecule index
Returns:
IndigoObject: molecule object | Reaction method returns a molecule by index | [
"Reaction",
"method",
"returns",
"a",
"molecule",
"by",
"index"
] | def getMolecule(self, index):
"""Reaction method returns a molecule by index
Args:
index (int): molecule index
Returns:
IndigoObject: molecule object
"""
self.dispatcher._setSessionId()
return self.dispatcher.IndigoObject(
self.dispatcher,
self.dispatcher._checkResult(
Indigo._lib.indigoGetMolecule(self.id, index)
),
) | [
"def",
"getMolecule",
"(",
"self",
",",
"index",
")",
":",
"self",
".",
"dispatcher",
".",
"_setSessionId",
"(",
")",
"return",
"self",
".",
"dispatcher",
".",
"IndigoObject",
"(",
"self",
".",
"dispatcher",
",",
"self",
".",
"dispatcher",
".",
"_checkResult",
"(",
"Indigo",
".",
"_lib",
".",
"indigoGetMolecule",
"(",
"self",
".",
"id",
",",
"index",
")",
")",
",",
")"
] | https://github.com/epam/Indigo/blob/30e40b4b1eb9bae0207435a26cfcb81ddcc42be1/api/python/indigo/__init__.py#L465-L480 | |
GoSSIP-SJTU/TripleDoggy | 03648d6b19c812504b14e8b98c8c7b3f443f4e54 | utils/lit/lit/util.py | python | which | (command, paths=None) | return None | which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified). | which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified). | [
"which",
"(",
"command",
"[",
"paths",
"]",
")",
"-",
"Look",
"up",
"the",
"given",
"command",
"in",
"the",
"paths",
"string",
"(",
"or",
"the",
"PATH",
"environment",
"variable",
"if",
"unspecified",
")",
"."
] | def which(command, paths=None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH', '')
# Check for absolute match first.
if os.path.isabs(command) and os.path.isfile(command):
return os.path.normpath(command)
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p) and not os.path.isdir(p):
return os.path.normpath(p)
return None | [
"def",
"which",
"(",
"command",
",",
"paths",
"=",
"None",
")",
":",
"if",
"paths",
"is",
"None",
":",
"paths",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATH'",
",",
"''",
")",
"# Check for absolute match first.",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"command",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"command",
")",
":",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"command",
")",
"# Would be nice if Python had a lib function for this.",
"if",
"not",
"paths",
":",
"paths",
"=",
"os",
".",
"defpath",
"# Get suffixes to search.",
"# On Cygwin, 'PATHEXT' may exist but it should not be used.",
"if",
"os",
".",
"pathsep",
"==",
"';'",
":",
"pathext",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATHEXT'",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
"else",
":",
"pathext",
"=",
"[",
"''",
"]",
"# Search the paths...",
"for",
"path",
"in",
"paths",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"for",
"ext",
"in",
"pathext",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"command",
"+",
"ext",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"p",
")",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"p",
")",
":",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"p",
")",
"return",
"None"
] | https://github.com/GoSSIP-SJTU/TripleDoggy/blob/03648d6b19c812504b14e8b98c8c7b3f443f4e54/utils/lit/lit/util.py#L189-L218 | |
strasdat/Sophus | 36b08885e094fda63e92ad89d65be380c288265a | sympy/sophus/quaternion.py | python | Quaternion.Da_a_mul_b | (a, b) | return sympy.Matrix([[y, v2, -v1, v0],
[-v2, y, v0, v1],
[v1, -v0, y, v2],
[-v0, -v1, -v2, y]]) | derivatice of quaternion muliplication wrt left multiplier a | derivatice of quaternion muliplication wrt left multiplier a | [
"derivatice",
"of",
"quaternion",
"muliplication",
"wrt",
"left",
"multiplier",
"a"
] | def Da_a_mul_b(a, b):
""" derivatice of quaternion muliplication wrt left multiplier a """
v0 = b.vec[0]
v1 = b.vec[1]
v2 = b.vec[2]
y = b.real
return sympy.Matrix([[y, v2, -v1, v0],
[-v2, y, v0, v1],
[v1, -v0, y, v2],
[-v0, -v1, -v2, y]]) | [
"def",
"Da_a_mul_b",
"(",
"a",
",",
"b",
")",
":",
"v0",
"=",
"b",
".",
"vec",
"[",
"0",
"]",
"v1",
"=",
"b",
".",
"vec",
"[",
"1",
"]",
"v2",
"=",
"b",
".",
"vec",
"[",
"2",
"]",
"y",
"=",
"b",
".",
"real",
"return",
"sympy",
".",
"Matrix",
"(",
"[",
"[",
"y",
",",
"v2",
",",
"-",
"v1",
",",
"v0",
"]",
",",
"[",
"-",
"v2",
",",
"y",
",",
"v0",
",",
"v1",
"]",
",",
"[",
"v1",
",",
"-",
"v0",
",",
"y",
",",
"v2",
"]",
",",
"[",
"-",
"v0",
",",
"-",
"v1",
",",
"-",
"v2",
",",
"y",
"]",
"]",
")"
] | https://github.com/strasdat/Sophus/blob/36b08885e094fda63e92ad89d65be380c288265a/sympy/sophus/quaternion.py#L81-L90 | |
apiaryio/snowcrash | b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3 | tools/gyp/pylib/gyp/generator/msvs.py | python | _ConvertSourcesToFilterHierarchy | (sources, prefix=None, excluded=None,
list_excluded=True, msvs_version=None) | return result | Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
msvs_version: A MSVSVersion object.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])] | Converts a list split source file paths into a vcproj folder hierarchy. | [
"Converts",
"a",
"list",
"split",
"source",
"file",
"paths",
"into",
"a",
"vcproj",
"folder",
"hierarchy",
"."
] | def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True, msvs_version=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
msvs_version: A MSVSVersion object.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = OrderedDict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
elif msvs_version and not msvs_version.UsesVcxproj():
# For MSVS 2008 and earlier, we need to process all files before walking
# the sub folders.
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
else:
contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(s[0], contents=contents)
result.append(contents)
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
if msvs_version and msvs_version.UsesVcxproj():
return result
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result | [
"def",
"_ConvertSourcesToFilterHierarchy",
"(",
"sources",
",",
"prefix",
"=",
"None",
",",
"excluded",
"=",
"None",
",",
"list_excluded",
"=",
"True",
",",
"msvs_version",
"=",
"None",
")",
":",
"if",
"not",
"prefix",
":",
"prefix",
"=",
"[",
"]",
"result",
"=",
"[",
"]",
"excluded_result",
"=",
"[",
"]",
"folders",
"=",
"OrderedDict",
"(",
")",
"# Gather files into the final result, excluded, or folders.",
"for",
"s",
"in",
"sources",
":",
"if",
"len",
"(",
"s",
")",
"==",
"1",
":",
"filename",
"=",
"_NormalizedSource",
"(",
"'\\\\'",
".",
"join",
"(",
"prefix",
"+",
"s",
")",
")",
"if",
"filename",
"in",
"excluded",
":",
"excluded_result",
".",
"append",
"(",
"filename",
")",
"else",
":",
"result",
".",
"append",
"(",
"filename",
")",
"elif",
"msvs_version",
"and",
"not",
"msvs_version",
".",
"UsesVcxproj",
"(",
")",
":",
"# For MSVS 2008 and earlier, we need to process all files before walking",
"# the sub folders.",
"if",
"not",
"folders",
".",
"get",
"(",
"s",
"[",
"0",
"]",
")",
":",
"folders",
"[",
"s",
"[",
"0",
"]",
"]",
"=",
"[",
"]",
"folders",
"[",
"s",
"[",
"0",
"]",
"]",
".",
"append",
"(",
"s",
"[",
"1",
":",
"]",
")",
"else",
":",
"contents",
"=",
"_ConvertSourcesToFilterHierarchy",
"(",
"[",
"s",
"[",
"1",
":",
"]",
"]",
",",
"prefix",
"+",
"[",
"s",
"[",
"0",
"]",
"]",
",",
"excluded",
"=",
"excluded",
",",
"list_excluded",
"=",
"list_excluded",
",",
"msvs_version",
"=",
"msvs_version",
")",
"contents",
"=",
"MSVSProject",
".",
"Filter",
"(",
"s",
"[",
"0",
"]",
",",
"contents",
"=",
"contents",
")",
"result",
".",
"append",
"(",
"contents",
")",
"# Add a folder for excluded files.",
"if",
"excluded_result",
"and",
"list_excluded",
":",
"excluded_folder",
"=",
"MSVSProject",
".",
"Filter",
"(",
"'_excluded_files'",
",",
"contents",
"=",
"excluded_result",
")",
"result",
".",
"append",
"(",
"excluded_folder",
")",
"if",
"msvs_version",
"and",
"msvs_version",
".",
"UsesVcxproj",
"(",
")",
":",
"return",
"result",
"# Populate all the folders.",
"for",
"f",
"in",
"folders",
":",
"contents",
"=",
"_ConvertSourcesToFilterHierarchy",
"(",
"folders",
"[",
"f",
"]",
",",
"prefix",
"=",
"prefix",
"+",
"[",
"f",
"]",
",",
"excluded",
"=",
"excluded",
",",
"list_excluded",
"=",
"list_excluded",
",",
"msvs_version",
"=",
"msvs_version",
")",
"contents",
"=",
"MSVSProject",
".",
"Filter",
"(",
"f",
",",
"contents",
"=",
"contents",
")",
"result",
".",
"append",
"(",
"contents",
")",
"return",
"result"
] | https://github.com/apiaryio/snowcrash/blob/b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3/tools/gyp/pylib/gyp/generator/msvs.py#L180-L242 | |
chromiumembedded/cef | 80caf947f3fe2210e5344713c5281d8af9bdc295 | tools/automate/automate-git.py | python | get_chromium_main_commit | (position) | return None | Returns the main commit for the specified Chromium commit position. | Returns the main commit for the specified Chromium commit position. | [
"Returns",
"the",
"main",
"commit",
"for",
"the",
"specified",
"Chromium",
"commit",
"position",
"."
def get_chromium_main_commit(position):
  """ Returns the main commit for the specified Chromium commit position. """
  # Match both the legacy master and the current main ref annotations that
  # the Chromium bots embed in commit messages.
  grep_master = 'refs/heads/master@{#%s}' % str(position)
  grep_main = 'refs/heads/main@{#%s}' % str(position)
  cmd = '%s log -1 --grep=%s --grep=%s origin/main' % (
      git_exe, grep_master, grep_main)
  output = exec_cmd(cmd, chromium_src_dir)['out']
  if output == '':
    return None
  found = re.search(r'^commit ([a-f0-9]+)', output)
  assert found != None, 'Failed to find commit'
  return found.group(1)
"def",
"get_chromium_main_commit",
"(",
"position",
")",
":",
"cmd",
"=",
"'%s log -1 --grep=refs/heads/master@{#%s} --grep=refs/heads/main@{#%s} origin/main'",
"%",
"(",
"git_exe",
",",
"str",
"(",
"position",
")",
",",
"str",
"(",
"position",
")",
")",
"result",
"=",
"exec_cmd",
"(",
"cmd",
",",
"chromium_src_dir",
")",
"if",
"result",
"[",
"'out'",
"]",
"!=",
"''",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'^commit ([a-f0-9]+)'",
",",
"result",
"[",
"'out'",
"]",
")",
"assert",
"match",
"!=",
"None",
",",
"'Failed to find commit'",
"return",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"return",
"None"
] | https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/automate/automate-git.py#L385-L394 | |
kamyu104/LeetCode-Solutions | 77605708a927ea3b85aee5a479db733938c7c211 | Python/number-of-students-unable-to-eat-lunch.py | python | Solution.countStudents | (self, students, sandwiches) | return len(sandwiches)-i | :type students: List[int]
:type sandwiches: List[int]
:rtype: int | :type students: List[int]
:type sandwiches: List[int]
:rtype: int | [
":",
"type",
"students",
":",
"List",
"[",
"int",
"]",
":",
"type",
"sandwiches",
":",
"List",
"[",
"int",
"]",
":",
"rtype",
":",
"int"
def countStudents(self, students, sandwiches):
    """
    :type students: List[int]
    :type sandwiches: List[int]
    :rtype: int
    """
    # Order of the student queue never matters: any student wanting the top
    # sandwich eventually reaches the front, so only the demand counts do.
    demand = collections.Counter(students)
    served = 0
    for top in sandwiches:
        if not demand[top]:
            # Nobody left wants this sandwich; everyone behind it starves.
            break
        demand[top] -= 1
        served += 1
    return len(sandwiches) - served
"def",
"countStudents",
"(",
"self",
",",
"students",
",",
"sandwiches",
")",
":",
"count",
"=",
"collections",
".",
"Counter",
"(",
"students",
")",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"sandwiches",
")",
":",
"if",
"not",
"count",
"[",
"s",
"]",
":",
"break",
"count",
"[",
"s",
"]",
"-=",
"1",
"else",
":",
"i",
"=",
"len",
"(",
"sandwiches",
")",
"return",
"len",
"(",
"sandwiches",
")",
"-",
"i"
] | https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/number-of-students-unable-to-eat-lunch.py#L8-L21 | |
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/dataset.py | python | InMemoryDataset.global_shuffle | (self, fleet=None, thread_num=12) | Global shuffle.
Global shuffle can be used only in distributed mode. i.e. multiple
processes on single machine or multiple machines training together.
If you run in distributed mode, you should pass fleet instead of None.
Examples:
.. code-block:: python
# required: skiptest
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
filelist = ["a.txt", "b.txt"]
dataset.set_filelist(filelist)
dataset.load_into_memory()
dataset.global_shuffle(fleet)
Args:
fleet(Fleet): fleet singleton. Default None.
thread_num(int): shuffle thread num. Default is 12. | Global shuffle.
Global shuffle can be used only in distributed mode. i.e. multiple
processes on single machine or multiple machines training together.
If you run in distributed mode, you should pass fleet instead of None. | [
"Global",
"shuffle",
".",
"Global",
"shuffle",
"can",
"be",
"used",
"only",
"in",
"distributed",
"mode",
".",
"i",
".",
"e",
".",
"multiple",
"processes",
"on",
"single",
"machine",
"or",
"multiple",
"machines",
"training",
"together",
".",
"If",
"you",
"run",
"in",
"distributed",
"mode",
"you",
"should",
"pass",
"fleet",
"instead",
"of",
"None",
"."
def global_shuffle(self, fleet=None, thread_num=12):
    """
    Global shuffle.
    Global shuffle can be used only in distributed mode. i.e. multiple
    processes on single machine or multiple machines training together.
    If you run in distributed mode, you should pass fleet instead of None.

    Examples:
        .. code-block:: python

          # required: skiptest
          import paddle.fluid as fluid
          from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
          dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
          filelist = ["a.txt", "b.txt"]
          dataset.set_filelist(filelist)
          dataset.load_into_memory()
          dataset.global_shuffle(fleet)

    Args:
        fleet(Fleet): fleet singleton. Default None.
        thread_num(int): shuffle thread num. Default is 12.

    """
    from paddle.fluid.incubate.fleet.parameter_server.pslib import PSLib

    def _barrier():
        # Synchronize all trainers; a no-op in local (fleet is None) mode.
        # PSLib exposes the worker barrier via its role maker.
        if fleet is None:
            return
        if isinstance(fleet, PSLib):
            fleet._role_maker.barrier_worker()
        else:
            fleet.barrier_worker()

    _barrier()
    if fleet is not None and self.trainer_num == -1:
        self.trainer_num = fleet.worker_num()
    if self.fleet_send_batch_size is None:
        self.fleet_send_batch_size = 1024
    if self.fleet_send_sleep_seconds is None:
        self.fleet_send_sleep_seconds = 0
    self.dataset.register_client2client_msg_handler()
    self.dataset.set_trainer_num(self.trainer_num)
    self.dataset.set_fleet_send_batch_size(self.fleet_send_batch_size)
    self.dataset.set_fleet_send_sleep_seconds(self.fleet_send_sleep_seconds)
    # Barriers bracket the shuffle so no trainer starts exchanging data with
    # a peer that has not finished configuration, and none proceeds to
    # merging before every peer has finished shuffling.
    _barrier()
    self.dataset.global_shuffle(thread_num)
    _barrier()
    if self.merge_by_lineid:
        self.dataset.merge_by_lineid()
    _barrier()
"def",
"global_shuffle",
"(",
"self",
",",
"fleet",
"=",
"None",
",",
"thread_num",
"=",
"12",
")",
":",
"from",
"paddle",
".",
"fluid",
".",
"incubate",
".",
"fleet",
".",
"parameter_server",
".",
"pslib",
"import",
"PSLib",
"if",
"fleet",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"fleet",
",",
"PSLib",
")",
":",
"fleet",
".",
"barrier_worker",
"(",
")",
"else",
":",
"fleet",
".",
"_role_maker",
".",
"barrier_worker",
"(",
")",
"if",
"self",
".",
"trainer_num",
"==",
"-",
"1",
":",
"self",
".",
"trainer_num",
"=",
"fleet",
".",
"worker_num",
"(",
")",
"if",
"self",
".",
"fleet_send_batch_size",
"is",
"None",
":",
"self",
".",
"fleet_send_batch_size",
"=",
"1024",
"if",
"self",
".",
"fleet_send_sleep_seconds",
"is",
"None",
":",
"self",
".",
"fleet_send_sleep_seconds",
"=",
"0",
"self",
".",
"dataset",
".",
"register_client2client_msg_handler",
"(",
")",
"self",
".",
"dataset",
".",
"set_trainer_num",
"(",
"self",
".",
"trainer_num",
")",
"self",
".",
"dataset",
".",
"set_fleet_send_batch_size",
"(",
"self",
".",
"fleet_send_batch_size",
")",
"self",
".",
"dataset",
".",
"set_fleet_send_sleep_seconds",
"(",
"self",
".",
"fleet_send_sleep_seconds",
")",
"if",
"fleet",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"fleet",
",",
"PSLib",
")",
":",
"fleet",
".",
"barrier_worker",
"(",
")",
"else",
":",
"fleet",
".",
"_role_maker",
".",
"barrier_worker",
"(",
")",
"self",
".",
"dataset",
".",
"global_shuffle",
"(",
"thread_num",
")",
"if",
"fleet",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"fleet",
",",
"PSLib",
")",
":",
"fleet",
".",
"barrier_worker",
"(",
")",
"else",
":",
"fleet",
".",
"_role_maker",
".",
"barrier_worker",
"(",
")",
"if",
"self",
".",
"merge_by_lineid",
":",
"self",
".",
"dataset",
".",
"merge_by_lineid",
"(",
")",
"if",
"fleet",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"fleet",
",",
"PSLib",
")",
":",
"fleet",
".",
"barrier_worker",
"(",
")",
"else",
":",
"fleet",
".",
"_role_maker",
".",
"barrier_worker",
"(",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/dataset.py#L841-L898 | ||
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/buildscripts/idl/idl/struct_types.py | python | StructTypeInfoBase.get_op_msg_request_serializer_method | (self) | Get the OpMsg serializer method for a struct. | Get the OpMsg serializer method for a struct. | [
"Get",
"the",
"OpMsg",
"serializer",
"method",
"for",
"a",
"struct",
"."
def get_op_msg_request_serializer_method(self):
    # type: () -> Optional[MethodInfo]
    """Get the OpMsg serializer method for a struct.

    Base implementation returns None (via the bare ``pass``), i.e. no OpMsg
    request serializer; presumably overridden by subclasses that support
    OpMsg requests — confirm against the concrete StructTypeInfo classes.
    """
    # pylint: disable=invalid-name
    pass
"def",
"get_op_msg_request_serializer_method",
"(",
"self",
")",
":",
"# type: () -> Optional[MethodInfo]",
"# pylint: disable=invalid-name",
"pass"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/idl/idl/struct_types.py#L164-L168 | ||
potassco/clingo | e0c91d8f95cc28de1c480a871f9c97c30de83d40 | .github/manylinux.py | python | adjust_version | (url) | Adjust version in setup.py. | Adjust version in setup.py. | [
"Adjust",
"version",
"in",
"setup",
".",
"py",
"."
def adjust_version(url):
    '''
    Adjust version in setup.py.

    Queries the package index at `url` (via curl) for releases that were
    already published for the version declared in libclingo/clingo.h and,
    if a sdist or manylinux wheel for this architecture exists, writes a
    bumped `.postN` version into setup.py so the next upload is unique.
    '''
    with open('setup.py') as fr:
        setup = fr.read()

    package_name = search(r'''name[ ]*=[ ]*['"]([^'"]*)['"]''', setup).group(1)
    # PyPI normalizes '-' and '_' interchangeably in filenames.
    package_regex = package_name.replace('-', '[-_]')

    pip = check_output(['curl', '-sL', '{}/{}'.format(url, package_name)]).decode()

    version = None
    with open('libclingo/clingo.h') as fh:
        for line in fh:
            m = match(r'#define CLINGO_VERSION "([0-9]+\.[0-9]+\.[0-9]+)"', line)
            if m is not None:
                version = m.group(1)
    assert version is not None

    post = 0
    # Highest already-published post release of the sdist.
    for m in finditer(r'{}-{}\.post([0-9]+)\.tar\.gz'.format(package_regex, escape(version)), pip):
        post = max(post, int(m.group(1)))
    # A plain (non-post) wheel for this arch forces at least .post1.
    if search(r'{}-{}.*manylinux2014_{}'.format(package_regex, escape(version), escape(ARCH)), pip):
        post = max(post, 1)
    # An existing .postN wheel for this arch forces .post(N+1).
    for m in finditer(r'{}-{}\.post([0-9]+).*manylinux2014_{}'.format(package_regex, escape(version), escape(ARCH)), pip):
        post = max(post, int(m.group(1)) + 1)

    with open('setup.py', 'w') as fw:
        if post > 0:
            fw.write(sub('version( *)=.*', 'version = \'{}.post{}\','.format(version, post), setup, 1))
        else:
            fw.write(sub('version( *)=.*', 'version = \'{}\','.format(version), setup, 1))
"def",
"adjust_version",
"(",
"url",
")",
":",
"with",
"open",
"(",
"'setup.py'",
")",
"as",
"fr",
":",
"setup",
"=",
"fr",
".",
"read",
"(",
")",
"package_name",
"=",
"search",
"(",
"r'''name[ ]*=[ ]*['\"]([^'\"]*)['\"]'''",
",",
"setup",
")",
".",
"group",
"(",
"1",
")",
"package_regex",
"=",
"package_name",
".",
"replace",
"(",
"'-'",
",",
"'[-_]'",
")",
"pip",
"=",
"check_output",
"(",
"[",
"'curl'",
",",
"'-sL'",
",",
"'{}/{}'",
".",
"format",
"(",
"url",
",",
"package_name",
")",
"]",
")",
".",
"decode",
"(",
")",
"version",
"=",
"None",
"with",
"open",
"(",
"'libclingo/clingo.h'",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"m",
"=",
"match",
"(",
"r'#define CLINGO_VERSION \"([0-9]+\\.[0-9]+\\.[0-9]+)\"'",
",",
"line",
")",
"if",
"m",
"is",
"not",
"None",
":",
"version",
"=",
"m",
".",
"group",
"(",
"1",
")",
"assert",
"version",
"is",
"not",
"None",
"post",
"=",
"0",
"for",
"m",
"in",
"finditer",
"(",
"r'{}-{}\\.post([0-9]+)\\.tar\\.gz'",
".",
"format",
"(",
"package_regex",
",",
"escape",
"(",
"version",
")",
")",
",",
"pip",
")",
":",
"post",
"=",
"max",
"(",
"post",
",",
"int",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
")",
"for",
"m",
"in",
"finditer",
"(",
"r'{}-{}.*manylinux2014_{}'",
".",
"format",
"(",
"package_regex",
",",
"escape",
"(",
"version",
")",
",",
"escape",
"(",
"ARCH",
")",
")",
",",
"pip",
")",
":",
"post",
"=",
"max",
"(",
"post",
",",
"1",
")",
"for",
"m",
"in",
"finditer",
"(",
"r'{}-{}\\.post([0-9]+).*manylinux2014_{}'",
".",
"format",
"(",
"package_regex",
",",
"escape",
"(",
"version",
")",
",",
"escape",
"(",
"ARCH",
")",
")",
",",
"pip",
")",
":",
"post",
"=",
"max",
"(",
"post",
",",
"int",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
"+",
"1",
")",
"with",
"open",
"(",
"'setup.py'",
",",
"'w'",
")",
"as",
"fw",
":",
"if",
"post",
">",
"0",
":",
"fw",
".",
"write",
"(",
"sub",
"(",
"'version( *)=.*'",
",",
"'version = \\'{}.post{}\\','",
".",
"format",
"(",
"version",
",",
"post",
")",
",",
"setup",
",",
"1",
")",
")",
"else",
":",
"fw",
".",
"write",
"(",
"sub",
"(",
"'version( *)=.*'",
",",
"'version = \\'{}\\','",
".",
"format",
"(",
"version",
")",
",",
"setup",
",",
"1",
")",
")"
] | https://github.com/potassco/clingo/blob/e0c91d8f95cc28de1c480a871f9c97c30de83d40/.github/manylinux.py#L14-L47 | ||
llvm-mirror/lldb | d01083a850f577b85501a0902b52fd0930de72c7 | third_party/Python/module/pexpect-4.6/pexpect/spawnbase.py | python | SpawnBase.fileno | (self) | return self.child_fd | Expose file descriptor for a file-like interface | Expose file descriptor for a file-like interface | [
"Expose",
"file",
"descriptor",
"for",
"a",
"file",
"-",
"like",
"interface"
def fileno(self):
    '''Return the child file descriptor, giving the spawn object a
    file-like interface (e.g. for use with select()).
    '''
    return self.child_fd
"def",
"fileno",
"(",
"self",
")",
":",
"return",
"self",
".",
"child_fd"
] | https://github.com/llvm-mirror/lldb/blob/d01083a850f577b85501a0902b52fd0930de72c7/third_party/Python/module/pexpect-4.6/pexpect/spawnbase.py#L501-L504 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/numpy/py3/numpy/core/numeric.py | python | indices | (dimensions, dtype=int, sparse=False) | return res | Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
sparse : boolean, optional
Return a sparse representation of the grid instead of a dense
representation. Default is False.
.. versionadded:: 1.17
Returns
-------
grid : one ndarray or tuple of ndarrays
If sparse is False:
Returns one array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
If sparse is True:
Returns a tuple of arrays, with
``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
dimensions[i] in the ith place
See Also
--------
mgrid, ogrid, meshgrid
Notes
-----
The output shape in the dense case is obtained by prepending the number
of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N, r0, ..., rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
If sparse is set to true, the grid will be returned in a sparse
representation.
>>> i, j = np.indices((2, 3), sparse=True)
>>> i.shape
(2, 1)
>>> j.shape
(1, 3)
>>> i # row indices
array([[0],
[1]])
>>> j # column indices
array([[0, 1, 2]]) | Return an array representing the indices of a grid. | [
"Return",
"an",
"array",
"representing",
"the",
"indices",
"of",
"a",
"grid",
"."
def indices(dimensions, dtype=int, sparse=False):
    """
    Return an array representing the indices of a grid.

    Compute an array where the subarrays contain index values 0, 1, ...
    varying only along the corresponding axis.

    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : dtype, optional
        Data type of the result.
    sparse : boolean, optional
        Return a sparse representation of the grid instead of a dense
        representation. Default is False.

        .. versionadded:: 1.17

    Returns
    -------
    grid : one ndarray or tuple of ndarrays
        If sparse is False, a single array of shape
        ``(len(dimensions),) + tuple(dimensions)`` whose ``k``-th subarray
        is the N-D array of indices along the ``k``-th axis, i.e.
        ``grid[k, i0, i1, ..., iN-1] = ik``.  If sparse is True, a tuple of
        mutually broadcastable arrays, one per axis, where ``grid[i]`` has
        shape ``(1, ..., 1, dimensions[i], 1, ..., 1)`` with
        ``dimensions[i]`` in the i-th place.

    See Also
    --------
    mgrid, ogrid, meshgrid

    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]        # row indices
    array([[0, 0, 0],
           [1, 1, 1]])
    >>> grid[1]        # column indices
    array([[0, 1, 2],
           [0, 1, 2]])

    >>> i, j = np.indices((2, 3), sparse=True)
    >>> i.shape, j.shape
    ((2, 1), (1, 3))
    """
    dims = tuple(dimensions)
    ndim = len(dims)
    base = (1,) * ndim

    def _axis_indices(axis, extent):
        # 0..extent-1 laid out along `axis`; singleton everywhere else so the
        # per-axis arrays broadcast against each other.
        shape = base[:axis] + (extent,) + base[axis + 1:]
        return arange(extent, dtype=dtype).reshape(shape)

    if sparse:
        return tuple(_axis_indices(a, e) for a, e in enumerate(dims))
    res = empty((ndim,) + dims, dtype=dtype)
    for a, e in enumerate(dims):
        res[a] = _axis_indices(a, e)
    return res
"def",
"indices",
"(",
"dimensions",
",",
"dtype",
"=",
"int",
",",
"sparse",
"=",
"False",
")",
":",
"dimensions",
"=",
"tuple",
"(",
"dimensions",
")",
"N",
"=",
"len",
"(",
"dimensions",
")",
"shape",
"=",
"(",
"1",
",",
")",
"*",
"N",
"if",
"sparse",
":",
"res",
"=",
"tuple",
"(",
")",
"else",
":",
"res",
"=",
"empty",
"(",
"(",
"N",
",",
")",
"+",
"dimensions",
",",
"dtype",
"=",
"dtype",
")",
"for",
"i",
",",
"dim",
"in",
"enumerate",
"(",
"dimensions",
")",
":",
"idx",
"=",
"arange",
"(",
"dim",
",",
"dtype",
"=",
"dtype",
")",
".",
"reshape",
"(",
"shape",
"[",
":",
"i",
"]",
"+",
"(",
"dim",
",",
")",
"+",
"shape",
"[",
"i",
"+",
"1",
":",
"]",
")",
"if",
"sparse",
":",
"res",
"=",
"res",
"+",
"(",
"idx",
",",
")",
"else",
":",
"res",
"[",
"i",
"]",
"=",
"idx",
"return",
"res"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/core/numeric.py#L1680-L1779 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.