nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ledger/ledger | 8e79216887cf3c342dfca1ffa52cf4e6389d6de4 | contrib/non-profit-audit-reports/ooolib2/__init__.py | python | Writer.__init__ | (self) | Initialize ooolib Writer instance | Initialize ooolib Writer instance | [
"Initialize",
"ooolib",
"Writer",
"instance"
] | def __init__(self):
"Initialize ooolib Writer instance"
# Default to no debugging
self.debug = False
self.meta = Meta('odt') | [
"def",
"__init__",
"(",
"self",
")",
":",
"# Default to no debugging",
"self",
".",
"debug",
"=",
"False",
"self",
".",
"meta",
"=",
"Meta",
"(",
"'odt'",
")"
] | https://github.com/ledger/ledger/blob/8e79216887cf3c342dfca1ffa52cf4e6389d6de4/contrib/non-profit-audit-reports/ooolib2/__init__.py#L1845-L1849 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/html.py | python | HtmlBookRecord.GetContentsEnd | (*args, **kwargs) | return _html.HtmlBookRecord_GetContentsEnd(*args, **kwargs) | GetContentsEnd(self) -> int | GetContentsEnd(self) -> int | [
"GetContentsEnd",
"(",
"self",
")",
"-",
">",
"int"
] | def GetContentsEnd(*args, **kwargs):
"""GetContentsEnd(self) -> int"""
return _html.HtmlBookRecord_GetContentsEnd(*args, **kwargs) | [
"def",
"GetContentsEnd",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_html",
".",
"HtmlBookRecord_GetContentsEnd",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/html.py#L1435-L1437 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_windows.py | python | PrintPreview.Print | (*args, **kwargs) | return _windows_.PrintPreview_Print(*args, **kwargs) | Print(self, bool interactive) -> bool | Print(self, bool interactive) -> bool | [
"Print",
"(",
"self",
"bool",
"interactive",
")",
"-",
">",
"bool"
] | def Print(*args, **kwargs):
"""Print(self, bool interactive) -> bool"""
return _windows_.PrintPreview_Print(*args, **kwargs) | [
"def",
"Print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"PrintPreview_Print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L5650-L5652 | |
LLNL/lbann | 26083e6c86050302ce33148aea70f62e61cacb92 | python/lbann/contrib/lc/systems.py | python | system | () | return _system | Name of system.
Hostname with trailing digits removed. | Name of system. | [
"Name",
"of",
"system",
"."
] | def system():
"""Name of system.
Hostname with trailing digits removed.
"""
return _system | [
"def",
"system",
"(",
")",
":",
"return",
"_system"
] | https://github.com/LLNL/lbann/blob/26083e6c86050302ce33148aea70f62e61cacb92/python/lbann/contrib/lc/systems.py#L37-L43 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/tkinter/ttk.py | python | Panedwindow.insert | (self, pos, child, **kw) | Inserts a pane at the specified positions.
pos is either the string end, and integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position. | Inserts a pane at the specified positions. | [
"Inserts",
"a",
"pane",
"at",
"the",
"specified",
"positions",
"."
] | def insert(self, pos, child, **kw):
"""Inserts a pane at the specified positions.
pos is either the string end, and integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw))) | [
"def",
"insert",
"(",
"self",
",",
"pos",
",",
"child",
",",
"*",
"*",
"kw",
")",
":",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"\"insert\"",
",",
"pos",
",",
"child",
",",
"*",
"(",
"_format_optdict",
"(",
"kw",
")",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/tkinter/ttk.py#L962-L968 | ||
RamadhanAmizudin/malware | 2c6c53c8b0d556f5d8078d6ca0fc4448f4697cf1 | Fuzzbunch/fuzzbunch/pyreadline/rlmain.py | python | Readline.read_init_file | (self, filename=None) | Parse a readline initialization file. The default filename is the last filename used. | Parse a readline initialization file. The default filename is the last filename used. | [
"Parse",
"a",
"readline",
"initialization",
"file",
".",
"The",
"default",
"filename",
"is",
"the",
"last",
"filename",
"used",
"."
] | def read_init_file(self, filename=None):
'''Parse a readline initialization file. The default filename is the last filename used.'''
log('read_init_file("%s")' % filename) | [
"def",
"read_init_file",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"log",
"(",
"'read_init_file(\"%s\")'",
"%",
"filename",
")"
] | https://github.com/RamadhanAmizudin/malware/blob/2c6c53c8b0d556f5d8078d6ca0fc4448f4697cf1/Fuzzbunch/fuzzbunch/pyreadline/rlmain.py#L150-L152 | ||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/logging/handlers.py | python | BufferingHandler.close | (self) | Close the handler.
This version just flushes and chains to the parent class' close(). | Close the handler. | [
"Close",
"the",
"handler",
"."
] | def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self) | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"flush",
"(",
")",
"logging",
".",
"Handler",
".",
"close",
"(",
"self",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/logging/handlers.py#L1156-L1163 | ||
koth/kcws | 88efbd36a7022de4e6e90f5a1fb880cf87cfae9f | third_party/python/cpplint/cpplint.py | python | CheckCheck | (filename, clean_lines, linenum, error) | Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Checks the use of CHECK and EXPECT macros. | [
"Checks",
"the",
"use",
"of",
"CHECK",
"and",
"EXPECT",
"macros",
"."
] | def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs do not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator)) | [
"def",
"CheckCheck",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"# Decide the set of replacement macros that should be suggested",
"lines",
"=",
"clean_lines",
".",
"elided",
"(",
"check_macro",
",",
"start_pos",
")",
"=",
"FindCheckMacro",
"(",
"lines",
"[",
"linenum",
"]",
")",
"if",
"not",
"check_macro",
":",
"return",
"# Find end of the boolean expression by matching parentheses",
"(",
"last_line",
",",
"end_line",
",",
"end_pos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"start_pos",
")",
"if",
"end_pos",
"<",
"0",
":",
"return",
"# If the check macro is followed by something other than a",
"# semicolon, assume users will log their own custom error messages",
"# and don't suggest any replacements.",
"if",
"not",
"Match",
"(",
"r'\\s*;'",
",",
"last_line",
"[",
"end_pos",
":",
"]",
")",
":",
"return",
"if",
"linenum",
"==",
"end_line",
":",
"expression",
"=",
"lines",
"[",
"linenum",
"]",
"[",
"start_pos",
"+",
"1",
":",
"end_pos",
"-",
"1",
"]",
"else",
":",
"expression",
"=",
"lines",
"[",
"linenum",
"]",
"[",
"start_pos",
"+",
"1",
":",
"]",
"for",
"i",
"in",
"xrange",
"(",
"linenum",
"+",
"1",
",",
"end_line",
")",
":",
"expression",
"+=",
"lines",
"[",
"i",
"]",
"expression",
"+=",
"last_line",
"[",
"0",
":",
"end_pos",
"-",
"1",
"]",
"# Parse expression so that we can take parentheses into account.",
"# This avoids false positives for inputs like \"CHECK((a < 4) == b)\",",
"# which is not replaceable by CHECK_LE.",
"lhs",
"=",
"''",
"rhs",
"=",
"''",
"operator",
"=",
"None",
"while",
"expression",
":",
"matched",
"=",
"Match",
"(",
"r'^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||'",
"r'==|!=|>=|>|<=|<|\\()(.*)$'",
",",
"expression",
")",
"if",
"matched",
":",
"token",
"=",
"matched",
".",
"group",
"(",
"1",
")",
"if",
"token",
"==",
"'('",
":",
"# Parenthesized operand",
"expression",
"=",
"matched",
".",
"group",
"(",
"2",
")",
"(",
"end",
",",
"_",
")",
"=",
"FindEndOfExpressionInLine",
"(",
"expression",
",",
"0",
",",
"[",
"'('",
"]",
")",
"if",
"end",
"<",
"0",
":",
"return",
"# Unmatched parenthesis",
"lhs",
"+=",
"'('",
"+",
"expression",
"[",
"0",
":",
"end",
"]",
"expression",
"=",
"expression",
"[",
"end",
":",
"]",
"elif",
"token",
"in",
"(",
"'&&'",
",",
"'||'",
")",
":",
"# Logical and/or operators. This means the expression",
"# contains more than one term, for example:",
"# CHECK(42 < a && a < b);",
"#",
"# These are not replaceable with CHECK_LE, so bail out early.",
"return",
"elif",
"token",
"in",
"(",
"'<<'",
",",
"'<<='",
",",
"'>>'",
",",
"'>>='",
",",
"'->*'",
",",
"'->'",
")",
":",
"# Non-relational operator",
"lhs",
"+=",
"token",
"expression",
"=",
"matched",
".",
"group",
"(",
"2",
")",
"else",
":",
"# Relational operator",
"operator",
"=",
"token",
"rhs",
"=",
"matched",
".",
"group",
"(",
"2",
")",
"break",
"else",
":",
"# Unparenthesized operand. Instead of appending to lhs one character",
"# at a time, we do another regular expression match to consume several",
"# characters at once if possible. Trivial benchmark shows that this",
"# is more efficient when the operands are longer than a single",
"# character, which is generally the case.",
"matched",
"=",
"Match",
"(",
"r'^([^-=!<>()&|]+)(.*)$'",
",",
"expression",
")",
"if",
"not",
"matched",
":",
"matched",
"=",
"Match",
"(",
"r'^(\\s*\\S)(.*)$'",
",",
"expression",
")",
"if",
"not",
"matched",
":",
"break",
"lhs",
"+=",
"matched",
".",
"group",
"(",
"1",
")",
"expression",
"=",
"matched",
".",
"group",
"(",
"2",
")",
"# Only apply checks if we got all parts of the boolean expression",
"if",
"not",
"(",
"lhs",
"and",
"operator",
"and",
"rhs",
")",
":",
"return",
"# Check that rhs do not contain logical operators. We already know",
"# that lhs is fine since the loop above parses out && and ||.",
"if",
"rhs",
".",
"find",
"(",
"'&&'",
")",
">",
"-",
"1",
"or",
"rhs",
".",
"find",
"(",
"'||'",
")",
">",
"-",
"1",
":",
"return",
"# At least one of the operands must be a constant literal. This is",
"# to avoid suggesting replacements for unprintable things like",
"# CHECK(variable != iterator)",
"#",
"# The following pattern matches decimal, hex integers, strings, and",
"# characters (in that order).",
"lhs",
"=",
"lhs",
".",
"strip",
"(",
")",
"rhs",
"=",
"rhs",
".",
"strip",
"(",
")",
"match_constant",
"=",
"r'^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|\".*\"|\\'.*\\')$'",
"if",
"Match",
"(",
"match_constant",
",",
"lhs",
")",
"or",
"Match",
"(",
"match_constant",
",",
"rhs",
")",
":",
"# Note: since we know both lhs and rhs, we can provide a more",
"# descriptive error message like:",
"# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)",
"# Instead of:",
"# Consider using CHECK_EQ instead of CHECK(a == b)",
"#",
"# We are still keeping the less descriptive message because if lhs",
"# or rhs gets long, the error message might become unreadable.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/check'",
",",
"2",
",",
"'Consider using %s instead of %s(a %s b)'",
"%",
"(",
"_CHECK_REPLACEMENT",
"[",
"check_macro",
"]",
"[",
"operator",
"]",
",",
"check_macro",
",",
"operator",
")",
")"
] | https://github.com/koth/kcws/blob/88efbd36a7022de4e6e90f5a1fb880cf87cfae9f/third_party/python/cpplint/cpplint.py#L4202-L4317 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/stc.py | python | StyledTextCtrl.MarkerGet | (*args, **kwargs) | return _stc.StyledTextCtrl_MarkerGet(*args, **kwargs) | MarkerGet(self, int line) -> int
Get a bit mask of all the markers set on a line. | MarkerGet(self, int line) -> int | [
"MarkerGet",
"(",
"self",
"int",
"line",
")",
"-",
">",
"int"
] | def MarkerGet(*args, **kwargs):
"""
MarkerGet(self, int line) -> int
Get a bit mask of all the markers set on a line.
"""
return _stc.StyledTextCtrl_MarkerGet(*args, **kwargs) | [
"def",
"MarkerGet",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_MarkerGet",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L2386-L2392 | |
NERSC/timemory | 431912b360ff50d1a160d7826e2eea04fbd1037f | timemory/plotting/plotting.py | python | timemory_data.__add__ | (self, rhs) | return self | for combining results (typically from different MPI processes) | for combining results (typically from different MPI processes) | [
"for",
"combining",
"results",
"(",
"typically",
"from",
"different",
"MPI",
"processes",
")"
] | def __add__(self, rhs):
"""for combining results (typically from different MPI processes)"""
self.laps += rhs.laps
self.value += rhs.value
self.accum += rhs.accum
self.data += rhs.data
return self | [
"def",
"__add__",
"(",
"self",
",",
"rhs",
")",
":",
"self",
".",
"laps",
"+=",
"rhs",
".",
"laps",
"self",
".",
"value",
"+=",
"rhs",
".",
"value",
"self",
".",
"accum",
"+=",
"rhs",
".",
"accum",
"self",
".",
"data",
"+=",
"rhs",
".",
"data",
"return",
"self"
] | https://github.com/NERSC/timemory/blob/431912b360ff50d1a160d7826e2eea04fbd1037f/timemory/plotting/plotting.py#L315-L321 | |
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/nn/utils/prune.py | python | is_pruned | (module) | return False | r"""Check whether ``module`` is pruned by looking for
``forward_pre_hooks`` in its modules that inherit from the
:class:`BasePruningMethod`.
Args:
module (nn.Module): object that is either pruned or unpruned
Returns:
binary answer to whether ``module`` is pruned.
Examples:
>>> m = nn.Linear(5, 7)
>>> print(prune.is_pruned(m))
False
>>> prune.random_unstructured(m, name='weight', amount=0.2)
>>> print(prune.is_pruned(m))
True | r"""Check whether ``module`` is pruned by looking for
``forward_pre_hooks`` in its modules that inherit from the
:class:`BasePruningMethod`. | [
"r",
"Check",
"whether",
"module",
"is",
"pruned",
"by",
"looking",
"for",
"forward_pre_hooks",
"in",
"its",
"modules",
"that",
"inherit",
"from",
"the",
":",
"class",
":",
"BasePruningMethod",
"."
] | def is_pruned(module):
r"""Check whether ``module`` is pruned by looking for
``forward_pre_hooks`` in its modules that inherit from the
:class:`BasePruningMethod`.
Args:
module (nn.Module): object that is either pruned or unpruned
Returns:
binary answer to whether ``module`` is pruned.
Examples:
>>> m = nn.Linear(5, 7)
>>> print(prune.is_pruned(m))
False
>>> prune.random_unstructured(m, name='weight', amount=0.2)
>>> print(prune.is_pruned(m))
True
"""
for _, submodule in module.named_modules():
for _, hook in submodule._forward_pre_hooks.items():
if isinstance(hook, BasePruningMethod):
return True
return False | [
"def",
"is_pruned",
"(",
"module",
")",
":",
"for",
"_",
",",
"submodule",
"in",
"module",
".",
"named_modules",
"(",
")",
":",
"for",
"_",
",",
"hook",
"in",
"submodule",
".",
"_forward_pre_hooks",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"hook",
",",
"BasePruningMethod",
")",
":",
"return",
"True",
"return",
"False"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/nn/utils/prune.py#L1195-L1218 | |
apple/swift-lldb | d74be846ef3e62de946df343e8c234bde93a8912 | scripts/Python/static-binding/lldb.py | python | SBData.CreateDataFromDoubleArray | (endian, addr_byte_size, array) | return _lldb.SBData_CreateDataFromDoubleArray(endian, addr_byte_size, array) | CreateDataFromDoubleArray(lldb::ByteOrder endian, uint32_t addr_byte_size, double * array) -> SBData | CreateDataFromDoubleArray(lldb::ByteOrder endian, uint32_t addr_byte_size, double * array) -> SBData | [
"CreateDataFromDoubleArray",
"(",
"lldb",
"::",
"ByteOrder",
"endian",
"uint32_t",
"addr_byte_size",
"double",
"*",
"array",
")",
"-",
">",
"SBData"
] | def CreateDataFromDoubleArray(endian, addr_byte_size, array):
"""CreateDataFromDoubleArray(lldb::ByteOrder endian, uint32_t addr_byte_size, double * array) -> SBData"""
return _lldb.SBData_CreateDataFromDoubleArray(endian, addr_byte_size, array) | [
"def",
"CreateDataFromDoubleArray",
"(",
"endian",
",",
"addr_byte_size",
",",
"array",
")",
":",
"return",
"_lldb",
".",
"SBData_CreateDataFromDoubleArray",
"(",
"endian",
",",
"addr_byte_size",
",",
"array",
")"
] | https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L3457-L3459 | |
RLBot/RLBot | 34332b12cf158b3ef8dbf174ae67c53683368a9d | src/main/python/rlbot/utils/class_importer.py | python | import_agent | (python_file) | return import_class_with_base(python_file, BaseAgent) | Imports the first class that extends BaseAgent.
:param python_file: The absolute path of the bot's main python file
:return: The agent requested or BaseAgent if there are any problems. | Imports the first class that extends BaseAgent. | [
"Imports",
"the",
"first",
"class",
"that",
"extends",
"BaseAgent",
"."
] | def import_agent(python_file) -> ExternalClassWrapper:
"""
Imports the first class that extends BaseAgent.
:param python_file: The absolute path of the bot's main python file
:return: The agent requested or BaseAgent if there are any problems.
"""
return import_class_with_base(python_file, BaseAgent) | [
"def",
"import_agent",
"(",
"python_file",
")",
"->",
"ExternalClassWrapper",
":",
"return",
"import_class_with_base",
"(",
"python_file",
",",
"BaseAgent",
")"
] | https://github.com/RLBot/RLBot/blob/34332b12cf158b3ef8dbf174ae67c53683368a9d/src/main/python/rlbot/utils/class_importer.py#L81-L88 | |
bundy-dns/bundy | 3d41934996b82b0cd2fe22dd74d2abc1daba835d | src/lib/python/bundy/config/ccsession.py | python | RPCError.code | (self) | return self.__code | The code as sent over the CC. | The code as sent over the CC. | [
"The",
"code",
"as",
"sent",
"over",
"the",
"CC",
"."
] | def code(self):
"""
The code as sent over the CC.
"""
return self.__code | [
"def",
"code",
"(",
"self",
")",
":",
"return",
"self",
".",
"__code"
] | https://github.com/bundy-dns/bundy/blob/3d41934996b82b0cd2fe22dd74d2abc1daba835d/src/lib/python/bundy/config/ccsession.py#L64-L68 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | StopWatch.Pause | (*args, **kwargs) | return _misc_.StopWatch_Pause(*args, **kwargs) | Pause(self) | Pause(self) | [
"Pause",
"(",
"self",
")"
] | def Pause(*args, **kwargs):
"""Pause(self)"""
return _misc_.StopWatch_Pause(*args, **kwargs) | [
"def",
"Pause",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"StopWatch_Pause",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L887-L889 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/urllib.py | python | proxy_bypass_environment | (host) | return 0 | Test if proxies should not be used for a particular host.
Checks the environment for a variable named no_proxy, which should
be a list of DNS suffixes separated by commas, or '*' for all hosts. | Test if proxies should not be used for a particular host. | [
"Test",
"if",
"proxies",
"should",
"not",
"be",
"used",
"for",
"a",
"particular",
"host",
"."
] | def proxy_bypass_environment(host):
"""Test if proxies should not be used for a particular host.
Checks the environment for a variable named no_proxy, which should
be a list of DNS suffixes separated by commas, or '*' for all hosts.
"""
no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
# '*' is special case for always bypass
if no_proxy == '*':
return 1
# strip port off host
hostonly, port = splitport(host)
# check if the host ends with any of the DNS suffixes
no_proxy_list = [proxy.strip() for proxy in no_proxy.split(',')]
for name in no_proxy_list:
if name and (hostonly.endswith(name) or host.endswith(name)):
return 1
# otherwise, don't bypass
return 0 | [
"def",
"proxy_bypass_environment",
"(",
"host",
")",
":",
"no_proxy",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'no_proxy'",
",",
"''",
")",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"'NO_PROXY'",
",",
"''",
")",
"# '*' is special case for always bypass",
"if",
"no_proxy",
"==",
"'*'",
":",
"return",
"1",
"# strip port off host",
"hostonly",
",",
"port",
"=",
"splitport",
"(",
"host",
")",
"# check if the host ends with any of the DNS suffixes",
"no_proxy_list",
"=",
"[",
"proxy",
".",
"strip",
"(",
")",
"for",
"proxy",
"in",
"no_proxy",
".",
"split",
"(",
"','",
")",
"]",
"for",
"name",
"in",
"no_proxy_list",
":",
"if",
"name",
"and",
"(",
"hostonly",
".",
"endswith",
"(",
"name",
")",
"or",
"host",
".",
"endswith",
"(",
"name",
")",
")",
":",
"return",
"1",
"# otherwise, don't bypass",
"return",
"0"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/urllib.py#L1371-L1389 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/propgrid.py | python | IntProperty.DoValidation | (*args, **kwargs) | return _propgrid.IntProperty_DoValidation(*args, **kwargs) | DoValidation(PGProperty property, wxLongLong_t value, PGValidationInfo pValidationInfo,
int mode=PG_PROPERTY_VALIDATION_ERROR_MESSAGE) -> bool | DoValidation(PGProperty property, wxLongLong_t value, PGValidationInfo pValidationInfo,
int mode=PG_PROPERTY_VALIDATION_ERROR_MESSAGE) -> bool | [
"DoValidation",
"(",
"PGProperty",
"property",
"wxLongLong_t",
"value",
"PGValidationInfo",
"pValidationInfo",
"int",
"mode",
"=",
"PG_PROPERTY_VALIDATION_ERROR_MESSAGE",
")",
"-",
">",
"bool"
] | def DoValidation(*args, **kwargs):
"""
DoValidation(PGProperty property, wxLongLong_t value, PGValidationInfo pValidationInfo,
int mode=PG_PROPERTY_VALIDATION_ERROR_MESSAGE) -> bool
"""
return _propgrid.IntProperty_DoValidation(*args, **kwargs) | [
"def",
"DoValidation",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"IntProperty_DoValidation",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/propgrid.py#L2912-L2917 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/array_ops.py | python | squeeze | (input, axis=None, name=None, squeeze_dims=None) | return gen_array_ops._squeeze(input, axis, name) | Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
Must be in the range `[-rank(input), rank(input))`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified. | Removes dimensions of size 1 from the shape of a tensor. | [
"Removes",
"dimensions",
"of",
"size",
"1",
"from",
"the",
"shape",
"of",
"a",
"tensor",
"."
] | def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
Must be in the range `[-rank(input), rank(input))`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
if squeeze_dims is not None:
if axis is not None:
raise ValueError("Cannot specify both 'squeeze_dims' and 'axis'")
axis = squeeze_dims
if np.isscalar(axis):
axis = [axis]
return gen_array_ops._squeeze(input, axis, name) | [
"def",
"squeeze",
"(",
"input",
",",
"axis",
"=",
"None",
",",
"name",
"=",
"None",
",",
"squeeze_dims",
"=",
"None",
")",
":",
"# pylint: disable=redefined-builtin",
"if",
"squeeze_dims",
"is",
"not",
"None",
":",
"if",
"axis",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot specify both 'squeeze_dims' and 'axis'\"",
")",
"axis",
"=",
"squeeze_dims",
"if",
"np",
".",
"isscalar",
"(",
"axis",
")",
":",
"axis",
"=",
"[",
"axis",
"]",
"return",
"gen_array_ops",
".",
"_squeeze",
"(",
"input",
",",
"axis",
",",
"name",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/array_ops.py#L2377-L2423 | |
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/chigger/observers/TimerObserver.py | python | TimerObserver._callback | (self, obj, event) | The function to be called by the RenderWindow.
Inputs:
obj, event: Required by VTK. | The function to be called by the RenderWindow. | [
"The",
"function",
"to",
"be",
"called",
"by",
"the",
"RenderWindow",
"."
] | def _callback(self, obj, event): #pylint: disable=unused-argument
"""
The function to be called by the RenderWindow.
Inputs:
obj, event: Required by VTK.
"""
if self.isOptionValid('count') and (self._count >= self.getOption('count')):
self._window.getVTKInteractor().DestroyTimer()
if self.getOption('terminate'):
self._window.getVTKInteractor().TerminateApp()
return
self.update()
self._count += 1 | [
"def",
"_callback",
"(",
"self",
",",
"obj",
",",
"event",
")",
":",
"#pylint: disable=unused-argument",
"if",
"self",
".",
"isOptionValid",
"(",
"'count'",
")",
"and",
"(",
"self",
".",
"_count",
">=",
"self",
".",
"getOption",
"(",
"'count'",
")",
")",
":",
"self",
".",
"_window",
".",
"getVTKInteractor",
"(",
")",
".",
"DestroyTimer",
"(",
")",
"if",
"self",
".",
"getOption",
"(",
"'terminate'",
")",
":",
"self",
".",
"_window",
".",
"getVTKInteractor",
"(",
")",
".",
"TerminateApp",
"(",
")",
"return",
"self",
".",
"update",
"(",
")",
"self",
".",
"_count",
"+=",
"1"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/chigger/observers/TimerObserver.py#L50-L63 | ||
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/tex.py | python | generate_common | (env) | Add internal Builders and construction variables for LaTeX to an Environment. | Add internal Builders and construction variables for LaTeX to an Environment. | [
"Add",
"internal",
"Builders",
"and",
"construction",
"variables",
"for",
"LaTeX",
"to",
"an",
"Environment",
"."
] | def generate_common(env):
"""Add internal Builders and construction variables for LaTeX to an Environment."""
# Add OSX system paths so TeX tools can be found
# when a list of tools is given the exists() method is not called
generate_darwin(env)
# A generic tex file Action, sufficient for all tex files.
global TeXAction
if TeXAction is None:
TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
# An Action to build a latex file. This might be needed more
# than once if we are dealing with labels and bibtex.
global LaTeXAction
if LaTeXAction is None:
LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
# Define an action to run BibTeX on a file.
global BibTeXAction
if BibTeXAction is None:
BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
# Define an action to run Biber on a file.
global BiberAction
if BiberAction is None:
BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR")
# Define an action to run MakeIndex on a file.
global MakeIndexAction
if MakeIndexAction is None:
MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
# Define an action to run MakeIndex on a file for nomenclatures.
global MakeNclAction
if MakeNclAction is None:
MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
# Define an action to run MakeIndex on a file for glossaries.
global MakeGlossaryAction
if MakeGlossaryAction is None:
MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
# Define an action to run MakeIndex on a file for acronyms.
global MakeAcronymsAction
if MakeAcronymsAction is None:
MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR")
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Some Linux platforms have pdflatex set up in a way
# that requires that the HOME environment variable be set.
# Add it here if defined.
v = os.environ.get('HOME')
if v:
environ['HOME'] = v
CDCOM = 'cd '
if platform.system() == 'Windows':
# allow cd command to change drives on Windows
CDCOM = 'cd /D '
env['TEX'] = 'tex'
env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
env['PDFTEX'] = 'pdftex'
env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
env['LATEX'] = 'latex'
env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 4
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['BIBTEX'] = 'bibtex'
env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
env['BIBER'] = 'biber'
env['BIBERFLAGS'] = SCons.Util.CLVar('')
env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'
env['MAKEINDEX'] = 'makeindex'
env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
env['MAKEGLOSSARY'] = 'makeindex'
env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
env['MAKEACRONYMS'] = 'makeindex'
env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg')
env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'
env['MAKENCL'] = 'makeindex'
env['MAKENCLSTYLE'] = 'nomencl.ist'
env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
env['MAKENEWGLOSSARY'] = 'makeindex'
env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY ' | [
"def",
"generate_common",
"(",
"env",
")",
":",
"# Add OSX system paths so TeX tools can be found",
"# when a list of tools is given the exists() method is not called",
"generate_darwin",
"(",
"env",
")",
"# A generic tex file Action, sufficient for all tex files.",
"global",
"TeXAction",
"if",
"TeXAction",
"is",
"None",
":",
"TeXAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$TEXCOM\"",
",",
"\"$TEXCOMSTR\"",
")",
"# An Action to build a latex file. This might be needed more",
"# than once if we are dealing with labels and bibtex.",
"global",
"LaTeXAction",
"if",
"LaTeXAction",
"is",
"None",
":",
"LaTeXAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$LATEXCOM\"",
",",
"\"$LATEXCOMSTR\"",
")",
"# Define an action to run BibTeX on a file.",
"global",
"BibTeXAction",
"if",
"BibTeXAction",
"is",
"None",
":",
"BibTeXAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$BIBTEXCOM\"",
",",
"\"$BIBTEXCOMSTR\"",
")",
"# Define an action to run Biber on a file.",
"global",
"BiberAction",
"if",
"BiberAction",
"is",
"None",
":",
"BiberAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$BIBERCOM\"",
",",
"\"$BIBERCOMSTR\"",
")",
"# Define an action to run MakeIndex on a file.",
"global",
"MakeIndexAction",
"if",
"MakeIndexAction",
"is",
"None",
":",
"MakeIndexAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$MAKEINDEXCOM\"",
",",
"\"$MAKEINDEXCOMSTR\"",
")",
"# Define an action to run MakeIndex on a file for nomenclatures.",
"global",
"MakeNclAction",
"if",
"MakeNclAction",
"is",
"None",
":",
"MakeNclAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$MAKENCLCOM\"",
",",
"\"$MAKENCLCOMSTR\"",
")",
"# Define an action to run MakeIndex on a file for glossaries.",
"global",
"MakeGlossaryAction",
"if",
"MakeGlossaryAction",
"is",
"None",
":",
"MakeGlossaryAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$MAKEGLOSSARYCOM\"",
",",
"\"$MAKEGLOSSARYCOMSTR\"",
")",
"# Define an action to run MakeIndex on a file for acronyms.",
"global",
"MakeAcronymsAction",
"if",
"MakeAcronymsAction",
"is",
"None",
":",
"MakeAcronymsAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$MAKEACRONYMSCOM\"",
",",
"\"$MAKEACRONYMSCOMSTR\"",
")",
"try",
":",
"environ",
"=",
"env",
"[",
"'ENV'",
"]",
"except",
"KeyError",
":",
"environ",
"=",
"{",
"}",
"env",
"[",
"'ENV'",
"]",
"=",
"environ",
"# Some Linux platforms have pdflatex set up in a way",
"# that requires that the HOME environment variable be set.",
"# Add it here if defined.",
"v",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'HOME'",
")",
"if",
"v",
":",
"environ",
"[",
"'HOME'",
"]",
"=",
"v",
"CDCOM",
"=",
"'cd '",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"# allow cd command to change drives on Windows",
"CDCOM",
"=",
"'cd /D '",
"env",
"[",
"'TEX'",
"]",
"=",
"'tex'",
"env",
"[",
"'TEXFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'-interaction=nonstopmode -recorder'",
")",
"env",
"[",
"'TEXCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'",
"env",
"[",
"'PDFTEX'",
"]",
"=",
"'pdftex'",
"env",
"[",
"'PDFTEXFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'-interaction=nonstopmode -recorder'",
")",
"env",
"[",
"'PDFTEXCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'",
"env",
"[",
"'LATEX'",
"]",
"=",
"'latex'",
"env",
"[",
"'LATEXFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'-interaction=nonstopmode -recorder'",
")",
"env",
"[",
"'LATEXCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'",
"env",
"[",
"'LATEXRETRIES'",
"]",
"=",
"4",
"env",
"[",
"'PDFLATEX'",
"]",
"=",
"'pdflatex'",
"env",
"[",
"'PDFLATEXFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'-interaction=nonstopmode -recorder'",
")",
"env",
"[",
"'PDFLATEXCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'",
"env",
"[",
"'BIBTEX'",
"]",
"=",
"'bibtex'",
"env",
"[",
"'BIBTEXFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"''",
")",
"env",
"[",
"'BIBTEXCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'",
"env",
"[",
"'BIBER'",
"]",
"=",
"'biber'",
"env",
"[",
"'BIBERFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"''",
")",
"env",
"[",
"'BIBERCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'",
"env",
"[",
"'MAKEINDEX'",
"]",
"=",
"'makeindex'",
"env",
"[",
"'MAKEINDEXFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"''",
")",
"env",
"[",
"'MAKEINDEXCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'",
"env",
"[",
"'MAKEGLOSSARY'",
"]",
"=",
"'makeindex'",
"env",
"[",
"'MAKEGLOSSARYSTYLE'",
"]",
"=",
"'${SOURCE.filebase}.ist'",
"env",
"[",
"'MAKEGLOSSARYFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg'",
")",
"env",
"[",
"'MAKEGLOSSARYCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'",
"env",
"[",
"'MAKEACRONYMS'",
"]",
"=",
"'makeindex'",
"env",
"[",
"'MAKEACRONYMSSTYLE'",
"]",
"=",
"'${SOURCE.filebase}.ist'",
"env",
"[",
"'MAKEACRONYMSFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg'",
")",
"env",
"[",
"'MAKEACRONYMSCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'",
"env",
"[",
"'MAKENCL'",
"]",
"=",
"'makeindex'",
"env",
"[",
"'MAKENCLSTYLE'",
"]",
"=",
"'nomencl.ist'",
"env",
"[",
"'MAKENCLFLAGS'",
"]",
"=",
"'-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'",
"env",
"[",
"'MAKENCLCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'",
"env",
"[",
"'MAKENEWGLOSSARY'",
"]",
"=",
"'makeindex'",
"env",
"[",
"'MAKENEWGLOSSARYCOM'",
"]",
"=",
"CDCOM",
"+",
"'${TARGET.dir} && $MAKENEWGLOSSARY '"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/tex.py#L865-L976 | ||
root-project/root | fcd3583bb14852bf2e8cd2415717cbaac0e75896 | bindings/pyroot/cppyy/cppyy-backend/cling/python/cppyy_backend/_cppyy_generator.py | python | main | (argv=None) | Takes a set of C++ header files and generate a JSON output file describing
the objects found in them. This output is intended to support more
convenient access to a set of cppyy-supported bindings.
Examples:
INC=/usr/include
QT5=$INC/x86_64-linux-gnu/qt5
KF5=$INC/KF5
INCDIRS="\\\\-I$KF5/KConfigCore;\\\\-I$QT5/QtXml;\\\\-I$QT5/QtCore"
STDDIRS="\\\\-I$Qt5/mkspecs/linux-g++-64\\\\;-I$KF5;\\\\-I$QT5"
FLAGS="\\\\-fvisibility=hidden;\\\-D__PIC__;\\\\-Wno-macro-redefined;\\\\-std=c++14"
cppyy-generator --flags "$FLAGS;$INCDIRS;$STDDIRS" KF5/Config/Config.map $INC/KF5/KConfigCore/* | Takes a set of C++ header files and generate a JSON output file describing
the objects found in them. This output is intended to support more
convenient access to a set of cppyy-supported bindings. | [
"Takes",
"a",
"set",
"of",
"C",
"++",
"header",
"files",
"and",
"generate",
"a",
"JSON",
"output",
"file",
"describing",
"the",
"objects",
"found",
"in",
"them",
".",
"This",
"output",
"is",
"intended",
"to",
"support",
"more",
"convenient",
"access",
"to",
"a",
"set",
"of",
"cppyy",
"-",
"supported",
"bindings",
"."
] | def main(argv=None):
"""
Takes a set of C++ header files and generate a JSON output file describing
the objects found in them. This output is intended to support more
convenient access to a set of cppyy-supported bindings.
Examples:
INC=/usr/include
QT5=$INC/x86_64-linux-gnu/qt5
KF5=$INC/KF5
INCDIRS="\\\\-I$KF5/KConfigCore;\\\\-I$QT5/QtXml;\\\\-I$QT5/QtCore"
STDDIRS="\\\\-I$Qt5/mkspecs/linux-g++-64\\\\;-I$KF5;\\\\-I$QT5"
FLAGS="\\\\-fvisibility=hidden;\\\-D__PIC__;\\\\-Wno-macro-redefined;\\\\-std=c++14"
cppyy-generator --flags "$FLAGS;$INCDIRS;$STDDIRS" KF5/Config/Config.map $INC/KF5/KConfigCore/*
"""
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(epilog=inspect.getdoc(main),
formatter_class=HelpFormatter)
parser.add_argument("-v", "--verbose", action="store_true", default=False, help=_("Enable verbose output"))
parser.add_argument("--flags", default="",
help=_("Semicolon-separated C++ compile flags to use, escape leading - or -- with \\"))
parser.add_argument("--libclang", help=_("libclang library to use for parsing"))
parser.add_argument("output", help=_("Output filename to write"))
parser.add_argument("sources", nargs="+", help=_("C++ headers to process"))
try:
args = parser.parse_args(argv[1:])
if args.verbose:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)s %(levelname)s: %(message)s')
else:
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
flags = []
for f in args.flags.lstrip().split(";"):
if f.startswith("\\-\\-"):
flags.append("--" + f[4:])
elif f.startswith("\\-"):
flags.append("-" + f[2:])
elif f:
flags.append(f)
#
# Load the given libclang.
#
if args.libclang:
Config.set_library_file(args.libclang)
hpath = getBuiltinHeaderPath(args.libclang)
if hpath:
flags = ['-I'+hpath] + flags
lib = Config().lib
import ctypes
from clang.cindex import Type
items = [
("clang_Type_getNumTemplateArguments", [Type], ctypes.c_size_t),
]
for item in items:
func = getattr(lib, item[0])
if len(item) >= 2:
func.argtypes = item[1]
if len(item) >= 3:
func.restype = item[2]
if len(item) == 4:
func.errcheck = item[3]
#
# Generate!
#
g = CppyyGenerator(flags, verbose=args.verbose)
mapping = g.create_mapping(args.sources)
with open(args.output, "w") as f:
json.dump(mapping, f, indent=1, sort_keys=True)
return 0
except Exception as e:
tbk = traceback.format_exc()
print(tbk)
return 1 | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"if",
"argv",
"is",
"None",
":",
"argv",
"=",
"sys",
".",
"argv",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"epilog",
"=",
"inspect",
".",
"getdoc",
"(",
"main",
")",
",",
"formatter_class",
"=",
"HelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"_",
"(",
"\"Enable verbose output\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"--flags\"",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"_",
"(",
"\"Semicolon-separated C++ compile flags to use, escape leading - or -- with \\\\\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"--libclang\"",
",",
"help",
"=",
"_",
"(",
"\"libclang library to use for parsing\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"output\"",
",",
"help",
"=",
"_",
"(",
"\"Output filename to write\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"sources\"",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"_",
"(",
"\"C++ headers to process\"",
")",
")",
"try",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
"[",
"1",
":",
"]",
")",
"if",
"args",
".",
"verbose",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"'%(asctime)s %(name)s %(levelname)s: %(message)s'",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"format",
"=",
"'%(levelname)s: %(message)s'",
")",
"flags",
"=",
"[",
"]",
"for",
"f",
"in",
"args",
".",
"flags",
".",
"lstrip",
"(",
")",
".",
"split",
"(",
"\";\"",
")",
":",
"if",
"f",
".",
"startswith",
"(",
"\"\\\\-\\\\-\"",
")",
":",
"flags",
".",
"append",
"(",
"\"--\"",
"+",
"f",
"[",
"4",
":",
"]",
")",
"elif",
"f",
".",
"startswith",
"(",
"\"\\\\-\"",
")",
":",
"flags",
".",
"append",
"(",
"\"-\"",
"+",
"f",
"[",
"2",
":",
"]",
")",
"elif",
"f",
":",
"flags",
".",
"append",
"(",
"f",
")",
"#",
"# Load the given libclang.",
"#",
"if",
"args",
".",
"libclang",
":",
"Config",
".",
"set_library_file",
"(",
"args",
".",
"libclang",
")",
"hpath",
"=",
"getBuiltinHeaderPath",
"(",
"args",
".",
"libclang",
")",
"if",
"hpath",
":",
"flags",
"=",
"[",
"'-I'",
"+",
"hpath",
"]",
"+",
"flags",
"lib",
"=",
"Config",
"(",
")",
".",
"lib",
"import",
"ctypes",
"from",
"clang",
".",
"cindex",
"import",
"Type",
"items",
"=",
"[",
"(",
"\"clang_Type_getNumTemplateArguments\"",
",",
"[",
"Type",
"]",
",",
"ctypes",
".",
"c_size_t",
")",
",",
"]",
"for",
"item",
"in",
"items",
":",
"func",
"=",
"getattr",
"(",
"lib",
",",
"item",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"item",
")",
">=",
"2",
":",
"func",
".",
"argtypes",
"=",
"item",
"[",
"1",
"]",
"if",
"len",
"(",
"item",
")",
">=",
"3",
":",
"func",
".",
"restype",
"=",
"item",
"[",
"2",
"]",
"if",
"len",
"(",
"item",
")",
"==",
"4",
":",
"func",
".",
"errcheck",
"=",
"item",
"[",
"3",
"]",
"#",
"# Generate!",
"#",
"g",
"=",
"CppyyGenerator",
"(",
"flags",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"mapping",
"=",
"g",
".",
"create_mapping",
"(",
"args",
".",
"sources",
")",
"with",
"open",
"(",
"args",
".",
"output",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"mapping",
",",
"f",
",",
"indent",
"=",
"1",
",",
"sort_keys",
"=",
"True",
")",
"return",
"0",
"except",
"Exception",
"as",
"e",
":",
"tbk",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"print",
"(",
"tbk",
")",
"return",
"1"
] | https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/bindings/pyroot/cppyy/cppyy-backend/cling/python/cppyy_backend/_cppyy_generator.py#L666-L742 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/tornado/tornado-6/tornado/web.py | python | RequestHandler.set_status | (self, status_code: int, reason: Optional[str] = None) | Sets the status code for our response.
:arg int status_code: Response status code.
:arg str reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`http.client.responses` or "Unknown".
.. versionchanged:: 5.0
No longer validates that the response code is in
`http.client.responses`. | Sets the status code for our response. | [
"Sets",
"the",
"status",
"code",
"for",
"our",
"response",
"."
] | def set_status(self, status_code: int, reason: Optional[str] = None) -> None:
"""Sets the status code for our response.
:arg int status_code: Response status code.
:arg str reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`http.client.responses` or "Unknown".
.. versionchanged:: 5.0
No longer validates that the response code is in
`http.client.responses`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
self._reason = httputil.responses.get(status_code, "Unknown") | [
"def",
"set_status",
"(",
"self",
",",
"status_code",
":",
"int",
",",
"reason",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"_status_code",
"=",
"status_code",
"if",
"reason",
"is",
"not",
"None",
":",
"self",
".",
"_reason",
"=",
"escape",
".",
"native_str",
"(",
"reason",
")",
"else",
":",
"self",
".",
"_reason",
"=",
"httputil",
".",
"responses",
".",
"get",
"(",
"status_code",
",",
"\"Unknown\"",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/tornado/tornado-6/tornado/web.py#L343-L360 | ||
jeog/TDAmeritradeAPI | 91c738afd7d57b54f6231170bd64c2550fafd34d | python/tdma_api/get.py | python | _OptionChainGetterBase.get_contract_type | (self) | return clib.get_val('OptionChainGetter_GetContractType_ABI', c_int,
self._obj) | Returns OPTION_CONTRACT_TYPE_[] constant being used. | Returns OPTION_CONTRACT_TYPE_[] constant being used. | [
"Returns",
"OPTION_CONTRACT_TYPE_",
"[]",
"constant",
"being",
"used",
"."
] | def get_contract_type(self):
"""Returns OPTION_CONTRACT_TYPE_[] constant being used."""
return clib.get_val('OptionChainGetter_GetContractType_ABI', c_int,
self._obj) | [
"def",
"get_contract_type",
"(",
"self",
")",
":",
"return",
"clib",
".",
"get_val",
"(",
"'OptionChainGetter_GetContractType_ABI'",
",",
"c_int",
",",
"self",
".",
"_obj",
")"
] | https://github.com/jeog/TDAmeritradeAPI/blob/91c738afd7d57b54f6231170bd64c2550fafd34d/python/tdma_api/get.py#L821-L824 | |
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/traci/_lane.py | python | LaneDomain.getCO2Emission | (self, laneID) | return self._getUniversal(tc.VAR_CO2EMISSION, laneID) | getCO2Emission(string) -> double
Returns the CO2 emission in mg for the last time step on the given lane. | getCO2Emission(string) -> double | [
"getCO2Emission",
"(",
"string",
")",
"-",
">",
"double"
] | def getCO2Emission(self, laneID):
"""getCO2Emission(string) -> double
Returns the CO2 emission in mg for the last time step on the given lane.
"""
return self._getUniversal(tc.VAR_CO2EMISSION, laneID) | [
"def",
"getCO2Emission",
"(",
"self",
",",
"laneID",
")",
":",
"return",
"self",
".",
"_getUniversal",
"(",
"tc",
".",
"VAR_CO2EMISSION",
",",
"laneID",
")"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/traci/_lane.py#L144-L149 | |
amazon-archives/amazon-dsstne | e429ea811135a2ba3d69b2f7af496b791a61e962 | benchmarks/tf/autoencoder.py | python | AutoencoderParser.__init__ | (self, cmd) | Takes a argparse command as configuration.
Loads data, and makes it accessible as member variables:
Accessible members:
train: MiniBatcher object for training | Takes a argparse command as configuration.
Loads data, and makes it accessible as member variables:
Accessible members:
train: MiniBatcher object for training | [
"Takes",
"a",
"argparse",
"command",
"as",
"configuration",
".",
"Loads",
"data",
"and",
"makes",
"it",
"accessible",
"as",
"member",
"variables",
":",
"Accessible",
"members",
":",
"train",
":",
"MiniBatcher",
"object",
"for",
"training"
] | def __init__(self, cmd):
"""Takes a argparse command as configuration.
Loads data, and makes it accessible as member variables:
Accessible members:
train: MiniBatcher object for training
"""
# Parse config from command
dims = cmd.vocab_size
# Set up loader
mgr = DataManager(dims)
# Load train data
train_x = mgr.load(cmd.datafile)
train_y = train_x
self.train = MiniBatcher(train_x,train_y) | [
"def",
"__init__",
"(",
"self",
",",
"cmd",
")",
":",
"# Parse config from command",
"dims",
"=",
"cmd",
".",
"vocab_size",
"# Set up loader",
"mgr",
"=",
"DataManager",
"(",
"dims",
")",
"# Load train data",
"train_x",
"=",
"mgr",
".",
"load",
"(",
"cmd",
".",
"datafile",
")",
"train_y",
"=",
"train_x",
"self",
".",
"train",
"=",
"MiniBatcher",
"(",
"train_x",
",",
"train_y",
")"
] | https://github.com/amazon-archives/amazon-dsstne/blob/e429ea811135a2ba3d69b2f7af496b791a61e962/benchmarks/tf/autoencoder.py#L143-L158 | ||
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/contributed/sumopy/agilepy/lib_wx/ogleditor.py | python | Polygons.pick_handle | (self, coord, detectwidth=0.1) | return handles | Retuns list [ id, ind_vert] when handle is near coord,
otherwise [] | Retuns list [ id, ind_vert] when handle is near coord,
otherwise [] | [
"Retuns",
"list",
"[",
"id",
"ind_vert",
"]",
"when",
"handle",
"is",
"near",
"coord",
"otherwise",
"[]"
] | def pick_handle(self, coord, detectwidth=0.1):
"""
Retuns list [ id, ind_vert] when handle is near coord,
otherwise []
"""
# print 'pick_handle',self.get_ident(),len(self),detectwidth
dw = detectwidth ** 2 # np.sqrt(detectwidth)
if len(self) == 0:
return np.zeros((0, 2), np.int)
# if self.ident not in [ 'lines','fancylines','polylines']:
# return np.zeros((0,2),np.int)
vertices = self._linevertices # self.get_vertices_array()
handles = []
# print ' vertices',vertices
# print ' vertices.shape',vertices.shape
dx = vertices[:, 0, 0]-coord[0]
dy = vertices[:, 0, 1]-coord[1]
inds = np.flatnonzero(dx*dx+dy*dy < dw)
ids = self._ids[self._polyinds[inds]]
handle1 = np.ones((len(ids), 2), np.int)
handle1[:, 0] = ids
handle1[:, 1] = self._vertexinds[inds, 0]
# print ' ',d,handle1
dx = vertices[:, 1, 0]-coord[0]
dy = vertices[:, 1, 1]-coord[1]
inds = np.flatnonzero(dx*dx+dy*dy < dw)
ids = self._ids[self._polyinds[inds]]
handle2 = np.ones((len(ids), 2), np.int)
handle2[:, 0] = ids
handle2[:, 1] = self._vertexinds[inds, 1]
# print ' ',d,handle2
handles = np.concatenate((handle1, handle2), 0)
# print ' found',len(np.flatnonzero(handles))
return handles | [
"def",
"pick_handle",
"(",
"self",
",",
"coord",
",",
"detectwidth",
"=",
"0.1",
")",
":",
"# print 'pick_handle',self.get_ident(),len(self),detectwidth",
"dw",
"=",
"detectwidth",
"**",
"2",
"# np.sqrt(detectwidth)",
"if",
"len",
"(",
"self",
")",
"==",
"0",
":",
"return",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
"2",
")",
",",
"np",
".",
"int",
")",
"# if self.ident not in [ 'lines','fancylines','polylines']:",
"# return np.zeros((0,2),np.int)",
"vertices",
"=",
"self",
".",
"_linevertices",
"# self.get_vertices_array()",
"handles",
"=",
"[",
"]",
"# print ' vertices',vertices",
"# print ' vertices.shape',vertices.shape",
"dx",
"=",
"vertices",
"[",
":",
",",
"0",
",",
"0",
"]",
"-",
"coord",
"[",
"0",
"]",
"dy",
"=",
"vertices",
"[",
":",
",",
"0",
",",
"1",
"]",
"-",
"coord",
"[",
"1",
"]",
"inds",
"=",
"np",
".",
"flatnonzero",
"(",
"dx",
"*",
"dx",
"+",
"dy",
"*",
"dy",
"<",
"dw",
")",
"ids",
"=",
"self",
".",
"_ids",
"[",
"self",
".",
"_polyinds",
"[",
"inds",
"]",
"]",
"handle1",
"=",
"np",
".",
"ones",
"(",
"(",
"len",
"(",
"ids",
")",
",",
"2",
")",
",",
"np",
".",
"int",
")",
"handle1",
"[",
":",
",",
"0",
"]",
"=",
"ids",
"handle1",
"[",
":",
",",
"1",
"]",
"=",
"self",
".",
"_vertexinds",
"[",
"inds",
",",
"0",
"]",
"# print ' ',d,handle1",
"dx",
"=",
"vertices",
"[",
":",
",",
"1",
",",
"0",
"]",
"-",
"coord",
"[",
"0",
"]",
"dy",
"=",
"vertices",
"[",
":",
",",
"1",
",",
"1",
"]",
"-",
"coord",
"[",
"1",
"]",
"inds",
"=",
"np",
".",
"flatnonzero",
"(",
"dx",
"*",
"dx",
"+",
"dy",
"*",
"dy",
"<",
"dw",
")",
"ids",
"=",
"self",
".",
"_ids",
"[",
"self",
".",
"_polyinds",
"[",
"inds",
"]",
"]",
"handle2",
"=",
"np",
".",
"ones",
"(",
"(",
"len",
"(",
"ids",
")",
",",
"2",
")",
",",
"np",
".",
"int",
")",
"handle2",
"[",
":",
",",
"0",
"]",
"=",
"ids",
"handle2",
"[",
":",
",",
"1",
"]",
"=",
"self",
".",
"_vertexinds",
"[",
"inds",
",",
"1",
"]",
"# print ' ',d,handle2",
"handles",
"=",
"np",
".",
"concatenate",
"(",
"(",
"handle1",
",",
"handle2",
")",
",",
"0",
")",
"# print ' found',len(np.flatnonzero(handles))",
"return",
"handles"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/agilepy/lib_wx/ogleditor.py#L3957-L3997 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/cmd.py | python | Cmd.parseline | (self, line) | return cmd, arg, line | Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed. | Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed. | [
"Parse",
"the",
"line",
"into",
"a",
"command",
"name",
"and",
"a",
"string",
"containing",
"the",
"arguments",
".",
"Returns",
"a",
"tuple",
"containing",
"(",
"command",
"args",
"line",
")",
".",
"command",
"and",
"args",
"may",
"be",
"None",
"if",
"the",
"line",
"couldn",
"t",
"be",
"parsed",
"."
] | def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line | [
"def",
"parseline",
"(",
"self",
",",
"line",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
":",
"return",
"None",
",",
"None",
",",
"line",
"elif",
"line",
"[",
"0",
"]",
"==",
"'?'",
":",
"line",
"=",
"'help '",
"+",
"line",
"[",
"1",
":",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'!'",
":",
"if",
"hasattr",
"(",
"self",
",",
"'do_shell'",
")",
":",
"line",
"=",
"'shell '",
"+",
"line",
"[",
"1",
":",
"]",
"else",
":",
"return",
"None",
",",
"None",
",",
"line",
"i",
",",
"n",
"=",
"0",
",",
"len",
"(",
"line",
")",
"while",
"i",
"<",
"n",
"and",
"line",
"[",
"i",
"]",
"in",
"self",
".",
"identchars",
":",
"i",
"=",
"i",
"+",
"1",
"cmd",
",",
"arg",
"=",
"line",
"[",
":",
"i",
"]",
",",
"line",
"[",
"i",
":",
"]",
".",
"strip",
"(",
")",
"return",
"cmd",
",",
"arg",
",",
"line"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/cmd.py#L176-L194 | |
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/optimizer.py | python | PipelineOptimizer._find_post_op | (self, index, var_name) | return result_op | Find the post op that has variable named var_name as input. | Find the post op that has variable named var_name as input. | [
"Find",
"the",
"post",
"op",
"that",
"has",
"variable",
"named",
"var_name",
"as",
"input",
"."
] | def _find_post_op(self, index, var_name):
"""
Find the post op that has variable named var_name as input.
"""
# bugfix for uniform hybrid parallelism
if '.cast_fp32' in var_name:
var_name = var_name.replace('.cast_fp32', '')
if '.cast_fp16' in var_name:
var_name = var_name.replace('.cast_fp16', '')
post_ops = self.input_var_to_op[var_name]
if post_ops == None: return None
result_op = None
for post_op, post_idx in reversed(post_ops):
if post_idx > index:
result_op = post_op
break
return result_op | [
"def",
"_find_post_op",
"(",
"self",
",",
"index",
",",
"var_name",
")",
":",
"# bugfix for uniform hybrid parallelism",
"if",
"'.cast_fp32'",
"in",
"var_name",
":",
"var_name",
"=",
"var_name",
".",
"replace",
"(",
"'.cast_fp32'",
",",
"''",
")",
"if",
"'.cast_fp16'",
"in",
"var_name",
":",
"var_name",
"=",
"var_name",
".",
"replace",
"(",
"'.cast_fp16'",
",",
"''",
")",
"post_ops",
"=",
"self",
".",
"input_var_to_op",
"[",
"var_name",
"]",
"if",
"post_ops",
"==",
"None",
":",
"return",
"None",
"result_op",
"=",
"None",
"for",
"post_op",
",",
"post_idx",
"in",
"reversed",
"(",
"post_ops",
")",
":",
"if",
"post_idx",
">",
"index",
":",
"result_op",
"=",
"post_op",
"break",
"return",
"result_op"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/optimizer.py#L4663-L4680 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py | python | Mailbox.get_message | (self, key) | Return a Message representation or raise a KeyError. | Return a Message representation or raise a KeyError. | [
"Return",
"a",
"Message",
"representation",
"or",
"raise",
"a",
"KeyError",
"."
] | def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass') | [
"def",
"get_message",
"(",
"self",
",",
"key",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Method must be implemented by subclass'",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py#L86-L88 | ||
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/mhlib.py | python | Folder.getcurrent | (self) | Return the current message. Raise Error when there is none. | Return the current message. Raise Error when there is none. | [
"Return",
"the",
"current",
"message",
".",
"Raise",
"Error",
"when",
"there",
"is",
"none",
"."
] | def getcurrent(self):
"""Return the current message. Raise Error when there is none."""
seqs = self.getsequences()
try:
return max(seqs['cur'])
except (ValueError, KeyError):
raise Error, "no cur message" | [
"def",
"getcurrent",
"(",
"self",
")",
":",
"seqs",
"=",
"self",
".",
"getsequences",
"(",
")",
"try",
":",
"return",
"max",
"(",
"seqs",
"[",
"'cur'",
"]",
")",
"except",
"(",
"ValueError",
",",
"KeyError",
")",
":",
"raise",
"Error",
",",
"\"no cur message\""
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/mhlib.py#L334-L340 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/dataview.py | python | DataViewIconTextRenderer.__init__ | (self, *args, **kwargs) | __init__(self, String varianttype="wxDataViewIconText", int mode=DATAVIEW_CELL_INERT,
int align=DVR_DEFAULT_ALIGNMENT) -> DataViewIconTextRenderer
The `DataViewIconTextRenderer` class is used to display text with a
small icon next to it as it is typically done in a file manager. This
class uses the `DataViewIconText` helper class to store its
data. | __init__(self, String varianttype="wxDataViewIconText", int mode=DATAVIEW_CELL_INERT,
int align=DVR_DEFAULT_ALIGNMENT) -> DataViewIconTextRenderer | [
"__init__",
"(",
"self",
"String",
"varianttype",
"=",
"wxDataViewIconText",
"int",
"mode",
"=",
"DATAVIEW_CELL_INERT",
"int",
"align",
"=",
"DVR_DEFAULT_ALIGNMENT",
")",
"-",
">",
"DataViewIconTextRenderer"
] | def __init__(self, *args, **kwargs):
"""
__init__(self, String varianttype="wxDataViewIconText", int mode=DATAVIEW_CELL_INERT,
int align=DVR_DEFAULT_ALIGNMENT) -> DataViewIconTextRenderer
The `DataViewIconTextRenderer` class is used to display text with a
small icon next to it as it is typically done in a file manager. This
class uses the `DataViewIconText` helper class to store its
data.
"""
_dataview.DataViewIconTextRenderer_swiginit(self,_dataview.new_DataViewIconTextRenderer(*args, **kwargs)) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_dataview",
".",
"DataViewIconTextRenderer_swiginit",
"(",
"self",
",",
"_dataview",
".",
"new_DataViewIconTextRenderer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/dataview.py#L1273-L1283 | ||
rdkit/rdkit | ede860ae316d12d8568daf5ee800921c3389c84e | External/pymol/modules/pymol/rpc.py | python | rpcSet | (prop, val, obj) | executes a PyMol set command
return value is either the result of the command or the empty string | executes a PyMol set command
return value is either the result of the command or the empty string | [
"executes",
"a",
"PyMol",
"set",
"command",
"return",
"value",
"is",
"either",
"the",
"result",
"of",
"the",
"command",
"or",
"the",
"empty",
"string"
] | def rpcSet(prop, val, obj):
""" executes a PyMol set command
return value is either the result of the command or the empty string
"""
res = cmd.set(prop, val, obj)
if res is not None:
return res
else:
return '' | [
"def",
"rpcSet",
"(",
"prop",
",",
"val",
",",
"obj",
")",
":",
"res",
"=",
"cmd",
".",
"set",
"(",
"prop",
",",
"val",
",",
"obj",
")",
"if",
"res",
"is",
"not",
"None",
":",
"return",
"res",
"else",
":",
"return",
"''"
] | https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/External/pymol/modules/pymol/rpc.py#L52-L62 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/imaplib.py | python | IMAP4.logout | (self) | return typ, dat | Shutdown connection to server.
(typ, [data]) = <instance>.logout()
Returns server 'BYE' response. | Shutdown connection to server. | [
"Shutdown",
"connection",
"to",
"server",
"."
] | def logout(self):
"""Shutdown connection to server.
(typ, [data]) = <instance>.logout()
Returns server 'BYE' response.
"""
self.state = 'LOGOUT'
try: typ, dat = self._simple_command('LOGOUT')
except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
self.shutdown()
if 'BYE' in self.untagged_responses:
return 'BYE', self.untagged_responses['BYE']
return typ, dat | [
"def",
"logout",
"(",
"self",
")",
":",
"self",
".",
"state",
"=",
"'LOGOUT'",
"try",
":",
"typ",
",",
"dat",
"=",
"self",
".",
"_simple_command",
"(",
"'LOGOUT'",
")",
"except",
":",
"typ",
",",
"dat",
"=",
"'NO'",
",",
"[",
"'%s: %s'",
"%",
"sys",
".",
"exc_info",
"(",
")",
"[",
":",
"2",
"]",
"]",
"self",
".",
"shutdown",
"(",
")",
"if",
"'BYE'",
"in",
"self",
".",
"untagged_responses",
":",
"return",
"'BYE'",
",",
"self",
".",
"untagged_responses",
"[",
"'BYE'",
"]",
"return",
"typ",
",",
"dat"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/imaplib.py#L620-L633 | |
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pkg_resources.py | python | Distribution.as_requirement | (self) | return Requirement.parse('%s==%s' % (self.project_name, self.version)) | Return a ``Requirement`` that matches this distribution exactly | Return a ``Requirement`` that matches this distribution exactly | [
"Return",
"a",
"Requirement",
"that",
"matches",
"this",
"distribution",
"exactly"
] | def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version)) | [
"def",
"as_requirement",
"(",
"self",
")",
":",
"return",
"Requirement",
".",
"parse",
"(",
"'%s==%s'",
"%",
"(",
"self",
".",
"project_name",
",",
"self",
".",
"version",
")",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pkg_resources.py#L2385-L2387 | |
microsoft/CNTK | e9396480025b9ca457d26b6f33dd07c474c6aa04 | Examples/Image/Detection/FastRCNN/BrainScript/selectivesearch/selectivesearch.py | python | _sim_fill | (r1, r2, imsize) | return 1.0 - (bbsize - r1["size"] - r2["size"]) / imsize | calculate the fill similarity over the image | calculate the fill similarity over the image | [
"calculate",
"the",
"fill",
"similarity",
"over",
"the",
"image"
] | def _sim_fill(r1, r2, imsize):
"""
calculate the fill similarity over the image
"""
bbsize = (
(max(r1["max_x"], r2["max_x"]) - min(r1["min_x"], r2["min_x"]))
* (max(r1["max_y"], r2["max_y"]) - min(r1["min_y"], r2["min_y"]))
)
return 1.0 - (bbsize - r1["size"] - r2["size"]) / imsize | [
"def",
"_sim_fill",
"(",
"r1",
",",
"r2",
",",
"imsize",
")",
":",
"bbsize",
"=",
"(",
"(",
"max",
"(",
"r1",
"[",
"\"max_x\"",
"]",
",",
"r2",
"[",
"\"max_x\"",
"]",
")",
"-",
"min",
"(",
"r1",
"[",
"\"min_x\"",
"]",
",",
"r2",
"[",
"\"min_x\"",
"]",
")",
")",
"*",
"(",
"max",
"(",
"r1",
"[",
"\"max_y\"",
"]",
",",
"r2",
"[",
"\"max_y\"",
"]",
")",
"-",
"min",
"(",
"r1",
"[",
"\"min_y\"",
"]",
",",
"r2",
"[",
"\"min_y\"",
"]",
")",
")",
")",
"return",
"1.0",
"-",
"(",
"bbsize",
"-",
"r1",
"[",
"\"size\"",
"]",
"-",
"r2",
"[",
"\"size\"",
"]",
")",
"/",
"imsize"
] | https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/Examples/Image/Detection/FastRCNN/BrainScript/selectivesearch/selectivesearch.py#L58-L66 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/docutils/statemachine.py | python | StateMachine.add_states | (self, state_classes) | Add `state_classes` (a list of `State` subclasses). | Add `state_classes` (a list of `State` subclasses). | [
"Add",
"state_classes",
"(",
"a",
"list",
"of",
"State",
"subclasses",
")",
"."
] | def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class) | [
"def",
"add_states",
"(",
"self",
",",
"state_classes",
")",
":",
"for",
"state_class",
"in",
"state_classes",
":",
"self",
".",
"add_state",
"(",
"state_class",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/statemachine.py#L480-L485 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_misc.py | python | PlatformInformation.GetEndianness | (*args, **kwargs) | return _misc_.PlatformInformation_GetEndianness(*args, **kwargs) | GetEndianness(self) -> int | GetEndianness(self) -> int | [
"GetEndianness",
"(",
"self",
")",
"-",
">",
"int"
] | def GetEndianness(*args, **kwargs):
"""GetEndianness(self) -> int"""
return _misc_.PlatformInformation_GetEndianness(*args, **kwargs) | [
"def",
"GetEndianness",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"PlatformInformation_GetEndianness",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L1105-L1107 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/aui.py | python | AuiPaneInfo.IsDestroyOnClose | (*args, **kwargs) | return _aui.AuiPaneInfo_IsDestroyOnClose(*args, **kwargs) | IsDestroyOnClose(self) -> bool | IsDestroyOnClose(self) -> bool | [
"IsDestroyOnClose",
"(",
"self",
")",
"-",
">",
"bool"
] | def IsDestroyOnClose(*args, **kwargs):
"""IsDestroyOnClose(self) -> bool"""
return _aui.AuiPaneInfo_IsDestroyOnClose(*args, **kwargs) | [
"def",
"IsDestroyOnClose",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiPaneInfo_IsDestroyOnClose",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/aui.py#L293-L295 | |
potassco/clingo | e0c91d8f95cc28de1c480a871f9c97c30de83d40 | libpyclingo/clingo/ast.py | python | Id | (location: Location, name: str) | return AST(p_ast[0]) | Construct an AST node of type `ASTType.Id`. | Construct an AST node of type `ASTType.Id`. | [
"Construct",
"an",
"AST",
"node",
"of",
"type",
"ASTType",
".",
"Id",
"."
] | def Id(location: Location, name: str) -> AST:
'''
Construct an AST node of type `ASTType.Id`.
'''
p_ast = _ffi.new('clingo_ast_t**')
c_location = _c_location(location)
_handle_error(_lib.clingo_ast_build(
_lib.clingo_ast_type_id, p_ast,
c_location[0],
_ffi.new('char const[]', name.encode())))
return AST(p_ast[0]) | [
"def",
"Id",
"(",
"location",
":",
"Location",
",",
"name",
":",
"str",
")",
"->",
"AST",
":",
"p_ast",
"=",
"_ffi",
".",
"new",
"(",
"'clingo_ast_t**'",
")",
"c_location",
"=",
"_c_location",
"(",
"location",
")",
"_handle_error",
"(",
"_lib",
".",
"clingo_ast_build",
"(",
"_lib",
".",
"clingo_ast_type_id",
",",
"p_ast",
",",
"c_location",
"[",
"0",
"]",
",",
"_ffi",
".",
"new",
"(",
"'char const[]'",
",",
"name",
".",
"encode",
"(",
")",
")",
")",
")",
"return",
"AST",
"(",
"p_ast",
"[",
"0",
"]",
")"
] | https://github.com/potassco/clingo/blob/e0c91d8f95cc28de1c480a871f9c97c30de83d40/libpyclingo/clingo/ast.py#L1196-L1206 | |
hpi-xnor/BMXNet | ed0b201da6667887222b8e4b5f997c4f6b61943d | python/mxnet/module/sequential_module.py | python | SequentialModule.init_params | (self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False) | Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Default ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Default ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Default ``False``.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor. | Initializes parameters. | [
"Initializes",
"parameters",
"."
] | def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Default ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Default ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Default ``False``.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
"""
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
for module in self._modules:
module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
# make sure we do not have duplicated parameter names
def _check_name(known_names, new_names, modules, i):
"""Internal function to help checking duplicated names."""
for name in new_names:
assert not name in known_names, "Duplicated parameter names: " + \
('name "%s" in layer %d (%s) is already ' % (name, i, type(modules[i]))) + \
('used in layer %d (%s).' % (known_names[name],
type(modules[known_names[name]])))
known_names[name] = i
arg_names = dict()
aux_names = dict()
for i_layer, module in enumerate(self._modules):
arg_params, aux_params = module.get_params()
_check_name(arg_names, arg_params.keys(), self._modules, i_layer)
_check_name(aux_names, aux_params.keys(), self._modules, i_layer)
self.params_initialized = True | [
"def",
"init_params",
"(",
"self",
",",
"initializer",
"=",
"Uniform",
"(",
"0.01",
")",
",",
"arg_params",
"=",
"None",
",",
"aux_params",
"=",
"None",
",",
"allow_missing",
"=",
"False",
",",
"force_init",
"=",
"False",
",",
"allow_extra",
"=",
"False",
")",
":",
"if",
"self",
".",
"params_initialized",
"and",
"not",
"force_init",
":",
"return",
"assert",
"self",
".",
"binded",
",",
"'call bind before initializing the parameters'",
"for",
"module",
"in",
"self",
".",
"_modules",
":",
"module",
".",
"init_params",
"(",
"initializer",
"=",
"initializer",
",",
"arg_params",
"=",
"arg_params",
",",
"aux_params",
"=",
"aux_params",
",",
"allow_missing",
"=",
"allow_missing",
",",
"force_init",
"=",
"force_init",
",",
"allow_extra",
"=",
"allow_extra",
")",
"# make sure we do not have duplicated parameter names",
"def",
"_check_name",
"(",
"known_names",
",",
"new_names",
",",
"modules",
",",
"i",
")",
":",
"\"\"\"Internal function to help checking duplicated names.\"\"\"",
"for",
"name",
"in",
"new_names",
":",
"assert",
"not",
"name",
"in",
"known_names",
",",
"\"Duplicated parameter names: \"",
"+",
"(",
"'name \"%s\" in layer %d (%s) is already '",
"%",
"(",
"name",
",",
"i",
",",
"type",
"(",
"modules",
"[",
"i",
"]",
")",
")",
")",
"+",
"(",
"'used in layer %d (%s).'",
"%",
"(",
"known_names",
"[",
"name",
"]",
",",
"type",
"(",
"modules",
"[",
"known_names",
"[",
"name",
"]",
"]",
")",
")",
")",
"known_names",
"[",
"name",
"]",
"=",
"i",
"arg_names",
"=",
"dict",
"(",
")",
"aux_names",
"=",
"dict",
"(",
")",
"for",
"i_layer",
",",
"module",
"in",
"enumerate",
"(",
"self",
".",
"_modules",
")",
":",
"arg_params",
",",
"aux_params",
"=",
"module",
".",
"get_params",
"(",
")",
"_check_name",
"(",
"arg_names",
",",
"arg_params",
".",
"keys",
"(",
")",
",",
"self",
".",
"_modules",
",",
"i_layer",
")",
"_check_name",
"(",
"aux_names",
",",
"aux_params",
".",
"keys",
"(",
")",
",",
"self",
".",
"_modules",
",",
"i_layer",
")",
"self",
".",
"params_initialized",
"=",
"True"
] | https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/module/sequential_module.py#L173-L222 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/plat-mac/pimp.py | python | PimpPreferences.check | (self) | return rv | Check that the preferences make sense: directories exist and are
writable, the install directory is on sys.path, etc. | Check that the preferences make sense: directories exist and are
writable, the install directory is on sys.path, etc. | [
"Check",
"that",
"the",
"preferences",
"make",
"sense",
":",
"directories",
"exist",
"and",
"are",
"writable",
"the",
"install",
"directory",
"is",
"on",
"sys",
".",
"path",
"etc",
"."
] | def check(self):
"""Check that the preferences make sense: directories exist and are
writable, the install directory is on sys.path, etc."""
rv = ""
RWX_OK = os.R_OK|os.W_OK|os.X_OK
if not os.path.exists(self.downloadDir):
rv += "Warning: Download directory \"%s\" does not exist\n" % self.downloadDir
elif not os.access(self.downloadDir, RWX_OK):
rv += "Warning: Download directory \"%s\" is not writable or not readable\n" % self.downloadDir
if not os.path.exists(self.buildDir):
rv += "Warning: Build directory \"%s\" does not exist\n" % self.buildDir
elif not os.access(self.buildDir, RWX_OK):
rv += "Warning: Build directory \"%s\" is not writable or not readable\n" % self.buildDir
if not os.path.exists(self.installDir):
rv += "Warning: Install directory \"%s\" does not exist\n" % self.installDir
elif not os.access(self.installDir, RWX_OK):
rv += "Warning: Install directory \"%s\" is not writable or not readable\n" % self.installDir
else:
installDir = os.path.realpath(self.installDir)
for p in sys.path:
try:
realpath = os.path.realpath(p)
except:
pass
if installDir == realpath:
break
else:
rv += "Warning: Install directory \"%s\" is not on sys.path\n" % self.installDir
return rv | [
"def",
"check",
"(",
"self",
")",
":",
"rv",
"=",
"\"\"",
"RWX_OK",
"=",
"os",
".",
"R_OK",
"|",
"os",
".",
"W_OK",
"|",
"os",
".",
"X_OK",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"downloadDir",
")",
":",
"rv",
"+=",
"\"Warning: Download directory \\\"%s\\\" does not exist\\n\"",
"%",
"self",
".",
"downloadDir",
"elif",
"not",
"os",
".",
"access",
"(",
"self",
".",
"downloadDir",
",",
"RWX_OK",
")",
":",
"rv",
"+=",
"\"Warning: Download directory \\\"%s\\\" is not writable or not readable\\n\"",
"%",
"self",
".",
"downloadDir",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"buildDir",
")",
":",
"rv",
"+=",
"\"Warning: Build directory \\\"%s\\\" does not exist\\n\"",
"%",
"self",
".",
"buildDir",
"elif",
"not",
"os",
".",
"access",
"(",
"self",
".",
"buildDir",
",",
"RWX_OK",
")",
":",
"rv",
"+=",
"\"Warning: Build directory \\\"%s\\\" is not writable or not readable\\n\"",
"%",
"self",
".",
"buildDir",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"installDir",
")",
":",
"rv",
"+=",
"\"Warning: Install directory \\\"%s\\\" does not exist\\n\"",
"%",
"self",
".",
"installDir",
"elif",
"not",
"os",
".",
"access",
"(",
"self",
".",
"installDir",
",",
"RWX_OK",
")",
":",
"rv",
"+=",
"\"Warning: Install directory \\\"%s\\\" is not writable or not readable\\n\"",
"%",
"self",
".",
"installDir",
"else",
":",
"installDir",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"self",
".",
"installDir",
")",
"for",
"p",
"in",
"sys",
".",
"path",
":",
"try",
":",
"realpath",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"p",
")",
"except",
":",
"pass",
"if",
"installDir",
"==",
"realpath",
":",
"break",
"else",
":",
"rv",
"+=",
"\"Warning: Install directory \\\"%s\\\" is not on sys.path\\n\"",
"%",
"self",
".",
"installDir",
"return",
"rv"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/plat-mac/pimp.py#L316-L345 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/XRCed/presenter.py | python | _Presenter.checkCompatibility | (self, comp) | return True | Check parent/child compatibility. | Check parent/child compatibility. | [
"Check",
"parent",
"/",
"child",
"compatibility",
"."
] | def checkCompatibility(self, comp):
'''Check parent/child compatibility.'''
if self.createSibling: container = self.container
else: container = self.comp
if not container.canHaveChild(comp):
wx.LogError('Incompatible parent/child: parent is %s, child is %s!' %
(container.klass, comp.klass))
return False
return True | [
"def",
"checkCompatibility",
"(",
"self",
",",
"comp",
")",
":",
"if",
"self",
".",
"createSibling",
":",
"container",
"=",
"self",
".",
"container",
"else",
":",
"container",
"=",
"self",
".",
"comp",
"if",
"not",
"container",
".",
"canHaveChild",
"(",
"comp",
")",
":",
"wx",
".",
"LogError",
"(",
"'Incompatible parent/child: parent is %s, child is %s!'",
"%",
"(",
"container",
".",
"klass",
",",
"comp",
".",
"klass",
")",
")",
"return",
"False",
"return",
"True"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/XRCed/presenter.py#L443-L451 | |
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | parserCtxt.parseAttributeListDecl | (self) | : parse the Attribute list def for an element [52]
AttlistDecl ::= '<!ATTLIST' S Name AttDef* S? '>' [53]
AttDef ::= S Name S AttType S DefaultDecl | : parse the Attribute list def for an element [52]
AttlistDecl ::= '<!ATTLIST' S Name AttDef* S? '>' [53]
AttDef ::= S Name S AttType S DefaultDecl | [
":",
"parse",
"the",
"Attribute",
"list",
"def",
"for",
"an",
"element",
"[",
"52",
"]",
"AttlistDecl",
"::",
"=",
"<!ATTLIST",
"S",
"Name",
"AttDef",
"*",
"S?",
">",
"[",
"53",
"]",
"AttDef",
"::",
"=",
"S",
"Name",
"S",
"AttType",
"S",
"DefaultDecl"
] | def parseAttributeListDecl(self):
""": parse the Attribute list def for an element [52]
AttlistDecl ::= '<!ATTLIST' S Name AttDef* S? '>' [53]
AttDef ::= S Name S AttType S DefaultDecl """
libxml2mod.xmlParseAttributeListDecl(self._o) | [
"def",
"parseAttributeListDecl",
"(",
"self",
")",
":",
"libxml2mod",
".",
"xmlParseAttributeListDecl",
"(",
"self",
".",
"_o",
")"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L5199-L5203 | ||
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | media/webrtc/trunk/build/android/emulator.py | python | Emulator._DeviceName | (self) | return ('emulator-%d' % port, port) | Return our device name. | Return our device name. | [
"Return",
"our",
"device",
"name",
"."
] | def _DeviceName(self):
"""Return our device name."""
port = _GetAvailablePort()
return ('emulator-%d' % port, port) | [
"def",
"_DeviceName",
"(",
"self",
")",
":",
"port",
"=",
"_GetAvailablePort",
"(",
")",
"return",
"(",
"'emulator-%d'",
"%",
"port",
",",
"port",
")"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/media/webrtc/trunk/build/android/emulator.py#L162-L165 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/saved_model/loader_impl.py | python | _get_main_op_tensor | (
meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY) | return init_op | Gets the main op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
init_op_key: name of collection to check; should be one of MAIN_OP_KEY
or the deprecated LEGACY_INIT_OP_KEY
Returns:
The main op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the main op key has
other than exactly one tensor. | Gets the main op tensor, if one exists. | [
"Gets",
"the",
"main",
"op",
"tensor",
"if",
"one",
"exists",
"."
] | def _get_main_op_tensor(
meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):
"""Gets the main op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
init_op_key: name of collection to check; should be one of MAIN_OP_KEY
or the deprecated LEGACY_INIT_OP_KEY
Returns:
The main op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the main op key has
other than exactly one tensor.
"""
# TODO(kathywu): Rename this method to _get_op_from_collection when
# dependency from SavedModelEstimator is removed.
collection_def = meta_graph_def_to_load.collection_def
init_op = None
if init_op_key in collection_def:
init_op_list = collection_def[init_op_key].node_list.value
if len(init_op_list) != 1:
raise RuntimeError("Expected exactly one SavedModel init op. "
"Found: {}".format(init_op_list))
init_op = ops.get_collection(init_op_key)[0]
return init_op | [
"def",
"_get_main_op_tensor",
"(",
"meta_graph_def_to_load",
",",
"init_op_key",
"=",
"constants",
".",
"MAIN_OP_KEY",
")",
":",
"# TODO(kathywu): Rename this method to _get_op_from_collection when",
"# dependency from SavedModelEstimator is removed.",
"collection_def",
"=",
"meta_graph_def_to_load",
".",
"collection_def",
"init_op",
"=",
"None",
"if",
"init_op_key",
"in",
"collection_def",
":",
"init_op_list",
"=",
"collection_def",
"[",
"init_op_key",
"]",
".",
"node_list",
".",
"value",
"if",
"len",
"(",
"init_op_list",
")",
"!=",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"Expected exactly one SavedModel init op. \"",
"\"Found: {}\"",
".",
"format",
"(",
"init_op_list",
")",
")",
"init_op",
"=",
"ops",
".",
"get_collection",
"(",
"init_op_key",
")",
"[",
"0",
"]",
"return",
"init_op"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/saved_model/loader_impl.py#L134-L160 | |
Z3Prover/z3 | d745d03afdfdf638d66093e2bfbacaf87187f35b | src/api/python/z3/z3.py | python | FailIf | (p, ctx=None) | return Tactic(Z3_tactic_fail_if(p.ctx.ref(), p.probe), p.ctx) | Return a tactic that fails if the probe `p` evaluates to true.
Otherwise, it returns the input goal unmodified.
In the following example, the tactic applies 'simplify' if and only if there are
more than 2 constraints in the goal.
>>> t = OrElse(FailIf(Probe('size') > 2), Tactic('simplify'))
>>> x, y = Ints('x y')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(y > 0)
>>> t(g)
[[x > 0, y > 0]]
>>> g.add(x == y + 1)
>>> t(g)
[[Not(x <= 0), Not(y <= 0), x == 1 + y]] | Return a tactic that fails if the probe `p` evaluates to true.
Otherwise, it returns the input goal unmodified. | [
"Return",
"a",
"tactic",
"that",
"fails",
"if",
"the",
"probe",
"p",
"evaluates",
"to",
"true",
".",
"Otherwise",
"it",
"returns",
"the",
"input",
"goal",
"unmodified",
"."
] | def FailIf(p, ctx=None):
"""Return a tactic that fails if the probe `p` evaluates to true.
Otherwise, it returns the input goal unmodified.
In the following example, the tactic applies 'simplify' if and only if there are
more than 2 constraints in the goal.
>>> t = OrElse(FailIf(Probe('size') > 2), Tactic('simplify'))
>>> x, y = Ints('x y')
>>> g = Goal()
>>> g.add(x > 0)
>>> g.add(y > 0)
>>> t(g)
[[x > 0, y > 0]]
>>> g.add(x == y + 1)
>>> t(g)
[[Not(x <= 0), Not(y <= 0), x == 1 + y]]
"""
p = _to_probe(p, ctx)
return Tactic(Z3_tactic_fail_if(p.ctx.ref(), p.probe), p.ctx) | [
"def",
"FailIf",
"(",
"p",
",",
"ctx",
"=",
"None",
")",
":",
"p",
"=",
"_to_probe",
"(",
"p",
",",
"ctx",
")",
"return",
"Tactic",
"(",
"Z3_tactic_fail_if",
"(",
"p",
".",
"ctx",
".",
"ref",
"(",
")",
",",
"p",
".",
"probe",
")",
",",
"p",
".",
"ctx",
")"
] | https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/src/api/python/z3/z3.py#L8626-L8645 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/signal/ltisys.py | python | TransferFunction.den | (self) | return self._den | Denominator of the `TransferFunction` system. | Denominator of the `TransferFunction` system. | [
"Denominator",
"of",
"the",
"TransferFunction",
"system",
"."
] | def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den | [
"def",
"den",
"(",
"self",
")",
":",
"return",
"self",
".",
"_den"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/signal/ltisys.py#L789-L791 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/py/py/_path/svnwc.py | python | SvnPathBase.propget | (self, name) | return value | return the content of the given property. | return the content of the given property. | [
"return",
"the",
"content",
"of",
"the",
"given",
"property",
"."
] | def propget(self, name):
""" return the content of the given property. """
value = self._propget(name)
return value | [
"def",
"propget",
"(",
"self",
",",
"name",
")",
":",
"value",
"=",
"self",
".",
"_propget",
"(",
"name",
")",
"return",
"value"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/py/py/_path/svnwc.py#L228-L231 | |
yrnkrn/zapcc | c6a8aa30006d997eff0d60fd37b0e62b8aa0ea50 | bindings/python/llvm/object.py | python | Relocation.type_name | (self) | return lib.LLVMGetRelocationTypeName(self) | The relocation type's name, as a str. | The relocation type's name, as a str. | [
"The",
"relocation",
"type",
"s",
"name",
"as",
"a",
"str",
"."
] | def type_name(self):
"""The relocation type's name, as a str."""
if self.expired:
raise Exception('Relocation instance has expired.')
return lib.LLVMGetRelocationTypeName(self) | [
"def",
"type_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"expired",
":",
"raise",
"Exception",
"(",
"'Relocation instance has expired.'",
")",
"return",
"lib",
".",
"LLVMGetRelocationTypeName",
"(",
"self",
")"
] | https://github.com/yrnkrn/zapcc/blob/c6a8aa30006d997eff0d60fd37b0e62b8aa0ea50/bindings/python/llvm/object.py#L400-L405 | |
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | DQM/Integration/scripts/harvesting_tools/cmsHarvester.py | python | CMSHarvester.process_dataset_ignore_list | (self) | Update the list of datasets taking into account the ones to
ignore.
Both lists have been generated before from DBS and both are
assumed to be unique.
NOTE: The advantage of creating the ignore list from DBS (in
case a regexp is given) and matching that instead of directly
matching the ignore criterion against the list of datasets (to
consider) built from DBS is that in the former case we're sure
that all regexps are treated exactly as DBS would have done
without the cmsHarvester.
NOTE: This only removes complete samples. Exclusion of single
runs is done by the book keeping. So the assumption is that a
user never wants to harvest just part (i.e. n out of N runs)
of a sample. | Update the list of datasets taking into account the ones to
ignore. | [
"Update",
"the",
"list",
"of",
"datasets",
"taking",
"into",
"account",
"the",
"ones",
"to",
"ignore",
"."
] | def process_dataset_ignore_list(self):
"""Update the list of datasets taking into account the ones to
ignore.
Both lists have been generated before from DBS and both are
assumed to be unique.
NOTE: The advantage of creating the ignore list from DBS (in
case a regexp is given) and matching that instead of directly
matching the ignore criterion against the list of datasets (to
consider) built from DBS is that in the former case we're sure
that all regexps are treated exactly as DBS would have done
without the cmsHarvester.
NOTE: This only removes complete samples. Exclusion of single
runs is done by the book keeping. So the assumption is that a
user never wants to harvest just part (i.e. n out of N runs)
of a sample.
"""
self.logger.info("Processing list of datasets to ignore...")
self.logger.debug("Before processing ignore list there are %d " \
"datasets in the list to be processed" % \
len(self.datasets_to_use))
# Simple approach: just loop and search.
dataset_names_filtered = copy.deepcopy(self.datasets_to_use)
for dataset_name in self.datasets_to_use.keys():
if dataset_name in self.datasets_to_ignore.keys():
del dataset_names_filtered[dataset_name]
self.logger.info(" --> Removed %d dataset(s)" % \
(len(self.datasets_to_use) -
len(dataset_names_filtered)))
self.datasets_to_use = dataset_names_filtered
self.logger.debug("After processing ignore list there are %d " \
"datasets in the list to be processed" % \
len(self.datasets_to_use)) | [
"def",
"process_dataset_ignore_list",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Processing list of datasets to ignore...\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Before processing ignore list there are %d \"",
"\"datasets in the list to be processed\"",
"%",
"len",
"(",
"self",
".",
"datasets_to_use",
")",
")",
"# Simple approach: just loop and search.",
"dataset_names_filtered",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"datasets_to_use",
")",
"for",
"dataset_name",
"in",
"self",
".",
"datasets_to_use",
".",
"keys",
"(",
")",
":",
"if",
"dataset_name",
"in",
"self",
".",
"datasets_to_ignore",
".",
"keys",
"(",
")",
":",
"del",
"dataset_names_filtered",
"[",
"dataset_name",
"]",
"self",
".",
"logger",
".",
"info",
"(",
"\" --> Removed %d dataset(s)\"",
"%",
"(",
"len",
"(",
"self",
".",
"datasets_to_use",
")",
"-",
"len",
"(",
"dataset_names_filtered",
")",
")",
")",
"self",
".",
"datasets_to_use",
"=",
"dataset_names_filtered",
"self",
".",
"logger",
".",
"debug",
"(",
"\"After processing ignore list there are %d \"",
"\"datasets in the list to be processed\"",
"%",
"len",
"(",
"self",
".",
"datasets_to_use",
")",
")"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/DQM/Integration/scripts/harvesting_tools/cmsHarvester.py#L3565-L3606 | ||
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/keras/distribute/worker_training_state.py | python | WorkerTrainingState.delete_backup | (self) | Delete the backup directories.
Delete the backup directories which should not exist after `fit()`
successfully finishes. | Delete the backup directories. | [
"Delete",
"the",
"backup",
"directories",
"."
] | def delete_backup(self):
"""Delete the backup directories.
Delete the backup directories which should not exist after `fit()`
successfully finishes.
"""
if self.write_checkpoint_manager is self.read_checkpoint_manager:
try:
file_io.delete_recursively_v2(self.write_checkpoint_manager.directory)
except errors.NotFoundError:
pass | [
"def",
"delete_backup",
"(",
"self",
")",
":",
"if",
"self",
".",
"write_checkpoint_manager",
"is",
"self",
".",
"read_checkpoint_manager",
":",
"try",
":",
"file_io",
".",
"delete_recursively_v2",
"(",
"self",
".",
"write_checkpoint_manager",
".",
"directory",
")",
"except",
"errors",
".",
"NotFoundError",
":",
"pass"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/distribute/worker_training_state.py#L108-L118 | ||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/python/framework/gen_docs_combined.py | python | all_libraries | (module_to_name, members, documented) | return [
# Splits of module 'tf'.
library("framework", "Building Graphs", framework_lib),
library("check_ops", "Asserts and boolean checks."),
library("constant_op", "Constants, Sequences, and Random Values",
constant_op, prefix=PREFIX_TEXT),
library("state_ops",
"Variables",
exclude_symbols=["create_partitioned_variables"],
prefix=PREFIX_TEXT),
library("array_ops",
"Tensor Transformations",
exclude_symbols=["list_diff"],
prefix=PREFIX_TEXT),
library("math_ops",
"Math",
exclude_symbols=["sparse_matmul", "arg_min", "arg_max",
"lin_space", "sparse_segment_mean_grad"],
prefix=PREFIX_TEXT),
library("string_ops", "Strings",
prefix=PREFIX_TEXT),
library("histogram_ops", "Histograms"),
library("control_flow_ops", "Control Flow", prefix=PREFIX_TEXT),
library("functional_ops", "Higher Order Functions", prefix=PREFIX_TEXT),
library("tensor_array_ops", "TensorArray Operations", prefix=PREFIX_TEXT),
library("session_ops", "Tensor Handle Operations", prefix=PREFIX_TEXT),
library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"],
prefix=PREFIX_TEXT),
library("sparse_ops",
"Sparse Tensors",
exclude_symbols=["serialize_sparse", "serialize_many_sparse",
"deserialize_many_sparse"],
prefix=PREFIX_TEXT),
library("io_ops",
"Inputs and Readers",
exclude_symbols=["LookupTableBase", "HashTable",
"initialize_all_tables",
"parse_single_sequence_example",
"string_to_hash_bucket"],
prefix=PREFIX_TEXT),
library("python_io", "Data IO (Python functions)", tf.python_io),
library("nn",
"Neural Network",
tf.nn,
exclude_symbols=["conv2d_backprop_input",
"conv2d_backprop_filter", "avg_pool_grad",
"max_pool_grad", "max_pool_grad_with_argmax",
"batch_norm_with_global_normalization_grad",
"lrn_grad", "relu6_grad", "softplus_grad",
"softsign_grad", "xw_plus_b", "relu_layer",
"lrn", "batch_norm_with_global_normalization",
"batch_norm_with_global_normalization_grad",
"all_candidate_sampler", "seq2seq"],
prefix=PREFIX_TEXT),
library("rnn_cell", "Neural Network RNN Cells", tf.nn.rnn_cell),
library("client", "Running Graphs", client_lib),
library("train",
"Training",
tf.train,
exclude_symbols=["Feature", "Features", "BytesList", "FloatList",
"Int64List", "Example", "InferenceExample",
"FeatureList", "FeatureLists", "RankingExample",
"SequenceExample"]),
library("script_ops",
"Wraps python functions",
prefix=PREFIX_TEXT),
library("summary", "Summary Operations", tf.summary),
library("test", "Testing", tf.test),
library("contrib.bayesflow.stochastic_graph",
"BayesFlow Stochastic Graph (contrib)",
tf.contrib.bayesflow.stochastic_graph),
library("contrib.distributions", "Statistical distributions (contrib)",
tf.contrib.distributions),
library("contrib.ffmpeg", "FFmpeg (contrib)", ffmpeg),
library("contrib.framework", "Framework (contrib)", tf.contrib.framework),
library("contrib.graph_editor", "Graph Editor (contrib)",
tf.contrib.graph_editor),
library("contrib.layers", "Layers (contrib)", tf.contrib.layers),
library("contrib.learn", "Learn (contrib)", tf.contrib.learn),
library("contrib.learn.monitors", "Monitors (contrib)",
tf.contrib.learn.monitors),
library("contrib.losses", "Losses (contrib)", tf.contrib.losses),
library("contrib.rnn", "RNN (contrib)", tf.contrib.rnn),
library("contrib.metrics", "Metrics (contrib)", tf.contrib.metrics),
library("contrib.util", "Utilities (contrib)", tf.contrib.util),
library("contrib.copy_graph", "Copying Graph Elements (contrib)",
tf.contrib.copy_graph),
] | Make a list of the individual files that we want to create.
Args:
module_to_name: Dictionary mapping modules to short names.
members: Dictionary mapping member name to (fullname, member).
documented: Set of documented names to update.
Returns:
List of (filename, docs.Library) pairs. | Make a list of the individual files that we want to create. | [
"Make",
"a",
"list",
"of",
"the",
"individual",
"files",
"that",
"we",
"want",
"to",
"create",
"."
] | def all_libraries(module_to_name, members, documented):
"""Make a list of the individual files that we want to create.
Args:
module_to_name: Dictionary mapping modules to short names.
members: Dictionary mapping member name to (fullname, member).
documented: Set of documented names to update.
Returns:
List of (filename, docs.Library) pairs.
"""
def library(name, title, module=None, **args):
if module is None:
module = sys.modules["tensorflow.python.ops" +
("" if name == "ops" else "." + name)]
return (name + ".md", docs.Library(title=title,
module_to_name=module_to_name,
members=members,
documented=documented,
module=module,
**args))
return [
# Splits of module 'tf'.
library("framework", "Building Graphs", framework_lib),
library("check_ops", "Asserts and boolean checks."),
library("constant_op", "Constants, Sequences, and Random Values",
constant_op, prefix=PREFIX_TEXT),
library("state_ops",
"Variables",
exclude_symbols=["create_partitioned_variables"],
prefix=PREFIX_TEXT),
library("array_ops",
"Tensor Transformations",
exclude_symbols=["list_diff"],
prefix=PREFIX_TEXT),
library("math_ops",
"Math",
exclude_symbols=["sparse_matmul", "arg_min", "arg_max",
"lin_space", "sparse_segment_mean_grad"],
prefix=PREFIX_TEXT),
library("string_ops", "Strings",
prefix=PREFIX_TEXT),
library("histogram_ops", "Histograms"),
library("control_flow_ops", "Control Flow", prefix=PREFIX_TEXT),
library("functional_ops", "Higher Order Functions", prefix=PREFIX_TEXT),
library("tensor_array_ops", "TensorArray Operations", prefix=PREFIX_TEXT),
library("session_ops", "Tensor Handle Operations", prefix=PREFIX_TEXT),
library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"],
prefix=PREFIX_TEXT),
library("sparse_ops",
"Sparse Tensors",
exclude_symbols=["serialize_sparse", "serialize_many_sparse",
"deserialize_many_sparse"],
prefix=PREFIX_TEXT),
library("io_ops",
"Inputs and Readers",
exclude_symbols=["LookupTableBase", "HashTable",
"initialize_all_tables",
"parse_single_sequence_example",
"string_to_hash_bucket"],
prefix=PREFIX_TEXT),
library("python_io", "Data IO (Python functions)", tf.python_io),
library("nn",
"Neural Network",
tf.nn,
exclude_symbols=["conv2d_backprop_input",
"conv2d_backprop_filter", "avg_pool_grad",
"max_pool_grad", "max_pool_grad_with_argmax",
"batch_norm_with_global_normalization_grad",
"lrn_grad", "relu6_grad", "softplus_grad",
"softsign_grad", "xw_plus_b", "relu_layer",
"lrn", "batch_norm_with_global_normalization",
"batch_norm_with_global_normalization_grad",
"all_candidate_sampler", "seq2seq"],
prefix=PREFIX_TEXT),
library("rnn_cell", "Neural Network RNN Cells", tf.nn.rnn_cell),
library("client", "Running Graphs", client_lib),
library("train",
"Training",
tf.train,
exclude_symbols=["Feature", "Features", "BytesList", "FloatList",
"Int64List", "Example", "InferenceExample",
"FeatureList", "FeatureLists", "RankingExample",
"SequenceExample"]),
library("script_ops",
"Wraps python functions",
prefix=PREFIX_TEXT),
library("summary", "Summary Operations", tf.summary),
library("test", "Testing", tf.test),
library("contrib.bayesflow.stochastic_graph",
"BayesFlow Stochastic Graph (contrib)",
tf.contrib.bayesflow.stochastic_graph),
library("contrib.distributions", "Statistical distributions (contrib)",
tf.contrib.distributions),
library("contrib.ffmpeg", "FFmpeg (contrib)", ffmpeg),
library("contrib.framework", "Framework (contrib)", tf.contrib.framework),
library("contrib.graph_editor", "Graph Editor (contrib)",
tf.contrib.graph_editor),
library("contrib.layers", "Layers (contrib)", tf.contrib.layers),
library("contrib.learn", "Learn (contrib)", tf.contrib.learn),
library("contrib.learn.monitors", "Monitors (contrib)",
tf.contrib.learn.monitors),
library("contrib.losses", "Losses (contrib)", tf.contrib.losses),
library("contrib.rnn", "RNN (contrib)", tf.contrib.rnn),
library("contrib.metrics", "Metrics (contrib)", tf.contrib.metrics),
library("contrib.util", "Utilities (contrib)", tf.contrib.util),
library("contrib.copy_graph", "Copying Graph Elements (contrib)",
tf.contrib.copy_graph),
] | [
"def",
"all_libraries",
"(",
"module_to_name",
",",
"members",
",",
"documented",
")",
":",
"def",
"library",
"(",
"name",
",",
"title",
",",
"module",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"if",
"module",
"is",
"None",
":",
"module",
"=",
"sys",
".",
"modules",
"[",
"\"tensorflow.python.ops\"",
"+",
"(",
"\"\"",
"if",
"name",
"==",
"\"ops\"",
"else",
"\".\"",
"+",
"name",
")",
"]",
"return",
"(",
"name",
"+",
"\".md\"",
",",
"docs",
".",
"Library",
"(",
"title",
"=",
"title",
",",
"module_to_name",
"=",
"module_to_name",
",",
"members",
"=",
"members",
",",
"documented",
"=",
"documented",
",",
"module",
"=",
"module",
",",
"*",
"*",
"args",
")",
")",
"return",
"[",
"# Splits of module 'tf'.",
"library",
"(",
"\"framework\"",
",",
"\"Building Graphs\"",
",",
"framework_lib",
")",
",",
"library",
"(",
"\"check_ops\"",
",",
"\"Asserts and boolean checks.\"",
")",
",",
"library",
"(",
"\"constant_op\"",
",",
"\"Constants, Sequences, and Random Values\"",
",",
"constant_op",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"state_ops\"",
",",
"\"Variables\"",
",",
"exclude_symbols",
"=",
"[",
"\"create_partitioned_variables\"",
"]",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"array_ops\"",
",",
"\"Tensor Transformations\"",
",",
"exclude_symbols",
"=",
"[",
"\"list_diff\"",
"]",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"math_ops\"",
",",
"\"Math\"",
",",
"exclude_symbols",
"=",
"[",
"\"sparse_matmul\"",
",",
"\"arg_min\"",
",",
"\"arg_max\"",
",",
"\"lin_space\"",
",",
"\"sparse_segment_mean_grad\"",
"]",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"string_ops\"",
",",
"\"Strings\"",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"histogram_ops\"",
",",
"\"Histograms\"",
")",
",",
"library",
"(",
"\"control_flow_ops\"",
",",
"\"Control Flow\"",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"functional_ops\"",
",",
"\"Higher Order Functions\"",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"tensor_array_ops\"",
",",
"\"TensorArray Operations\"",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"session_ops\"",
",",
"\"Tensor Handle Operations\"",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"image\"",
",",
"\"Images\"",
",",
"tf",
".",
"image",
",",
"exclude_symbols",
"=",
"[",
"\"ResizeMethod\"",
"]",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"sparse_ops\"",
",",
"\"Sparse Tensors\"",
",",
"exclude_symbols",
"=",
"[",
"\"serialize_sparse\"",
",",
"\"serialize_many_sparse\"",
",",
"\"deserialize_many_sparse\"",
"]",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"io_ops\"",
",",
"\"Inputs and Readers\"",
",",
"exclude_symbols",
"=",
"[",
"\"LookupTableBase\"",
",",
"\"HashTable\"",
",",
"\"initialize_all_tables\"",
",",
"\"parse_single_sequence_example\"",
",",
"\"string_to_hash_bucket\"",
"]",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"python_io\"",
",",
"\"Data IO (Python functions)\"",
",",
"tf",
".",
"python_io",
")",
",",
"library",
"(",
"\"nn\"",
",",
"\"Neural Network\"",
",",
"tf",
".",
"nn",
",",
"exclude_symbols",
"=",
"[",
"\"conv2d_backprop_input\"",
",",
"\"conv2d_backprop_filter\"",
",",
"\"avg_pool_grad\"",
",",
"\"max_pool_grad\"",
",",
"\"max_pool_grad_with_argmax\"",
",",
"\"batch_norm_with_global_normalization_grad\"",
",",
"\"lrn_grad\"",
",",
"\"relu6_grad\"",
",",
"\"softplus_grad\"",
",",
"\"softsign_grad\"",
",",
"\"xw_plus_b\"",
",",
"\"relu_layer\"",
",",
"\"lrn\"",
",",
"\"batch_norm_with_global_normalization\"",
",",
"\"batch_norm_with_global_normalization_grad\"",
",",
"\"all_candidate_sampler\"",
",",
"\"seq2seq\"",
"]",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"rnn_cell\"",
",",
"\"Neural Network RNN Cells\"",
",",
"tf",
".",
"nn",
".",
"rnn_cell",
")",
",",
"library",
"(",
"\"client\"",
",",
"\"Running Graphs\"",
",",
"client_lib",
")",
",",
"library",
"(",
"\"train\"",
",",
"\"Training\"",
",",
"tf",
".",
"train",
",",
"exclude_symbols",
"=",
"[",
"\"Feature\"",
",",
"\"Features\"",
",",
"\"BytesList\"",
",",
"\"FloatList\"",
",",
"\"Int64List\"",
",",
"\"Example\"",
",",
"\"InferenceExample\"",
",",
"\"FeatureList\"",
",",
"\"FeatureLists\"",
",",
"\"RankingExample\"",
",",
"\"SequenceExample\"",
"]",
")",
",",
"library",
"(",
"\"script_ops\"",
",",
"\"Wraps python functions\"",
",",
"prefix",
"=",
"PREFIX_TEXT",
")",
",",
"library",
"(",
"\"summary\"",
",",
"\"Summary Operations\"",
",",
"tf",
".",
"summary",
")",
",",
"library",
"(",
"\"test\"",
",",
"\"Testing\"",
",",
"tf",
".",
"test",
")",
",",
"library",
"(",
"\"contrib.bayesflow.stochastic_graph\"",
",",
"\"BayesFlow Stochastic Graph (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"bayesflow",
".",
"stochastic_graph",
")",
",",
"library",
"(",
"\"contrib.distributions\"",
",",
"\"Statistical distributions (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"distributions",
")",
",",
"library",
"(",
"\"contrib.ffmpeg\"",
",",
"\"FFmpeg (contrib)\"",
",",
"ffmpeg",
")",
",",
"library",
"(",
"\"contrib.framework\"",
",",
"\"Framework (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"framework",
")",
",",
"library",
"(",
"\"contrib.graph_editor\"",
",",
"\"Graph Editor (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"graph_editor",
")",
",",
"library",
"(",
"\"contrib.layers\"",
",",
"\"Layers (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"layers",
")",
",",
"library",
"(",
"\"contrib.learn\"",
",",
"\"Learn (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"learn",
")",
",",
"library",
"(",
"\"contrib.learn.monitors\"",
",",
"\"Monitors (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"learn",
".",
"monitors",
")",
",",
"library",
"(",
"\"contrib.losses\"",
",",
"\"Losses (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"losses",
")",
",",
"library",
"(",
"\"contrib.rnn\"",
",",
"\"RNN (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"rnn",
")",
",",
"library",
"(",
"\"contrib.metrics\"",
",",
"\"Metrics (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"metrics",
")",
",",
"library",
"(",
"\"contrib.util\"",
",",
"\"Utilities (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"util",
")",
",",
"library",
"(",
"\"contrib.copy_graph\"",
",",
"\"Copying Graph Elements (contrib)\"",
",",
"tf",
".",
"contrib",
".",
"copy_graph",
")",
",",
"]"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/framework/gen_docs_combined.py#L75-L183 | |
tensorflow/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | deepmath/premises/model_definition_cnn_flat3.py | python | Model.classifier | (self, conjecture_embedding, axiom_embedding) | return logits | Compute the logits from conjecture and axiom embeddings. | Compute the logits from conjecture and axiom embeddings. | [
"Compute",
"the",
"logits",
"from",
"conjecture",
"and",
"axiom",
"embeddings",
"."
] | def classifier(self, conjecture_embedding, axiom_embedding):
"""Compute the logits from conjecture and axiom embeddings."""
with self.graph.as_default():
net = tf.concat((conjecture_embedding, axiom_embedding), 1)
net = layers.relu(net, 1024)
logits = layers.linear(net, 2)
return logits | [
"def",
"classifier",
"(",
"self",
",",
"conjecture_embedding",
",",
"axiom_embedding",
")",
":",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"net",
"=",
"tf",
".",
"concat",
"(",
"(",
"conjecture_embedding",
",",
"axiom_embedding",
")",
",",
"1",
")",
"net",
"=",
"layers",
".",
"relu",
"(",
"net",
",",
"1024",
")",
"logits",
"=",
"layers",
".",
"linear",
"(",
"net",
",",
"2",
")",
"return",
"logits"
] | https://github.com/tensorflow/deepmath/blob/b5b721f54de1d5d6a02d78f5da5995237f9995f9/deepmath/premises/model_definition_cnn_flat3.py#L63-L69 | |
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/_tensor.py | python | Tensor.storage_type | (self) | return type(self.storage()) | r"""storage_type() -> type
Returns the type of the underlying storage. | r"""storage_type() -> type | [
"r",
"storage_type",
"()",
"-",
">",
"type"
] | def storage_type(self):
r"""storage_type() -> type
Returns the type of the underlying storage.
"""
# NB: this returns old fashioned TypedStorage, e.g., FloatStorage, as it
# would be pretty pointless otherwise (it would always return
# UntypedStorage)
return type(self.storage()) | [
"def",
"storage_type",
"(",
"self",
")",
":",
"# NB: this returns old fashioned TypedStorage, e.g., FloatStorage, as it",
"# would be pretty pointless otherwise (it would always return",
"# UntypedStorage)",
"return",
"type",
"(",
"self",
".",
"storage",
"(",
")",
")"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/_tensor.py#L827-L836 | |
intel/llvm | e6d0547e9d99b5a56430c4749f6c7e328bf221ab | mlir/utils/spirv/gen_spirv_dialect.py | python | gen_instr_coverage_report | (path, instructions) | Dumps to standard output a YAML report of current instruction coverage
Arguments:
- path: the path to SPIRBase.td
- instructions: a list containing all SPIR-V instructions' grammar | Dumps to standard output a YAML report of current instruction coverage | [
"Dumps",
"to",
"standard",
"output",
"a",
"YAML",
"report",
"of",
"current",
"instruction",
"coverage"
] | def gen_instr_coverage_report(path, instructions):
"""Dumps to standard output a YAML report of current instruction coverage
Arguments:
- path: the path to SPIRBase.td
- instructions: a list containing all SPIR-V instructions' grammar
"""
with open(path, 'r') as f:
content = f.read()
content = content.split(AUTOGEN_OPCODE_SECTION_MARKER)
existing_opcodes = [k[11:] for k in re.findall('def SPV_OC_\w+', content[1])]
existing_instructions = list(
filter(lambda inst: (inst['opname'] in existing_opcodes),
instructions))
instructions_opnames = [inst['opname'] for inst in instructions]
remaining_opcodes = list(set(instructions_opnames) - set(existing_opcodes))
remaining_instructions = list(
filter(lambda inst: (inst['opname'] in remaining_opcodes),
instructions))
rem_cap_to_instr = map_cap_to_opnames(remaining_instructions)
ex_cap_to_instr = map_cap_to_opnames(existing_instructions)
rem_cap_to_cov = {}
# Calculate coverage for each capability
for cap in rem_cap_to_instr:
if cap not in ex_cap_to_instr:
rem_cap_to_cov[cap] = 0.0
else:
rem_cap_to_cov[cap] = \
(len(ex_cap_to_instr[cap]) / (len(ex_cap_to_instr[cap]) \
+ len(rem_cap_to_instr[cap])))
report = {}
# Merge the 3 maps into one report
for cap in rem_cap_to_instr:
report[cap] = {}
report[cap]['Supported Instructions'] = \
ex_cap_to_instr[cap] if cap in ex_cap_to_instr else []
report[cap]['Unsupported Instructions'] = rem_cap_to_instr[cap]
report[cap]['Coverage'] = '{}%'.format(int(rem_cap_to_cov[cap] * 100))
print(yaml.dump(report)) | [
"def",
"gen_instr_coverage_report",
"(",
"path",
",",
"instructions",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"content",
"=",
"content",
".",
"split",
"(",
"AUTOGEN_OPCODE_SECTION_MARKER",
")",
"existing_opcodes",
"=",
"[",
"k",
"[",
"11",
":",
"]",
"for",
"k",
"in",
"re",
".",
"findall",
"(",
"'def SPV_OC_\\w+'",
",",
"content",
"[",
"1",
"]",
")",
"]",
"existing_instructions",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"inst",
":",
"(",
"inst",
"[",
"'opname'",
"]",
"in",
"existing_opcodes",
")",
",",
"instructions",
")",
")",
"instructions_opnames",
"=",
"[",
"inst",
"[",
"'opname'",
"]",
"for",
"inst",
"in",
"instructions",
"]",
"remaining_opcodes",
"=",
"list",
"(",
"set",
"(",
"instructions_opnames",
")",
"-",
"set",
"(",
"existing_opcodes",
")",
")",
"remaining_instructions",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"inst",
":",
"(",
"inst",
"[",
"'opname'",
"]",
"in",
"remaining_opcodes",
")",
",",
"instructions",
")",
")",
"rem_cap_to_instr",
"=",
"map_cap_to_opnames",
"(",
"remaining_instructions",
")",
"ex_cap_to_instr",
"=",
"map_cap_to_opnames",
"(",
"existing_instructions",
")",
"rem_cap_to_cov",
"=",
"{",
"}",
"# Calculate coverage for each capability",
"for",
"cap",
"in",
"rem_cap_to_instr",
":",
"if",
"cap",
"not",
"in",
"ex_cap_to_instr",
":",
"rem_cap_to_cov",
"[",
"cap",
"]",
"=",
"0.0",
"else",
":",
"rem_cap_to_cov",
"[",
"cap",
"]",
"=",
"(",
"len",
"(",
"ex_cap_to_instr",
"[",
"cap",
"]",
")",
"/",
"(",
"len",
"(",
"ex_cap_to_instr",
"[",
"cap",
"]",
")",
"+",
"len",
"(",
"rem_cap_to_instr",
"[",
"cap",
"]",
")",
")",
")",
"report",
"=",
"{",
"}",
"# Merge the 3 maps into one report",
"for",
"cap",
"in",
"rem_cap_to_instr",
":",
"report",
"[",
"cap",
"]",
"=",
"{",
"}",
"report",
"[",
"cap",
"]",
"[",
"'Supported Instructions'",
"]",
"=",
"ex_cap_to_instr",
"[",
"cap",
"]",
"if",
"cap",
"in",
"ex_cap_to_instr",
"else",
"[",
"]",
"report",
"[",
"cap",
"]",
"[",
"'Unsupported Instructions'",
"]",
"=",
"rem_cap_to_instr",
"[",
"cap",
"]",
"report",
"[",
"cap",
"]",
"[",
"'Coverage'",
"]",
"=",
"'{}%'",
".",
"format",
"(",
"int",
"(",
"rem_cap_to_cov",
"[",
"cap",
"]",
"*",
"100",
")",
")",
"print",
"(",
"yaml",
".",
"dump",
"(",
"report",
")",
")"
] | https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/mlir/utils/spirv/gen_spirv_dialect.py#L482-L530 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py | python | Message._explain_to | (self, message) | Copy format-specific state to message insofar as possible. | Copy format-specific state to message insofar as possible. | [
"Copy",
"format",
"-",
"specific",
"state",
"to",
"message",
"insofar",
"as",
"possible",
"."
] | def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type') | [
"def",
"_explain_to",
"(",
"self",
",",
"message",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"Message",
")",
":",
"return",
"# There's nothing format-specific to explain.",
"else",
":",
"raise",
"TypeError",
"(",
"'Cannot convert to specified type'",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py#L1515-L1520 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/codecs.py | python | StreamWriter.reset | (self) | Resets the codec buffers used for keeping internal state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state. | Resets the codec buffers used for keeping internal state. | [
"Resets",
"the",
"codec",
"buffers",
"used",
"for",
"keeping",
"internal",
"state",
"."
def reset(self):
    """Reset the codec buffers used for keeping internal state.

    Calling this method should ensure that the data on the output is
    put into a clean state that allows appending of new fresh data
    without having to rescan the whole stream to recover state.  The
    base implementation keeps no state and therefore does nothing.
    """
    return None
"def",
"reset",
"(",
"self",
")",
":",
"pass"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/codecs.py#L387-L397 | ||
sonyxperiadev/WebGL | 0299b38196f78c6d5f74bcf6fa312a3daee6de60 | Tools/Scripts/webkitpy/common/system/filesystem.py | python | FileSystem.rmtree | (self, path) | Delete the directory rooted at path, empty or no. | Delete the directory rooted at path, empty or no. | [
"Delete",
"the",
"directory",
"rooted",
"at",
"path",
"empty",
"or",
"no",
"."
def rmtree(self, path):
    """Recursively delete the directory tree rooted at path.

    The removal is best-effort: a missing path or individual deletion
    failures are silently ignored (``ignore_errors=True``).
    """
    shutil.rmtree(path, ignore_errors=True)
"def",
"rmtree",
"(",
"self",
",",
"path",
")",
":",
"shutil",
".",
"rmtree",
"(",
"path",
",",
"ignore_errors",
"=",
"True",
")"
] | https://github.com/sonyxperiadev/WebGL/blob/0299b38196f78c6d5f74bcf6fa312a3daee6de60/Tools/Scripts/webkitpy/common/system/filesystem.py#L250-L252 | ||
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/chigger/utils/AxisOptions.py | python | get_options | () | return opt | Retuns options for vtkAxis objects. | Retuns options for vtkAxis objects. | [
"Retuns",
"options",
"for",
"vtkAxis",
"objects",
"."
def get_options():
    """
    Returns options for vtkAxis objects.
    """
    opt = Options()
    # Tick count and axis extents ('lim' has no default: unset means auto).
    opt.add('num_ticks', 5, "The number of tick marks to place on the axis.", vtype=int)
    opt.add('lim', "The axis extents.", vtype=list)
    # Fonts, colors, and titles.
    opt.add('font_color', [1, 1, 1], "The color of the axis, ticks, and labels.")
    opt.add('title', "The axis label.", vtype=str)
    opt.add('font_size', "The axis title and label font sizes, in points.", vtype=int)
    opt.add('title_font_size', "The axis title font size, in points.", vtype=int)
    opt.add('tick_font_size', "The axis tick label font size, in points.", vtype=int)
    # Grid lines.
    opt.add('grid', True, "Show/hide the grid lines for this axis.")
    opt.add('grid_color', [0.25, 0.25, 0.25], "The color for the grid lines.")
    # Numeric formatting of tick labels.
    opt.add('precision', "The axis numeric precision.", vtype=int)
    opt.add('notation', "The type of notation, leave empty to let VTK decide", vtype=str,
            allow=['standard', 'scientific', 'fixed', 'printf'])
    # Visibility toggles.
    opt.add('ticks_visible', True, "Control visibility of tickmarks on colorbar axis.")
    opt.add('axis_visible', True, "Control visibility of axis line on colorbar axis.")
    opt.add('labels_visible', True, "Control visibility of the numeric labels.")
    # Placement and scaling of the axis within the viewport.
    opt.add('axis_position', 'left', "Set the axis position (left, right, top, bottom)", vtype=str,
            allow=['left', 'right', 'top', 'bottom'])
    opt.add('axis_point1', [0, 0], 'Starting location of axis, in absolute viewport coordinates.')
    opt.add('axis_point2', [0, 0], 'Ending location of axis, in absolute viewport coordinates.')
    opt.add('axis_scale', 1, "The axis scaling factor.", vtype=float)
    opt.add('axis_factor', 0, "Offset the axis by adding a factor.", vtype=float)
    opt.add('axis_opacity', 1, "The vtkAxis opacity.", vtype=float)
    opt.add('zero_tol', 1e-10, "Tolerance for considering limits to be the same.")
    return opt
"def",
"get_options",
"(",
")",
":",
"opt",
"=",
"Options",
"(",
")",
"opt",
".",
"add",
"(",
"'num_ticks'",
",",
"5",
",",
"\"The number of tick marks to place on the axis.\"",
",",
"vtype",
"=",
"int",
")",
"opt",
".",
"add",
"(",
"'lim'",
",",
"\"The axis extents.\"",
",",
"vtype",
"=",
"list",
")",
"opt",
".",
"add",
"(",
"'font_color'",
",",
"[",
"1",
",",
"1",
",",
"1",
"]",
",",
"\"The color of the axis, ticks, and labels.\"",
")",
"opt",
".",
"add",
"(",
"'title'",
",",
"\"The axis label.\"",
",",
"vtype",
"=",
"str",
")",
"opt",
".",
"add",
"(",
"'font_size'",
",",
"\"The axis title and label font sizes, in points.\"",
",",
"vtype",
"=",
"int",
")",
"opt",
".",
"add",
"(",
"'title_font_size'",
",",
"\"The axis title font size, in points.\"",
",",
"vtype",
"=",
"int",
")",
"opt",
".",
"add",
"(",
"'tick_font_size'",
",",
"\"The axis tick label font size, in points.\"",
",",
"vtype",
"=",
"int",
")",
"opt",
".",
"add",
"(",
"'grid'",
",",
"True",
",",
"\"Show/hide the grid lines for this axis.\"",
")",
"opt",
".",
"add",
"(",
"'grid_color'",
",",
"[",
"0.25",
",",
"0.25",
",",
"0.25",
"]",
",",
"\"The color for the grid lines.\"",
")",
"opt",
".",
"add",
"(",
"'precision'",
",",
"\"The axis numeric precision.\"",
",",
"vtype",
"=",
"int",
")",
"opt",
".",
"add",
"(",
"'notation'",
",",
"\"The type of notation, leave empty to let VTK decide\"",
",",
"vtype",
"=",
"str",
",",
"allow",
"=",
"[",
"'standard'",
",",
"'scientific'",
",",
"'fixed'",
",",
"'printf'",
"]",
")",
"opt",
".",
"add",
"(",
"'ticks_visible'",
",",
"True",
",",
"\"Control visibility of tickmarks on colorbar axis.\"",
")",
"opt",
".",
"add",
"(",
"'axis_visible'",
",",
"True",
",",
"\"Control visibility of axis line on colorbar axis.\"",
")",
"opt",
".",
"add",
"(",
"'labels_visible'",
",",
"True",
",",
"\"Control visibility of the numeric labels.\"",
")",
"opt",
".",
"add",
"(",
"'axis_position'",
",",
"'left'",
",",
"\"Set the axis position (left, right, top, bottom)\"",
",",
"vtype",
"=",
"str",
",",
"allow",
"=",
"[",
"'left'",
",",
"'right'",
",",
"'top'",
",",
"'bottom'",
"]",
")",
"opt",
".",
"add",
"(",
"'axis_point1'",
",",
"[",
"0",
",",
"0",
"]",
",",
"'Starting location of axis, in absolute viewport coordinates.'",
")",
"opt",
".",
"add",
"(",
"'axis_point2'",
",",
"[",
"0",
",",
"0",
"]",
",",
"'Ending location of axis, in absolute viewport coordinates.'",
")",
"opt",
".",
"add",
"(",
"'axis_scale'",
",",
"1",
",",
"\"The axis scaling factor.\"",
",",
"vtype",
"=",
"float",
")",
"opt",
".",
"add",
"(",
"'axis_factor'",
",",
"0",
",",
"\"Offset the axis by adding a factor.\"",
",",
"vtype",
"=",
"float",
")",
"opt",
".",
"add",
"(",
"'axis_opacity'",
",",
"1",
",",
"\"The vtkAxis opacity.\"",
",",
"vtype",
"=",
"float",
")",
"opt",
".",
"add",
"(",
"'zero_tol'",
",",
"1e-10",
",",
"\"Tolerance for considering limits to be the same.\"",
")",
"return",
"opt"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/chigger/utils/AxisOptions.py#L22-L50 | |
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/ops/state_ops.py | python | is_variable_initialized | (ref, name=None) | Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. | Checks whether a tensor has been initialized. | [
"Checks",
"whether",
"a",
"tensor",
"has",
"been",
"initialized",
"."
def is_variable_initialized(ref, name=None):
  """Checks whether a tensor has been initialized.

  Outputs boolean scalar indicating whether the tensor has been initialized.

  Args:
    ref: A mutable `Tensor`.
      Should be from a `Variable` node. May be uninitialized.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  # Classic (ref-dtype) variables use the dedicated state op.
  if ref.dtype._is_ref_dtype:
    return gen_state_ops.is_variable_initialized(ref=ref, name=name)
  # Handle resource variables.
  if ref.op.type == "VarHandleOp":
    return gen_resource_variable_ops.var_is_initialized_op(ref.handle,
                                                           name=name)
  # NOTE(review): a `ref` that is neither a ref-dtype tensor nor a
  # VarHandleOp falls through here and implicitly returns None -- confirm
  # callers never pass such a tensor.
"def",
"is_variable_initialized",
"(",
"ref",
",",
"name",
"=",
"None",
")",
":",
"if",
"ref",
".",
"dtype",
".",
"_is_ref_dtype",
":",
"return",
"gen_state_ops",
".",
"is_variable_initialized",
"(",
"ref",
"=",
"ref",
",",
"name",
"=",
"name",
")",
"# Handle resource variables.",
"if",
"ref",
".",
"op",
".",
"type",
"==",
"\"VarHandleOp\"",
":",
"return",
"gen_resource_variable_ops",
".",
"var_is_initialized_op",
"(",
"ref",
".",
"handle",
",",
"name",
"=",
"name",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/state_ops.py#L169-L187 | ||
s9xie/DSN | 065e49898d239f5c96be558616b2556eabc50351 | scripts/cpp_lint.py | python | CheckForHeaderGuard | (filename, lines, error) | Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found. | Checks that the file contains a header guard. | [
"Checks",
"that",
"the",
"file",
"contains",
"a",
"header",
"guard",
"."
def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The expected guard macro, derived from the file's path.
  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = None
  ifndef_linenum = 0
  define = None
  endif = None
  endif_linenum = 0
  # Single pass over the file: remember the FIRST #ifndef/#define pair
  # and the LAST #endif.
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    # A trailing extra underscore only warrants the mildest severity (0);
    # any other mismatch is reported at maximum severity (5).
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    # Honor any NOLINT suppression on the #ifndef line before reporting.
    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return

  if endif != ('#endif // %s' % cppvar):
    # Same severity scheme as above: legacy trailing underscore is benign.
    error_level = 0
    if endif != ('#endif // %s' % (cppvar + '_')):
      error_level = 5

    ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                            error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif // %s"' % cppvar)
"def",
"CheckForHeaderGuard",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"ifndef",
"=",
"None",
"ifndef_linenum",
"=",
"0",
"define",
"=",
"None",
"endif",
"=",
"None",
"endif_linenum",
"=",
"0",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"linesplit",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"linesplit",
")",
">=",
"2",
":",
"# find the first occurrence of #ifndef and #define, save arg",
"if",
"not",
"ifndef",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"'#ifndef'",
":",
"# set ifndef to the header guard presented on the #ifndef line.",
"ifndef",
"=",
"linesplit",
"[",
"1",
"]",
"ifndef_linenum",
"=",
"linenum",
"if",
"not",
"define",
"and",
"linesplit",
"[",
"0",
"]",
"==",
"'#define'",
":",
"define",
"=",
"linesplit",
"[",
"1",
"]",
"# find the last occurrence of #endif, save entire line",
"if",
"line",
".",
"startswith",
"(",
"'#endif'",
")",
":",
"endif",
"=",
"line",
"endif_linenum",
"=",
"linenum",
"if",
"not",
"ifndef",
":",
"error",
"(",
"filename",
",",
"0",
",",
"'build/header_guard'",
",",
"5",
",",
"'No #ifndef header guard found, suggested CPP variable is: %s'",
"%",
"cppvar",
")",
"return",
"if",
"not",
"define",
":",
"error",
"(",
"filename",
",",
"0",
",",
"'build/header_guard'",
",",
"5",
",",
"'No #define header guard found, suggested CPP variable is: %s'",
"%",
"cppvar",
")",
"return",
"# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__",
"# for backward compatibility.",
"if",
"ifndef",
"!=",
"cppvar",
":",
"error_level",
"=",
"0",
"if",
"ifndef",
"!=",
"cppvar",
"+",
"'_'",
":",
"error_level",
"=",
"5",
"ParseNolintSuppressions",
"(",
"filename",
",",
"lines",
"[",
"ifndef_linenum",
"]",
",",
"ifndef_linenum",
",",
"error",
")",
"error",
"(",
"filename",
",",
"ifndef_linenum",
",",
"'build/header_guard'",
",",
"error_level",
",",
"'#ifndef header guard has wrong style, please use: %s'",
"%",
"cppvar",
")",
"if",
"define",
"!=",
"ifndef",
":",
"error",
"(",
"filename",
",",
"0",
",",
"'build/header_guard'",
",",
"5",
",",
"'#ifndef and #define don\\'t match, suggested CPP variable is: %s'",
"%",
"cppvar",
")",
"return",
"if",
"endif",
"!=",
"(",
"'#endif // %s'",
"%",
"cppvar",
")",
":",
"error_level",
"=",
"0",
"if",
"endif",
"!=",
"(",
"'#endif // %s'",
"%",
"(",
"cppvar",
"+",
"'_'",
")",
")",
":",
"error_level",
"=",
"5",
"ParseNolintSuppressions",
"(",
"filename",
",",
"lines",
"[",
"endif_linenum",
"]",
",",
"endif_linenum",
",",
"error",
")",
"error",
"(",
"filename",
",",
"endif_linenum",
",",
"'build/header_guard'",
",",
"error_level",
",",
"'#endif line should be \"#endif // %s\"'",
"%",
"cppvar",
")"
] | https://github.com/s9xie/DSN/blob/065e49898d239f5c96be558616b2556eabc50351/scripts/cpp_lint.py#L1404-L1476 | ||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/SANS/SANSUtility.py | python | CummulativeTimeSeriesPropertyAdder._update_single_valued_entries | (self, workspace) | We need to update single-valued entries which are based on the
cumulative time series
:param workspace: the workspace which requires the changes | We need to update single-valued entries which are based on the
cumulative time series
:param workspace: the workspace which requires the changes | [
"We",
"need",
"to",
"update",
"single",
"-",
"valued",
"entries",
"which",
"are",
"based",
"on",
"the",
"cumulative",
"time",
"series",
":",
"param",
"workspace",
":",
"the",
"workspace",
"which",
"requires",
"the",
"changes"
] | def _update_single_valued_entries(self, workspace):
"""
We need to update single-valued entries which are based on the
cumulative time series
:param workspace: the workspace which requires the changes
"""
run = workspace.getRun()
alg_log = AlgorithmManager.createUnmanaged("AddSampleLog")
alg_log.initialize()
alg_log.setChild(True)
for element in self._single_valued:
if run.hasProperty(element):
type_converter = self._type_map[element]
new_value = type_converter(self._original_single_valued_lhs[element]
+ self._original_single_valued_rhs[element])
alg_log.setProperty("Workspace", workspace)
alg_log.setProperty("LogName", element)
alg_log.setProperty("LogText", str(new_value))
alg_log.setProperty("LogType", "Number")
alg_log.execute() | [
"def",
"_update_single_valued_entries",
"(",
"self",
",",
"workspace",
")",
":",
"run",
"=",
"workspace",
".",
"getRun",
"(",
")",
"alg_log",
"=",
"AlgorithmManager",
".",
"createUnmanaged",
"(",
"\"AddSampleLog\"",
")",
"alg_log",
".",
"initialize",
"(",
")",
"alg_log",
".",
"setChild",
"(",
"True",
")",
"for",
"element",
"in",
"self",
".",
"_single_valued",
":",
"if",
"run",
".",
"hasProperty",
"(",
"element",
")",
":",
"type_converter",
"=",
"self",
".",
"_type_map",
"[",
"element",
"]",
"new_value",
"=",
"type_converter",
"(",
"self",
".",
"_original_single_valued_lhs",
"[",
"element",
"]",
"+",
"self",
".",
"_original_single_valued_rhs",
"[",
"element",
"]",
")",
"alg_log",
".",
"setProperty",
"(",
"\"Workspace\"",
",",
"workspace",
")",
"alg_log",
".",
"setProperty",
"(",
"\"LogName\"",
",",
"element",
")",
"alg_log",
".",
"setProperty",
"(",
"\"LogText\"",
",",
"str",
"(",
"new_value",
")",
")",
"alg_log",
".",
"setProperty",
"(",
"\"LogType\"",
",",
"\"Number\"",
")",
"alg_log",
".",
"execute",
"(",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/SANSUtility.py#L1345-L1365 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/mhlib.py | python | MH.listallfolders | (self) | return self.listallsubfolders('') | Return the names of all folders and subfolders, recursively. | Return the names of all folders and subfolders, recursively. | [
"Return",
"the",
"names",
"of",
"all",
"folders",
"and",
"subfolders",
"recursively",
"."
def listallfolders(self):
    """Return the names of all folders and subfolders, recursively."""
    # Recursing from the empty path enumerates every folder under the
    # MH root rather than just the top level.
    return self.listallsubfolders('')
"def",
"listallfolders",
"(",
"self",
")",
":",
"return",
"self",
".",
"listallsubfolders",
"(",
"''",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/mhlib.py#L179-L181 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/compileall.py | python | main | () | return True | Script main program. | Script main program. | [
"Script",
"main",
"program",
"."
def main():
    """Script main program.

    Parses the command line, then compiles the requested files and
    directories (or, with no positional arguments, everything on
    sys.path).  Returns True on success, False if any compilation
    failed, an input file list could not be read, or the run was
    interrupted.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Utilities to support installing Python libraries.')
    parser.add_argument('-l', action='store_const', const=0,
                        default=None, dest='maxlevels',
                        help="don't recurse into subdirectories")
    parser.add_argument('-r', type=int, dest='recursion',
                        help=('control the maximum recursion level. '
                              'if `-l` and `-r` options are specified, '
                              'then `-r` takes precedence.'))
    parser.add_argument('-f', action='store_true', dest='force',
                        help='force rebuild even if timestamps are up to date')
    parser.add_argument('-q', action='count', dest='quiet', default=0,
                        help='output only error messages; -qq will suppress '
                             'the error messages as well.')
    parser.add_argument('-b', action='store_true', dest='legacy',
                        help='use legacy (pre-PEP3147) compiled file locations')
    parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
                        help=('directory to prepend to file paths for use in '
                              'compile-time tracebacks and in runtime '
                              'tracebacks in cases where the source file is '
                              'unavailable'))
    parser.add_argument('-s', metavar='STRIPDIR', dest='stripdir',
                        default=None,
                        help=('part of path to left-strip from path '
                              'to source file - for example buildroot. '
                              '`-d` and `-s` options cannot be '
                              'specified together.'))
    parser.add_argument('-p', metavar='PREPENDDIR', dest='prependdir',
                        default=None,
                        help=('path to add as prefix to path '
                              'to source file - for example / to make '
                              'it absolute when some part is removed '
                              'by `-s` option. '
                              '`-d` and `-p` options cannot be '
                              'specified together.'))
    parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
                        help=('skip files matching the regular expression; '
                              'the regexp is searched for in the full path '
                              'of each file considered for compilation'))
    parser.add_argument('-i', metavar='FILE', dest='flist',
                        help=('add all the files and directories listed in '
                              'FILE to the list considered for compilation; '
                              'if "-", names are read from stdin'))
    parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
                        help=('zero or more file and directory names '
                              'to compile; if no arguments given, defaults '
                              'to the equivalent of -l sys.path'))
    parser.add_argument('-j', '--workers', default=1,
                        type=int, help='Run compileall concurrently')
    # Expose PycInvalidationMode members as CLI-friendly choices
    # (e.g. CHECKED_HASH -> "checked-hash").
    invalidation_modes = [mode.name.lower().replace('_', '-')
                          for mode in py_compile.PycInvalidationMode]
    parser.add_argument('--invalidation-mode',
                        choices=sorted(invalidation_modes),
                        help=('set .pyc invalidation mode; defaults to '
                              '"checked-hash" if the SOURCE_DATE_EPOCH '
                              'environment variable is set, and '
                              '"timestamp" otherwise.'))
    parser.add_argument('-o', action='append', type=int, dest='opt_levels',
                        help=('Optimization levels to run compilation with. '
                              'Default is -1 which uses the optimization level '
                              'of the Python interpreter itself (see -O).'))
    parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest',
                        help='Ignore symlinks pointing outsite of the DIR')
    parser.add_argument('--hardlink-dupes', action='store_true',
                        dest='hardlink_dupes',
                        help='Hardlink duplicated pyc files')
    args = parser.parse_args()
    compile_dests = args.compile_dest

    if args.rx:
        import re
        args.rx = re.compile(args.rx)

    # An empty -e argument is treated the same as not giving it at all.
    if args.limit_sl_dest == "":
        args.limit_sl_dest = None

    # -r takes precedence over -l when both are supplied.
    if args.recursion is not None:
        maxlevels = args.recursion
    else:
        maxlevels = args.maxlevels

    if args.opt_levels is None:
        args.opt_levels = [-1]

    # Hardlinking only deduplicates across distinct optimization levels.
    if len(args.opt_levels) == 1 and args.hardlink_dupes:
        parser.error(("Hardlinking of duplicated bytecode makes sense "
                      "only for more than one optimization level."))

    if args.ddir is not None and (
        args.stripdir is not None or args.prependdir is not None
    ):
        parser.error("-d cannot be used in combination with -s or -p")

    # if flist is provided then load it
    if args.flist:
        try:
            with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
                for line in f:
                    compile_dests.append(line.strip())
        except OSError:
            if args.quiet < 2:
                print("Error reading file list {}".format(args.flist))
            return False

    # Map the CLI spelling back to the enum member name
    # ("checked-hash" -> CHECKED_HASH).
    if args.invalidation_mode:
        ivl_mode = args.invalidation_mode.replace('-', '_').upper()
        invalidation_mode = py_compile.PycInvalidationMode[ivl_mode]
    else:
        invalidation_mode = None

    success = True
    try:
        if compile_dests:
            for dest in compile_dests:
                if os.path.isfile(dest):
                    if not compile_file(dest, args.ddir, args.force, args.rx,
                                        args.quiet, args.legacy,
                                        invalidation_mode=invalidation_mode,
                                        stripdir=args.stripdir,
                                        prependdir=args.prependdir,
                                        optimize=args.opt_levels,
                                        limit_sl_dest=args.limit_sl_dest,
                                        hardlink_dupes=args.hardlink_dupes):
                        success = False
                else:
                    if not compile_dir(dest, maxlevels, args.ddir,
                                       args.force, args.rx, args.quiet,
                                       args.legacy, workers=args.workers,
                                       invalidation_mode=invalidation_mode,
                                       stripdir=args.stripdir,
                                       prependdir=args.prependdir,
                                       optimize=args.opt_levels,
                                       limit_sl_dest=args.limit_sl_dest,
                                       hardlink_dupes=args.hardlink_dupes):
                        success = False
            return success
        else:
            # No destinations given: compile everything on sys.path.
            return compile_path(legacy=args.legacy, force=args.force,
                                quiet=args.quiet,
                                invalidation_mode=invalidation_mode)
    except KeyboardInterrupt:
        if args.quiet < 2:
            print("\n[interrupted]")
        return False
    # Unreachable in practice: every branch above returns.
    return True
"def",
"main",
"(",
")",
":",
"import",
"argparse",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Utilities to support installing Python libraries.'",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"0",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'maxlevels'",
",",
"help",
"=",
"\"don't recurse into subdirectories\"",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"type",
"=",
"int",
",",
"dest",
"=",
"'recursion'",
",",
"help",
"=",
"(",
"'control the maximum recursion level. '",
"'if `-l` and `-r` options are specified, '",
"'then `-r` takes precedence.'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'force'",
",",
"help",
"=",
"'force rebuild even if timestamps are up to date'",
")",
"parser",
".",
"add_argument",
"(",
"'-q'",
",",
"action",
"=",
"'count'",
",",
"dest",
"=",
"'quiet'",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'output only error messages; -qq will suppress '",
"'the error messages as well.'",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'legacy'",
",",
"help",
"=",
"'use legacy (pre-PEP3147) compiled file locations'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"metavar",
"=",
"'DESTDIR'",
",",
"dest",
"=",
"'ddir'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"(",
"'directory to prepend to file paths for use in '",
"'compile-time tracebacks and in runtime '",
"'tracebacks in cases where the source file is '",
"'unavailable'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"metavar",
"=",
"'STRIPDIR'",
",",
"dest",
"=",
"'stripdir'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"(",
"'part of path to left-strip from path '",
"'to source file - for example buildroot. '",
"'`-d` and `-s` options cannot be '",
"'specified together.'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"metavar",
"=",
"'PREPENDDIR'",
",",
"dest",
"=",
"'prependdir'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"(",
"'path to add as prefix to path '",
"'to source file - for example / to make '",
"'it absolute when some part is removed '",
"'by `-s` option. '",
"'`-d` and `-p` options cannot be '",
"'specified together.'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-x'",
",",
"metavar",
"=",
"'REGEXP'",
",",
"dest",
"=",
"'rx'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"(",
"'skip files matching the regular expression; '",
"'the regexp is searched for in the full path '",
"'of each file considered for compilation'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"metavar",
"=",
"'FILE'",
",",
"dest",
"=",
"'flist'",
",",
"help",
"=",
"(",
"'add all the files and directories listed in '",
"'FILE to the list considered for compilation; '",
"'if \"-\", names are read from stdin'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'compile_dest'",
",",
"metavar",
"=",
"'FILE|DIR'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"(",
"'zero or more file and directory names '",
"'to compile; if no arguments given, defaults '",
"'to the equivalent of -l sys.path'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-j'",
",",
"'--workers'",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Run compileall concurrently'",
")",
"invalidation_modes",
"=",
"[",
"mode",
".",
"name",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"for",
"mode",
"in",
"py_compile",
".",
"PycInvalidationMode",
"]",
"parser",
".",
"add_argument",
"(",
"'--invalidation-mode'",
",",
"choices",
"=",
"sorted",
"(",
"invalidation_modes",
")",
",",
"help",
"=",
"(",
"'set .pyc invalidation mode; defaults to '",
"'\"checked-hash\" if the SOURCE_DATE_EPOCH '",
"'environment variable is set, and '",
"'\"timestamp\" otherwise.'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"action",
"=",
"'append'",
",",
"type",
"=",
"int",
",",
"dest",
"=",
"'opt_levels'",
",",
"help",
"=",
"(",
"'Optimization levels to run compilation with. '",
"'Default is -1 which uses the optimization level '",
"'of the Python interpreter itself (see -O).'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"metavar",
"=",
"'DIR'",
",",
"dest",
"=",
"'limit_sl_dest'",
",",
"help",
"=",
"'Ignore symlinks pointing outsite of the DIR'",
")",
"parser",
".",
"add_argument",
"(",
"'--hardlink-dupes'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'hardlink_dupes'",
",",
"help",
"=",
"'Hardlink duplicated pyc files'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"compile_dests",
"=",
"args",
".",
"compile_dest",
"if",
"args",
".",
"rx",
":",
"import",
"re",
"args",
".",
"rx",
"=",
"re",
".",
"compile",
"(",
"args",
".",
"rx",
")",
"if",
"args",
".",
"limit_sl_dest",
"==",
"\"\"",
":",
"args",
".",
"limit_sl_dest",
"=",
"None",
"if",
"args",
".",
"recursion",
"is",
"not",
"None",
":",
"maxlevels",
"=",
"args",
".",
"recursion",
"else",
":",
"maxlevels",
"=",
"args",
".",
"maxlevels",
"if",
"args",
".",
"opt_levels",
"is",
"None",
":",
"args",
".",
"opt_levels",
"=",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"args",
".",
"opt_levels",
")",
"==",
"1",
"and",
"args",
".",
"hardlink_dupes",
":",
"parser",
".",
"error",
"(",
"(",
"\"Hardlinking of duplicated bytecode makes sense \"",
"\"only for more than one optimization level.\"",
")",
")",
"if",
"args",
".",
"ddir",
"is",
"not",
"None",
"and",
"(",
"args",
".",
"stripdir",
"is",
"not",
"None",
"or",
"args",
".",
"prependdir",
"is",
"not",
"None",
")",
":",
"parser",
".",
"error",
"(",
"\"-d cannot be used in combination with -s or -p\"",
")",
"# if flist is provided then load it",
"if",
"args",
".",
"flist",
":",
"try",
":",
"with",
"(",
"sys",
".",
"stdin",
"if",
"args",
".",
"flist",
"==",
"'-'",
"else",
"open",
"(",
"args",
".",
"flist",
")",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"compile_dests",
".",
"append",
"(",
"line",
".",
"strip",
"(",
")",
")",
"except",
"OSError",
":",
"if",
"args",
".",
"quiet",
"<",
"2",
":",
"print",
"(",
"\"Error reading file list {}\"",
".",
"format",
"(",
"args",
".",
"flist",
")",
")",
"return",
"False",
"if",
"args",
".",
"invalidation_mode",
":",
"ivl_mode",
"=",
"args",
".",
"invalidation_mode",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
".",
"upper",
"(",
")",
"invalidation_mode",
"=",
"py_compile",
".",
"PycInvalidationMode",
"[",
"ivl_mode",
"]",
"else",
":",
"invalidation_mode",
"=",
"None",
"success",
"=",
"True",
"try",
":",
"if",
"compile_dests",
":",
"for",
"dest",
"in",
"compile_dests",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"dest",
")",
":",
"if",
"not",
"compile_file",
"(",
"dest",
",",
"args",
".",
"ddir",
",",
"args",
".",
"force",
",",
"args",
".",
"rx",
",",
"args",
".",
"quiet",
",",
"args",
".",
"legacy",
",",
"invalidation_mode",
"=",
"invalidation_mode",
",",
"stripdir",
"=",
"args",
".",
"stripdir",
",",
"prependdir",
"=",
"args",
".",
"prependdir",
",",
"optimize",
"=",
"args",
".",
"opt_levels",
",",
"limit_sl_dest",
"=",
"args",
".",
"limit_sl_dest",
",",
"hardlink_dupes",
"=",
"args",
".",
"hardlink_dupes",
")",
":",
"success",
"=",
"False",
"else",
":",
"if",
"not",
"compile_dir",
"(",
"dest",
",",
"maxlevels",
",",
"args",
".",
"ddir",
",",
"args",
".",
"force",
",",
"args",
".",
"rx",
",",
"args",
".",
"quiet",
",",
"args",
".",
"legacy",
",",
"workers",
"=",
"args",
".",
"workers",
",",
"invalidation_mode",
"=",
"invalidation_mode",
",",
"stripdir",
"=",
"args",
".",
"stripdir",
",",
"prependdir",
"=",
"args",
".",
"prependdir",
",",
"optimize",
"=",
"args",
".",
"opt_levels",
",",
"limit_sl_dest",
"=",
"args",
".",
"limit_sl_dest",
",",
"hardlink_dupes",
"=",
"args",
".",
"hardlink_dupes",
")",
":",
"success",
"=",
"False",
"return",
"success",
"else",
":",
"return",
"compile_path",
"(",
"legacy",
"=",
"args",
".",
"legacy",
",",
"force",
"=",
"args",
".",
"force",
",",
"quiet",
"=",
"args",
".",
"quiet",
",",
"invalidation_mode",
"=",
"invalidation_mode",
")",
"except",
"KeyboardInterrupt",
":",
"if",
"args",
".",
"quiet",
"<",
"2",
":",
"print",
"(",
"\"\\n[interrupted]\"",
")",
"return",
"False",
"return",
"True"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/compileall.py#L306-L455 | |
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | lts/deps/v8/third_party/jinja2/compiler.py | python | CodeGenerator.visit_Block | (self, node, frame) | Call a block and register it for the template. | Call a block and register it for the template. | [
"Call",
"a",
"block",
"and",
"register",
"it",
"for",
"the",
"template",
"."
def visit_Block(self, node, frame):
    """Call a block and register it for the template."""
    # Number of indentation levels opened here that must be closed at
    # the end.
    level = 0
    if frame.toplevel:
        # if we know that we are a child template, there is no need to
        # check if we are one
        if self.has_known_extends:
            return
        if self.extends_so_far > 0:
            # Only render the block when this template was not extended
            # at runtime; guard the emitted code accordingly.
            self.writeline('if parent_template is None:')
            self.indent()
            level += 1

    # Scoped blocks see a derived (child) context; unscoped blocks use
    # the plain context reference.
    if node.scoped:
        context = self.derive_context(frame)
    else:
        context = self.get_context_ref()

    if supports_yield_from and not self.environment.is_async and \
            frame.buffer is None:
        # Fast path: delegate directly with `yield from`.
        self.writeline('yield from context.blocks[%r][0](%s)' % (
            node.name, context), node)
    else:
        # Buffered or async output: iterate the block's events and write
        # each one through the frame.
        loop = self.environment.is_async and 'async for' or 'for'
        self.writeline('%s event in context.blocks[%r][0](%s):' % (
            loop, node.name, context), node)
        self.indent()
        self.simple_write('event', frame)
        self.outdent()

    # Close the `if parent_template is None:` guard if one was opened.
    self.outdent(level)
"def",
"visit_Block",
"(",
"self",
",",
"node",
",",
"frame",
")",
":",
"level",
"=",
"0",
"if",
"frame",
".",
"toplevel",
":",
"# if we know that we are a child template, there is no need to",
"# check if we are one",
"if",
"self",
".",
"has_known_extends",
":",
"return",
"if",
"self",
".",
"extends_so_far",
">",
"0",
":",
"self",
".",
"writeline",
"(",
"'if parent_template is None:'",
")",
"self",
".",
"indent",
"(",
")",
"level",
"+=",
"1",
"if",
"node",
".",
"scoped",
":",
"context",
"=",
"self",
".",
"derive_context",
"(",
"frame",
")",
"else",
":",
"context",
"=",
"self",
".",
"get_context_ref",
"(",
")",
"if",
"supports_yield_from",
"and",
"not",
"self",
".",
"environment",
".",
"is_async",
"and",
"frame",
".",
"buffer",
"is",
"None",
":",
"self",
".",
"writeline",
"(",
"'yield from context.blocks[%r][0](%s)'",
"%",
"(",
"node",
".",
"name",
",",
"context",
")",
",",
"node",
")",
"else",
":",
"loop",
"=",
"self",
".",
"environment",
".",
"is_async",
"and",
"'async for'",
"or",
"'for'",
"self",
".",
"writeline",
"(",
"'%s event in context.blocks[%r][0](%s):'",
"%",
"(",
"loop",
",",
"node",
".",
"name",
",",
"context",
")",
",",
"node",
")",
"self",
".",
"indent",
"(",
")",
"self",
".",
"simple_write",
"(",
"'event'",
",",
"frame",
")",
"self",
".",
"outdent",
"(",
")",
"self",
".",
"outdent",
"(",
"level",
")"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/v8/third_party/jinja2/compiler.py#L811-L841 | ||
pyne/pyne | 0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3 | pyne/mesh.py | python | Tag.__init__ | (self, mesh=None, name=None, doc=None) | Parameters
----------
mesh : Mesh, optional
The PyNE mesh to tag.
name : str, optional
The name of the tag.
doc : str, optional
Documentation string for the tag. | Parameters
----------
mesh : Mesh, optional
The PyNE mesh to tag.
name : str, optional
The name of the tag.
doc : str, optional
Documentation string for the tag. | [
"Parameters",
"----------",
"mesh",
":",
"Mesh",
"optional",
"The",
"PyNE",
"mesh",
"to",
"tag",
".",
"name",
":",
"str",
"optional",
"The",
"name",
"of",
"the",
"tag",
".",
"doc",
":",
"str",
"optional",
"Documentation",
"string",
"for",
"the",
"tag",
"."
] | def __init__(self, mesh=None, name=None, doc=None):
"""Parameters
----------
mesh : Mesh, optional
The PyNE mesh to tag.
name : str, optional
The name of the tag.
doc : str, optional
Documentation string for the tag.
"""
if mesh is None or name is None:
self._lazy_args = {'mesh': mesh, 'name': name, 'doc': doc}
return
self.mesh = mesh
self.name = name
mesh.tags[name] = self
if doc is None:
doc = "the {0!r} tag".format(name)
self.__doc__ = doc
if hasattr(self, '_lazy_args'):
del self._lazy_args | [
"def",
"__init__",
"(",
"self",
",",
"mesh",
"=",
"None",
",",
"name",
"=",
"None",
",",
"doc",
"=",
"None",
")",
":",
"if",
"mesh",
"is",
"None",
"or",
"name",
"is",
"None",
":",
"self",
".",
"_lazy_args",
"=",
"{",
"'mesh'",
":",
"mesh",
",",
"'name'",
":",
"name",
",",
"'doc'",
":",
"doc",
"}",
"return",
"self",
".",
"mesh",
"=",
"mesh",
"self",
".",
"name",
"=",
"name",
"mesh",
".",
"tags",
"[",
"name",
"]",
"=",
"self",
"if",
"doc",
"is",
"None",
":",
"doc",
"=",
"\"the {0!r} tag\"",
".",
"format",
"(",
"name",
")",
"self",
".",
"__doc__",
"=",
"doc",
"if",
"hasattr",
"(",
"self",
",",
"'_lazy_args'",
")",
":",
"del",
"self",
".",
"_lazy_args"
] | https://github.com/pyne/pyne/blob/0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3/pyne/mesh.py#L65-L86 | ||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ReflectometryILLPreprocess.py | python | ReflectometryILLPreprocess._flatBkgRanges | (self, ws) | return highRange + lowRange | Return spectrum number ranges for flat background fitting. | Return spectrum number ranges for flat background fitting. | [
"Return",
"spectrum",
"number",
"ranges",
"for",
"flat",
"background",
"fitting",
"."
] | def _flatBkgRanges(self, ws):
"""Return spectrum number ranges for flat background fitting."""
sign = self._workspaceIndexDirection(ws)
peakPos = ws.run().getProperty(common.SampleLogs.FOREGROUND_CENTRE).value
# Convert to spectrum numbers
peakPos = ws.getSpectrum(peakPos).getSpectrumNo()
peakHalfWidths = self._foregroundWidths()
lowPeakHalfWidth = peakHalfWidths[0]
lowOffset = self.getProperty(Prop.LOW_BKG_OFFSET).value
lowWidth = self.getProperty(Prop.LOW_BKG_WIDTH).value
lowStartIndex = peakPos - sign * (lowPeakHalfWidth + lowOffset + lowWidth)
lowEndIndex = lowStartIndex + sign * lowWidth
highPeakHalfWidth = peakHalfWidths[1]
highOffset = self.getProperty(Prop.HIGH_BKG_OFFSET).value
highWidth = self.getProperty(Prop.HIGH_BKG_WIDTH).value
highStartIndex = peakPos + sign * (highPeakHalfWidth + highOffset)
highEndIndex = highStartIndex + sign * highWidth
if sign > 0:
lowRange = [lowStartIndex - sign * 0.5, lowEndIndex - sign * 0.5]
highRange = [highStartIndex + sign * 0.5, highEndIndex + sign * 0.5]
return lowRange + highRange
# Indices decrease with increasing bragg angle. Swap everything.
lowRange = [lowEndIndex - sign * 0.5, lowStartIndex - sign * 0.5]
highRange = [highEndIndex + sign * 0.5, highStartIndex + sign * 0.5]
return highRange + lowRange | [
"def",
"_flatBkgRanges",
"(",
"self",
",",
"ws",
")",
":",
"sign",
"=",
"self",
".",
"_workspaceIndexDirection",
"(",
"ws",
")",
"peakPos",
"=",
"ws",
".",
"run",
"(",
")",
".",
"getProperty",
"(",
"common",
".",
"SampleLogs",
".",
"FOREGROUND_CENTRE",
")",
".",
"value",
"# Convert to spectrum numbers",
"peakPos",
"=",
"ws",
".",
"getSpectrum",
"(",
"peakPos",
")",
".",
"getSpectrumNo",
"(",
")",
"peakHalfWidths",
"=",
"self",
".",
"_foregroundWidths",
"(",
")",
"lowPeakHalfWidth",
"=",
"peakHalfWidths",
"[",
"0",
"]",
"lowOffset",
"=",
"self",
".",
"getProperty",
"(",
"Prop",
".",
"LOW_BKG_OFFSET",
")",
".",
"value",
"lowWidth",
"=",
"self",
".",
"getProperty",
"(",
"Prop",
".",
"LOW_BKG_WIDTH",
")",
".",
"value",
"lowStartIndex",
"=",
"peakPos",
"-",
"sign",
"*",
"(",
"lowPeakHalfWidth",
"+",
"lowOffset",
"+",
"lowWidth",
")",
"lowEndIndex",
"=",
"lowStartIndex",
"+",
"sign",
"*",
"lowWidth",
"highPeakHalfWidth",
"=",
"peakHalfWidths",
"[",
"1",
"]",
"highOffset",
"=",
"self",
".",
"getProperty",
"(",
"Prop",
".",
"HIGH_BKG_OFFSET",
")",
".",
"value",
"highWidth",
"=",
"self",
".",
"getProperty",
"(",
"Prop",
".",
"HIGH_BKG_WIDTH",
")",
".",
"value",
"highStartIndex",
"=",
"peakPos",
"+",
"sign",
"*",
"(",
"highPeakHalfWidth",
"+",
"highOffset",
")",
"highEndIndex",
"=",
"highStartIndex",
"+",
"sign",
"*",
"highWidth",
"if",
"sign",
">",
"0",
":",
"lowRange",
"=",
"[",
"lowStartIndex",
"-",
"sign",
"*",
"0.5",
",",
"lowEndIndex",
"-",
"sign",
"*",
"0.5",
"]",
"highRange",
"=",
"[",
"highStartIndex",
"+",
"sign",
"*",
"0.5",
",",
"highEndIndex",
"+",
"sign",
"*",
"0.5",
"]",
"return",
"lowRange",
"+",
"highRange",
"# Indices decrease with increasing bragg angle. Swap everything.",
"lowRange",
"=",
"[",
"lowEndIndex",
"-",
"sign",
"*",
"0.5",
",",
"lowStartIndex",
"-",
"sign",
"*",
"0.5",
"]",
"highRange",
"=",
"[",
"highEndIndex",
"+",
"sign",
"*",
"0.5",
",",
"highStartIndex",
"+",
"sign",
"*",
"0.5",
"]",
"return",
"highRange",
"+",
"lowRange"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ReflectometryILLPreprocess.py#L305-L329 | |
echronos/echronos | c996f1d2c8af6c6536205eb319c1bf1d4d84569c | external_tools/ply_info/example/GardenSnake/GardenSnake.py | python | p_expr_stmt | (p) | expr_stmt : testlist ASSIGN testlist
| testlist | expr_stmt : testlist ASSIGN testlist
| testlist | [
"expr_stmt",
":",
"testlist",
"ASSIGN",
"testlist",
"|",
"testlist"
] | def p_expr_stmt(p):
"""expr_stmt : testlist ASSIGN testlist
| testlist """
if len(p) == 2:
# a list of expressions
p[0] = ast.Discard(p[1])
else:
p[0] = Assign(p[1], p[3]) | [
"def",
"p_expr_stmt",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"2",
":",
"# a list of expressions",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Discard",
"(",
"p",
"[",
"1",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"Assign",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
")"
] | https://github.com/echronos/echronos/blob/c996f1d2c8af6c6536205eb319c1bf1d4d84569c/external_tools/ply_info/example/GardenSnake/GardenSnake.py#L442-L449 | ||
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/python/training/input.py | python | shuffle_batch | (tensors, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None, name=None) | Creates batches by randomly shuffling tensors.
This function adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensors` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensors`.
If `enqueue_many` is `False`, `tensors` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensors` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensors` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`.
The `capacity` argument controls the how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
For example:
```python
# Creates batches of 32 images and 32 labels.
image_batch, label_batch = tf.train.shuffle_batch(
[single_image, single_label],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
```
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensors` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queue is closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`get_shape` method will have a first `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors: The list or dictionary of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the types as `tensors`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors`. | Creates batches by randomly shuffling tensors. | [
"Creates",
"batches",
"by",
"randomly",
"shuffling",
"tensors",
"."
] | def shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None, name=None):
"""Creates batches by randomly shuffling tensors.
This function adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensors` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensors`.
If `enqueue_many` is `False`, `tensors` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensors` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensors` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`.
The `capacity` argument controls the how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
For example:
```python
# Creates batches of 32 images and 32 labels.
image_batch, label_batch = tf.train.shuffle_batch(
[single_image, single_label],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
```
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensors` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queue is closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`get_shape` method will have a first `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors: The list or dictionary of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the types as `tensors`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors`.
"""
tensor_list = _as_tensor_list(tensors)
with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
tensor_list = _validate(tensor_list)
tensor_list, sparse_info = _serialize_sparse_tensors(
tensor_list, enqueue_many)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
logging_ops.scalar_summary(summary_name, full)
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
else:
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
return _as_original_type(tensors, dequeued) | [
"def",
"shuffle_batch",
"(",
"tensors",
",",
"batch_size",
",",
"capacity",
",",
"min_after_dequeue",
",",
"num_threads",
"=",
"1",
",",
"seed",
"=",
"None",
",",
"enqueue_many",
"=",
"False",
",",
"shapes",
"=",
"None",
",",
"allow_smaller_final_batch",
"=",
"False",
",",
"shared_name",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"tensor_list",
"=",
"_as_tensor_list",
"(",
"tensors",
")",
"with",
"ops",
".",
"op_scope",
"(",
"tensor_list",
",",
"name",
",",
"\"shuffle_batch\"",
")",
"as",
"name",
":",
"tensor_list",
"=",
"_validate",
"(",
"tensor_list",
")",
"tensor_list",
",",
"sparse_info",
"=",
"_serialize_sparse_tensors",
"(",
"tensor_list",
",",
"enqueue_many",
")",
"types",
"=",
"_dtypes",
"(",
"[",
"tensor_list",
"]",
")",
"shapes",
"=",
"_shapes",
"(",
"[",
"tensor_list",
"]",
",",
"shapes",
",",
"enqueue_many",
")",
"queue",
"=",
"data_flow_ops",
".",
"RandomShuffleQueue",
"(",
"capacity",
"=",
"capacity",
",",
"min_after_dequeue",
"=",
"min_after_dequeue",
",",
"seed",
"=",
"seed",
",",
"dtypes",
"=",
"types",
",",
"shapes",
"=",
"shapes",
",",
"shared_name",
"=",
"shared_name",
")",
"_enqueue",
"(",
"queue",
",",
"tensor_list",
",",
"num_threads",
",",
"enqueue_many",
")",
"full",
"=",
"(",
"math_ops",
".",
"cast",
"(",
"math_ops",
".",
"maximum",
"(",
"0",
",",
"queue",
".",
"size",
"(",
")",
"-",
"min_after_dequeue",
")",
",",
"dtypes",
".",
"float32",
")",
"*",
"(",
"1.",
"/",
"(",
"capacity",
"-",
"min_after_dequeue",
")",
")",
")",
"# Note that name contains a '/' at the end so we intentionally do not place",
"# a '/' after %s below.",
"summary_name",
"=",
"(",
"\"queue/%sfraction_over_%d_of_%d_full\"",
"%",
"(",
"name",
",",
"min_after_dequeue",
",",
"capacity",
"-",
"min_after_dequeue",
")",
")",
"logging_ops",
".",
"scalar_summary",
"(",
"summary_name",
",",
"full",
")",
"if",
"allow_smaller_final_batch",
":",
"dequeued",
"=",
"queue",
".",
"dequeue_up_to",
"(",
"batch_size",
",",
"name",
"=",
"name",
")",
"else",
":",
"dequeued",
"=",
"queue",
".",
"dequeue_many",
"(",
"batch_size",
",",
"name",
"=",
"name",
")",
"dequeued",
"=",
"_deserialize_sparse_tensors",
"(",
"dequeued",
",",
"sparse_info",
")",
"return",
"_as_original_type",
"(",
"tensors",
",",
"dequeued",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/training/input.py#L714-L819 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/dataview.py | python | DataViewCtrl.PrependProgressColumn | (*args, **kwargs) | return _dataview.DataViewCtrl_PrependProgressColumn(*args, **kwargs) | PrependProgressColumn(self, PyObject label_or_bitmap, unsigned int model_column,
int mode=DATAVIEW_CELL_INERT, int width=DVC_DEFAULT_WIDTH,
int align=ALIGN_CENTER, int flags=DATAVIEW_COL_RESIZABLE) -> DataViewColumn | PrependProgressColumn(self, PyObject label_or_bitmap, unsigned int model_column,
int mode=DATAVIEW_CELL_INERT, int width=DVC_DEFAULT_WIDTH,
int align=ALIGN_CENTER, int flags=DATAVIEW_COL_RESIZABLE) -> DataViewColumn | [
"PrependProgressColumn",
"(",
"self",
"PyObject",
"label_or_bitmap",
"unsigned",
"int",
"model_column",
"int",
"mode",
"=",
"DATAVIEW_CELL_INERT",
"int",
"width",
"=",
"DVC_DEFAULT_WIDTH",
"int",
"align",
"=",
"ALIGN_CENTER",
"int",
"flags",
"=",
"DATAVIEW_COL_RESIZABLE",
")",
"-",
">",
"DataViewColumn"
] | def PrependProgressColumn(*args, **kwargs):
"""
PrependProgressColumn(self, PyObject label_or_bitmap, unsigned int model_column,
int mode=DATAVIEW_CELL_INERT, int width=DVC_DEFAULT_WIDTH,
int align=ALIGN_CENTER, int flags=DATAVIEW_COL_RESIZABLE) -> DataViewColumn
"""
return _dataview.DataViewCtrl_PrependProgressColumn(*args, **kwargs) | [
"def",
"PrependProgressColumn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewCtrl_PrependProgressColumn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/dataview.py#L1614-L1620 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/os.py | python | execle | (file, *args) | execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. | execle(file, *args, env) | [
"execle",
"(",
"file",
"*",
"args",
"env",
")"
] | def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env) | [
"def",
"execle",
"(",
"file",
",",
"*",
"args",
")",
":",
"env",
"=",
"args",
"[",
"-",
"1",
"]",
"execve",
"(",
"file",
",",
"args",
"[",
":",
"-",
"1",
"]",
",",
"env",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/os.py#L316-L322 | ||
borglab/gtsam | a5bee157efce6a0563704bce6a5d188c29817f39 | gtsam/3rdparty/GeographicLib/python/geographiclib/geomath.py | python | Math.cbrt | (x) | return y if x >= 0 else -y | Real cube root of a number | Real cube root of a number | [
"Real",
"cube",
"root",
"of",
"a",
"number"
] | def cbrt(x):
"""Real cube root of a number"""
y = math.pow(abs(x), 1/3.0)
return y if x >= 0 else -y | [
"def",
"cbrt",
"(",
"x",
")",
":",
"y",
"=",
"math",
".",
"pow",
"(",
"abs",
"(",
"x",
")",
",",
"1",
"/",
"3.0",
")",
"return",
"y",
"if",
"x",
">=",
"0",
"else",
"-",
"y"
] | https://github.com/borglab/gtsam/blob/a5bee157efce6a0563704bce6a5d188c29817f39/gtsam/3rdparty/GeographicLib/python/geographiclib/geomath.py#L43-L47 | |
apache/impala | 8ddac48f3428c86f2cbd037ced89cfb903298b12 | shell/ext-py/prettytable-0.7.2/prettytable.py | python | PrettyTable._get_vertical_char | (self) | return self._vertical_char | The charcter used when printing table borders to draw vertical lines
Arguments:
vertical_char - single character string used to draw vertical lines | The charcter used when printing table borders to draw vertical lines | [
"The",
"charcter",
"used",
"when",
"printing",
"table",
"borders",
"to",
"draw",
"vertical",
"lines"
] | def _get_vertical_char(self):
"""The charcter used when printing table borders to draw vertical lines
Arguments:
vertical_char - single character string used to draw vertical lines"""
return self._vertical_char | [
"def",
"_get_vertical_char",
"(",
"self",
")",
":",
"return",
"self",
".",
"_vertical_char"
] | https://github.com/apache/impala/blob/8ddac48f3428c86f2cbd037ced89cfb903298b12/shell/ext-py/prettytable-0.7.2/prettytable.py#L653-L659 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | ppapi/generators/idl_node.py | python | IDLNode.GetOneOf | (self, *keys) | return None | Get an object for the given key(s). | Get an object for the given key(s). | [
"Get",
"an",
"object",
"for",
"the",
"given",
"key",
"(",
"s",
")",
"."
] | def GetOneOf(self, *keys):
"""Get an object for the given key(s)."""
out = self.GetListOf(*keys)
if out:
return out[0]
return None | [
"def",
"GetOneOf",
"(",
"self",
",",
"*",
"keys",
")",
":",
"out",
"=",
"self",
".",
"GetListOf",
"(",
"*",
"keys",
")",
"if",
"out",
":",
"return",
"out",
"[",
"0",
"]",
"return",
"None"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/ppapi/generators/idl_node.py#L166-L171 | |
tensorflow/minigo | 6d89c202cdceaf449aefc3149ab2110d44f1a6a4 | oneoffs/prepare_bigquery.py | python | parse_comment_node | (comment) | return post_Q, debug_rows | Resign Threshold: -0.88
-0.0662
D4 (100) ==> D16 (14) ==> Q16 (3) ==> Q4 (1) ==> Q: -0.07149
move: action Q U P P-Dir N soft-N p-delta p-rel
D4 : -0.028, -0.048, 0.020, 0.048, 0.064, 100 0.1096 0.06127 1.27
D16 : -0.024, -0.043, 0.019, 0.044, 0.059, 96 0.1053 0.06135 1.40 | Resign Threshold: -0.88
-0.0662
D4 (100) ==> D16 (14) ==> Q16 (3) ==> Q4 (1) ==> Q: -0.07149
move: action Q U P P-Dir N soft-N p-delta p-rel
D4 : -0.028, -0.048, 0.020, 0.048, 0.064, 100 0.1096 0.06127 1.27
D16 : -0.024, -0.043, 0.019, 0.044, 0.059, 96 0.1053 0.06135 1.40 | [
"Resign",
"Threshold",
":",
"-",
"0",
".",
"88",
"-",
"0",
".",
"0662",
"D4",
"(",
"100",
")",
"==",
">",
"D16",
"(",
"14",
")",
"==",
">",
"Q16",
"(",
"3",
")",
"==",
">",
"Q4",
"(",
"1",
")",
"==",
">",
"Q",
":",
"-",
"0",
".",
"07149",
"move",
":",
"action",
"Q",
"U",
"P",
"P",
"-",
"Dir",
"N",
"soft",
"-",
"N",
"p",
"-",
"delta",
"p",
"-",
"rel",
"D4",
":",
"-",
"0",
".",
"028",
"-",
"0",
".",
"048",
"0",
".",
"020",
"0",
".",
"048",
"0",
".",
"064",
"100",
"0",
".",
"1096",
"0",
".",
"06127",
"1",
".",
"27",
"D16",
":",
"-",
"0",
".",
"024",
"-",
"0",
".",
"043",
"0",
".",
"019",
"0",
".",
"044",
"0",
".",
"059",
"96",
"0",
".",
"1053",
"0",
".",
"06135",
"1",
".",
"40"
] | def parse_comment_node(comment):
# Example of a comment node. The resign threshold line appears only
# for the first move in the game; it gets preprocessed by extract_game_data
"""
Resign Threshold: -0.88
-0.0662
D4 (100) ==> D16 (14) ==> Q16 (3) ==> Q4 (1) ==> Q: -0.07149
move: action Q U P P-Dir N soft-N p-delta p-rel
D4 : -0.028, -0.048, 0.020, 0.048, 0.064, 100 0.1096 0.06127 1.27
D16 : -0.024, -0.043, 0.019, 0.044, 0.059, 96 0.1053 0.06135 1.40
"""
lines = comment.split('\n')
if lines[0].startswith('Resign'):
lines = lines[1:]
post_Q = float(lines[0])
debug_rows = []
comment_splitter = re.compile(r'[ :,]')
for line in lines[3:]:
if not line:
continue
columns = comment_splitter.split(line)
columns = list(filter(bool, columns))
coord, *other_columns = columns
coord = coords.to_flat(coords.from_gtp(coord))
debug_rows.append(DebugRow(coord, *map(float, other_columns)))
if FLAGS.only_top_move:
break
return post_Q, debug_rows | [
"def",
"parse_comment_node",
"(",
"comment",
")",
":",
"# Example of a comment node. The resign threshold line appears only",
"# for the first move in the game; it gets preprocessed by extract_game_data",
"lines",
"=",
"comment",
".",
"split",
"(",
"'\\n'",
")",
"if",
"lines",
"[",
"0",
"]",
".",
"startswith",
"(",
"'Resign'",
")",
":",
"lines",
"=",
"lines",
"[",
"1",
":",
"]",
"post_Q",
"=",
"float",
"(",
"lines",
"[",
"0",
"]",
")",
"debug_rows",
"=",
"[",
"]",
"comment_splitter",
"=",
"re",
".",
"compile",
"(",
"r'[ :,]'",
")",
"for",
"line",
"in",
"lines",
"[",
"3",
":",
"]",
":",
"if",
"not",
"line",
":",
"continue",
"columns",
"=",
"comment_splitter",
".",
"split",
"(",
"line",
")",
"columns",
"=",
"list",
"(",
"filter",
"(",
"bool",
",",
"columns",
")",
")",
"coord",
",",
"",
"*",
"other_columns",
"=",
"columns",
"coord",
"=",
"coords",
".",
"to_flat",
"(",
"coords",
".",
"from_gtp",
"(",
"coord",
")",
")",
"debug_rows",
".",
"append",
"(",
"DebugRow",
"(",
"coord",
",",
"*",
"map",
"(",
"float",
",",
"other_columns",
")",
")",
")",
"if",
"FLAGS",
".",
"only_top_move",
":",
"break",
"return",
"post_Q",
",",
"debug_rows"
] | https://github.com/tensorflow/minigo/blob/6d89c202cdceaf449aefc3149ab2110d44f1a6a4/oneoffs/prepare_bigquery.py#L192-L221 | |
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | CondTools/SiStrip/python/o2o_helper.py | python | send_mail | (subject, message, send_to, send_from, text_attachments=[]) | Send an email. [send_to] needs to be a list. | Send an email. [send_to] needs to be a list. | [
"Send",
"an",
"email",
".",
"[",
"send_to",
"]",
"needs",
"to",
"be",
"a",
"list",
"."
] | def send_mail(subject, message, send_to, send_from, text_attachments=[]):
'''Send an email. [send_to] needs to be a list.'''
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = send_from
msg['To'] = ','.join(send_to)
msg.attach(MIMEText(message))
for fn in text_attachments:
with open(fn, 'rb') as txtfile:
attachment = MIMEText(txtfile.read())
attachment.add_header('Content-Disposition', 'attachment', filename=os.path.basename(fn))
msg.attach(attachment)
s = smtplib.SMTP('localhost')
s.sendmail(send_from, send_to, msg.as_string())
s.quit() | [
"def",
"send_mail",
"(",
"subject",
",",
"message",
",",
"send_to",
",",
"send_from",
",",
"text_attachments",
"=",
"[",
"]",
")",
":",
"msg",
"=",
"MIMEMultipart",
"(",
")",
"msg",
"[",
"'Subject'",
"]",
"=",
"subject",
"msg",
"[",
"'From'",
"]",
"=",
"send_from",
"msg",
"[",
"'To'",
"]",
"=",
"','",
".",
"join",
"(",
"send_to",
")",
"msg",
".",
"attach",
"(",
"MIMEText",
"(",
"message",
")",
")",
"for",
"fn",
"in",
"text_attachments",
":",
"with",
"open",
"(",
"fn",
",",
"'rb'",
")",
"as",
"txtfile",
":",
"attachment",
"=",
"MIMEText",
"(",
"txtfile",
".",
"read",
"(",
")",
")",
"attachment",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'attachment'",
",",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
")",
")",
"msg",
".",
"attach",
"(",
"attachment",
")",
"s",
"=",
"smtplib",
".",
"SMTP",
"(",
"'localhost'",
")",
"s",
".",
"sendmail",
"(",
"send_from",
",",
"send_to",
",",
"msg",
".",
"as_string",
"(",
")",
")",
"s",
".",
"quit",
"(",
")"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/CondTools/SiStrip/python/o2o_helper.py#L131-L147 | ||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/telemetry/third_party/web-page-replay/httpclient.py | python | ReplayHttpArchiveFetch.__init__ | (self, http_archive, real_dns_lookup, inject_script,
use_diff_on_unknown_requests=False,
use_closest_match=False, scramble_images=False) | Initialize ReplayHttpArchiveFetch.
Args:
http_archive: an instance of a HttpArchive
real_dns_lookup: a function that resolves a host to an IP.
inject_script: script string to inject in all pages
use_diff_on_unknown_requests: If True, log unknown requests
with a diff to requests that look similar.
use_closest_match: If True, on replay mode, serve the closest match
in the archive instead of giving a 404. | Initialize ReplayHttpArchiveFetch. | [
"Initialize",
"ReplayHttpArchiveFetch",
"."
] | def __init__(self, http_archive, real_dns_lookup, inject_script,
use_diff_on_unknown_requests=False,
use_closest_match=False, scramble_images=False):
"""Initialize ReplayHttpArchiveFetch.
Args:
http_archive: an instance of a HttpArchive
real_dns_lookup: a function that resolves a host to an IP.
inject_script: script string to inject in all pages
use_diff_on_unknown_requests: If True, log unknown requests
with a diff to requests that look similar.
use_closest_match: If True, on replay mode, serve the closest match
in the archive instead of giving a 404.
"""
self.http_archive = http_archive
self.inject_script = inject_script
self.use_diff_on_unknown_requests = use_diff_on_unknown_requests
self.use_closest_match = use_closest_match
self.scramble_images = scramble_images
self.real_http_fetch = RealHttpFetch(real_dns_lookup) | [
"def",
"__init__",
"(",
"self",
",",
"http_archive",
",",
"real_dns_lookup",
",",
"inject_script",
",",
"use_diff_on_unknown_requests",
"=",
"False",
",",
"use_closest_match",
"=",
"False",
",",
"scramble_images",
"=",
"False",
")",
":",
"self",
".",
"http_archive",
"=",
"http_archive",
"self",
".",
"inject_script",
"=",
"inject_script",
"self",
".",
"use_diff_on_unknown_requests",
"=",
"use_diff_on_unknown_requests",
"self",
".",
"use_closest_match",
"=",
"use_closest_match",
"self",
".",
"scramble_images",
"=",
"scramble_images",
"self",
".",
"real_http_fetch",
"=",
"RealHttpFetch",
"(",
"real_dns_lookup",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/web-page-replay/httpclient.py#L409-L428 | ||
chromiumembedded/cef | 80caf947f3fe2210e5344713c5281d8af9bdc295 | tools/cef_parser.py | python | obj_class.get_attribs | (self) | return self.attribs | Return all attributes as a dictionary. | Return all attributes as a dictionary. | [
"Return",
"all",
"attributes",
"as",
"a",
"dictionary",
"."
] | def get_attribs(self):
""" Return all attributes as a dictionary. """
return self.attribs | [
"def",
"get_attribs",
"(",
"self",
")",
":",
"return",
"self",
".",
"attribs"
] | https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/cef_parser.py#L968-L970 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/turtle.py | python | RawTurtle.end_fill | (self) | Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.begin_fill()
>>> turtle.forward(100)
>>> turtle.left(90)
>>> turtle.forward(100)
>>> turtle.left(90)
>>> turtle.forward(100)
>>> turtle.left(90)
>>> turtle.forward(100)
>>> turtle.end_fill() | Fill the shape drawn after the call begin_fill(). | [
"Fill",
"the",
"shape",
"drawn",
"after",
"the",
"call",
"begin_fill",
"()",
"."
] | def end_fill(self):
"""Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.begin_fill()
>>> turtle.forward(100)
>>> turtle.left(90)
>>> turtle.forward(100)
>>> turtle.left(90)
>>> turtle.forward(100)
>>> turtle.left(90)
>>> turtle.forward(100)
>>> turtle.end_fill()
"""
self.fill(False) | [
"def",
"end_fill",
"(",
"self",
")",
":",
"self",
".",
"fill",
"(",
"False",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/turtle.py#L3197-L3213 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/inspect.py | python | _signature_get_partial | (wrapped_sig, partial, extra_args=()) | return wrapped_sig.replace(parameters=new_params.values()) | Private helper to calculate how 'wrapped_sig' signature will
look like after applying a 'functools.partial' object (or alike)
on it. | Private helper to calculate how 'wrapped_sig' signature will
look like after applying a 'functools.partial' object (or alike)
on it. | [
"Private",
"helper",
"to",
"calculate",
"how",
"wrapped_sig",
"signature",
"will",
"look",
"like",
"after",
"applying",
"a",
"functools",
".",
"partial",
"object",
"(",
"or",
"alike",
")",
"on",
"it",
"."
] | def _signature_get_partial(wrapped_sig, partial, extra_args=()):
"""Private helper to calculate how 'wrapped_sig' signature will
look like after applying a 'functools.partial' object (or alike)
on it.
"""
old_params = wrapped_sig.parameters
new_params = OrderedDict(old_params.items())
partial_args = partial.args or ()
partial_keywords = partial.keywords or {}
if extra_args:
partial_args = extra_args + partial_args
try:
ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {!r} has incorrect arguments'.format(partial)
raise ValueError(msg) from ex
transform_to_kwonly = False
for param_name, param in old_params.items():
try:
arg_value = ba.arguments[param_name]
except KeyError:
pass
else:
if param.kind is _POSITIONAL_ONLY:
# If positional-only parameter is bound by partial,
# it effectively disappears from the signature
new_params.pop(param_name)
continue
if param.kind is _POSITIONAL_OR_KEYWORD:
if param_name in partial_keywords:
# This means that this parameter, and all parameters
# after it should be keyword-only (and var-positional
# should be removed). Here's why. Consider the following
# function:
# foo(a, b, *args, c):
# pass
#
# "partial(foo, a='spam')" will have the following
# signature: "(*, a='spam', b, c)". Because attempting
# to call that partial with "(10, 20)" arguments will
# raise a TypeError, saying that "a" argument received
# multiple values.
transform_to_kwonly = True
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
else:
# was passed as a positional argument
new_params.pop(param.name)
continue
if param.kind is _KEYWORD_ONLY:
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
if transform_to_kwonly:
assert param.kind is not _POSITIONAL_ONLY
if param.kind is _POSITIONAL_OR_KEYWORD:
new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
new_params[param_name] = new_param
new_params.move_to_end(param_name)
elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
new_params.move_to_end(param_name)
elif param.kind is _VAR_POSITIONAL:
new_params.pop(param.name)
return wrapped_sig.replace(parameters=new_params.values()) | [
"def",
"_signature_get_partial",
"(",
"wrapped_sig",
",",
"partial",
",",
"extra_args",
"=",
"(",
")",
")",
":",
"old_params",
"=",
"wrapped_sig",
".",
"parameters",
"new_params",
"=",
"OrderedDict",
"(",
"old_params",
".",
"items",
"(",
")",
")",
"partial_args",
"=",
"partial",
".",
"args",
"or",
"(",
")",
"partial_keywords",
"=",
"partial",
".",
"keywords",
"or",
"{",
"}",
"if",
"extra_args",
":",
"partial_args",
"=",
"extra_args",
"+",
"partial_args",
"try",
":",
"ba",
"=",
"wrapped_sig",
".",
"bind_partial",
"(",
"*",
"partial_args",
",",
"*",
"*",
"partial_keywords",
")",
"except",
"TypeError",
"as",
"ex",
":",
"msg",
"=",
"'partial object {!r} has incorrect arguments'",
".",
"format",
"(",
"partial",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"from",
"ex",
"transform_to_kwonly",
"=",
"False",
"for",
"param_name",
",",
"param",
"in",
"old_params",
".",
"items",
"(",
")",
":",
"try",
":",
"arg_value",
"=",
"ba",
".",
"arguments",
"[",
"param_name",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"if",
"param",
".",
"kind",
"is",
"_POSITIONAL_ONLY",
":",
"# If positional-only parameter is bound by partial,",
"# it effectively disappears from the signature",
"new_params",
".",
"pop",
"(",
"param_name",
")",
"continue",
"if",
"param",
".",
"kind",
"is",
"_POSITIONAL_OR_KEYWORD",
":",
"if",
"param_name",
"in",
"partial_keywords",
":",
"# This means that this parameter, and all parameters",
"# after it should be keyword-only (and var-positional",
"# should be removed). Here's why. Consider the following",
"# function:",
"# foo(a, b, *args, c):",
"# pass",
"#",
"# \"partial(foo, a='spam')\" will have the following",
"# signature: \"(*, a='spam', b, c)\". Because attempting",
"# to call that partial with \"(10, 20)\" arguments will",
"# raise a TypeError, saying that \"a\" argument received",
"# multiple values.",
"transform_to_kwonly",
"=",
"True",
"# Set the new default value",
"new_params",
"[",
"param_name",
"]",
"=",
"param",
".",
"replace",
"(",
"default",
"=",
"arg_value",
")",
"else",
":",
"# was passed as a positional argument",
"new_params",
".",
"pop",
"(",
"param",
".",
"name",
")",
"continue",
"if",
"param",
".",
"kind",
"is",
"_KEYWORD_ONLY",
":",
"# Set the new default value",
"new_params",
"[",
"param_name",
"]",
"=",
"param",
".",
"replace",
"(",
"default",
"=",
"arg_value",
")",
"if",
"transform_to_kwonly",
":",
"assert",
"param",
".",
"kind",
"is",
"not",
"_POSITIONAL_ONLY",
"if",
"param",
".",
"kind",
"is",
"_POSITIONAL_OR_KEYWORD",
":",
"new_param",
"=",
"new_params",
"[",
"param_name",
"]",
".",
"replace",
"(",
"kind",
"=",
"_KEYWORD_ONLY",
")",
"new_params",
"[",
"param_name",
"]",
"=",
"new_param",
"new_params",
".",
"move_to_end",
"(",
"param_name",
")",
"elif",
"param",
".",
"kind",
"in",
"(",
"_KEYWORD_ONLY",
",",
"_VAR_KEYWORD",
")",
":",
"new_params",
".",
"move_to_end",
"(",
"param_name",
")",
"elif",
"param",
".",
"kind",
"is",
"_VAR_POSITIONAL",
":",
"new_params",
".",
"pop",
"(",
"param",
".",
"name",
")",
"return",
"wrapped_sig",
".",
"replace",
"(",
"parameters",
"=",
"new_params",
".",
"values",
"(",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/inspect.py#L1723-L1796 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/win/toolchain/toolchain.py | python | ExtractIso | (iso_path) | return target_path | Use 7zip to extract the contents of the given .iso (or self-extracting
.exe). | Use 7zip to extract the contents of the given .iso (or self-extracting
.exe). | [
"Use",
"7zip",
"to",
"extract",
"the",
"contents",
"of",
"the",
"given",
".",
"iso",
"(",
"or",
"self",
"-",
"extracting",
".",
"exe",
")",
"."
] | def ExtractIso(iso_path):
"""Use 7zip to extract the contents of the given .iso (or self-extracting
.exe)."""
target_path = TempDir()
sys.stdout.write('Extracting %s...\n' % iso_path)
sys.stdout.flush()
# TODO(scottmg): Do this (and exe) manually with python code.
# Note that at the beginning of main() we set the working directory to 7z's
# location.
RunOrDie('7z x "%s" -y "-o%s" >nul' % (iso_path, target_path))
return target_path | [
"def",
"ExtractIso",
"(",
"iso_path",
")",
":",
"target_path",
"=",
"TempDir",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'Extracting %s...\\n'",
"%",
"iso_path",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"# TODO(scottmg): Do this (and exe) manually with python code.",
"# Note that at the beginning of main() we set the working directory to 7z's",
"# location.",
"RunOrDie",
"(",
"'7z x \"%s\" -y \"-o%s\" >nul'",
"%",
"(",
"iso_path",
",",
"target_path",
")",
")",
"return",
"target_path"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/win/toolchain/toolchain.py#L224-L234 | |
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/mac_tool.py | python | MacTool.ExecCopyInfoPlist | (self, source, dest) | Copies the |source| Info.plist to the destination directory |dest|. | Copies the |source| Info.plist to the destination directory |dest|. | [
"Copies",
"the",
"|source|",
"Info",
".",
"plist",
"to",
"the",
"destination",
"directory",
"|dest|",
"."
] | def ExecCopyInfoPlist(self, source, dest):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Go through all the environment variables and replace them as variables in
# the file.
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
lines = string.replace(lines, evar, os.environ[key])
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest) | [
"def",
"ExecCopyInfoPlist",
"(",
"self",
",",
"source",
",",
"dest",
")",
":",
"# Read the source Info.plist into memory.",
"fd",
"=",
"open",
"(",
"source",
",",
"'r'",
")",
"lines",
"=",
"fd",
".",
"read",
"(",
")",
"fd",
".",
"close",
"(",
")",
"# Go through all the environment variables and replace them as variables in",
"# the file.",
"for",
"key",
"in",
"os",
".",
"environ",
":",
"if",
"key",
".",
"startswith",
"(",
"'_'",
")",
":",
"continue",
"evar",
"=",
"'${%s}'",
"%",
"key",
"lines",
"=",
"string",
".",
"replace",
"(",
"lines",
",",
"evar",
",",
"os",
".",
"environ",
"[",
"key",
"]",
")",
"# Write out the file with variables replaced.",
"fd",
"=",
"open",
"(",
"dest",
",",
"'w'",
")",
"fd",
".",
"write",
"(",
"lines",
")",
"fd",
".",
"close",
"(",
")",
"# Now write out PkgInfo file now that the Info.plist file has been",
"# \"compiled\".",
"self",
".",
"_WritePkgInfo",
"(",
"dest",
")"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/mac_tool.py#L108-L130 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/multiprocessing/util.py | python | Finalize.still_active | (self) | return self._key in _finalizer_registry | Return whether this finalizer is still waiting to invoke callback | Return whether this finalizer is still waiting to invoke callback | [
"Return",
"whether",
"this",
"finalizer",
"is",
"still",
"waiting",
"to",
"invoke",
"callback"
] | def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry | [
"def",
"still_active",
"(",
"self",
")",
":",
"return",
"self",
".",
"_key",
"in",
"_finalizer_registry"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/multiprocessing/util.py#L218-L222 | |
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/Tkinter.py | python | Text.window_cget | (self, index, option) | return self.tk.call(self._w, 'window', 'cget', index, option) | Return the value of OPTION of an embedded window at INDEX. | Return the value of OPTION of an embedded window at INDEX. | [
"Return",
"the",
"value",
"of",
"OPTION",
"of",
"an",
"embedded",
"window",
"at",
"INDEX",
"."
] | def window_cget(self, index, option):
"""Return the value of OPTION of an embedded window at INDEX."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'window', 'cget', index, option) | [
"def",
"window_cget",
"(",
"self",
",",
"index",
",",
"option",
")",
":",
"if",
"option",
"[",
":",
"1",
"]",
"!=",
"'-'",
":",
"option",
"=",
"'-'",
"+",
"option",
"if",
"option",
"[",
"-",
"1",
":",
"]",
"==",
"'_'",
":",
"option",
"=",
"option",
"[",
":",
"-",
"1",
"]",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"'window'",
",",
"'cget'",
",",
"index",
",",
"option",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/Tkinter.py#L3166-L3172 | |
lighttransport/nanort | 74063967336311f54ede5dffdfa242123825033b | deps/cpplint.py | python | CheckOperatorSpacing | (filename, clean_lines, linenum, error) | Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Checks for horizontal spacing around operators. | [
"Checks",
"for",
"horizontal",
"spacing",
"around",
"operators",
"."
] | def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if ((Search(r'[\w.]=', line) or
Search(r'=[\w.]', line))
and not Search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
and not Search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
# If the operator is followed by a comma, assume it's be used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. Those are checked separately
# in CheckRValueReference
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically should should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1)) | [
"def",
"CheckOperatorSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# Don't try to do spacing checks for operator methods. Do this by",
"# replacing the troublesome characters with something else,",
"# preserving column position for all other characters.",
"#",
"# The replacement is done repeatedly to avoid false positives from",
"# operators that call operators.",
"while",
"True",
":",
"match",
"=",
"Match",
"(",
"r'^(.*\\boperator\\b)(\\S+)(\\s*\\(.*)$'",
",",
"line",
")",
"if",
"match",
":",
"line",
"=",
"match",
".",
"group",
"(",
"1",
")",
"+",
"(",
"'_'",
"*",
"len",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"+",
"match",
".",
"group",
"(",
"3",
")",
"else",
":",
"break",
"# We allow no-spaces around = within an if: \"if ( (a=Foo()) == 0 )\".",
"# Otherwise not. Note we only check for non-spaces on *both* sides;",
"# sometimes people put non-spaces on one side when aligning ='s among",
"# many lines (not that this is behavior that I approve of...)",
"if",
"(",
"(",
"Search",
"(",
"r'[\\w.]='",
",",
"line",
")",
"or",
"Search",
"(",
"r'=[\\w.]'",
",",
"line",
")",
")",
"and",
"not",
"Search",
"(",
"r'\\b(if|while|for) '",
",",
"line",
")",
"# Operators taken from [lex.operators] in C++11 standard.",
"and",
"not",
"Search",
"(",
"r'(>=|<=|==|!=|&=|\\^=|\\|=|\\+=|\\*=|\\/=|\\%=)'",
",",
"line",
")",
"and",
"not",
"Search",
"(",
"r'operator='",
",",
"line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"4",
",",
"'Missing spaces around ='",
")",
"# It's ok not to have spaces around binary operators like + - * /, but if",
"# there's too little whitespace, we get concerned. It's hard to tell,",
"# though, so we punt on this one for now. TODO.",
"# You should always have whitespace around binary operators.",
"#",
"# Check <= and >= first to avoid false positives with < and >, then",
"# check non-include lines for spacing around < and >.",
"#",
"# If the operator is followed by a comma, assume it's be used in a",
"# macro context and don't do any checks. This avoids false",
"# positives.",
"#",
"# Note that && is not included here. Those are checked separately",
"# in CheckRValueReference",
"match",
"=",
"Search",
"(",
"r'[^<>=!\\s](==|!=|<=|>=|\\|\\|)[^<>=!\\s,;\\)]'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"elif",
"not",
"Match",
"(",
"r'#.*include'",
",",
"line",
")",
":",
"# Look for < that is not surrounded by spaces. This is only",
"# triggered if both sides are missing spaces, even though",
"# technically should should flag if at least one side is missing a",
"# space. This is done to avoid some false positives with shifts.",
"match",
"=",
"Match",
"(",
"r'^(.*[^\\s<])<[^\\s=<,]'",
",",
"line",
")",
"if",
"match",
":",
"(",
"_",
",",
"_",
",",
"end_pos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"if",
"end_pos",
"<=",
"-",
"1",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around <'",
")",
"# Look for > that is not surrounded by spaces. Similar to the",
"# above, we only trigger if both sides are missing spaces to avoid",
"# false positives with shifts.",
"match",
"=",
"Match",
"(",
"r'^(.*[^-\\s>])>[^\\s=>,]'",
",",
"line",
")",
"if",
"match",
":",
"(",
"_",
",",
"_",
",",
"start_pos",
")",
"=",
"ReverseCloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"if",
"start_pos",
"<=",
"-",
"1",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around >'",
")",
"# We allow no-spaces around << when used like this: 10<<20, but",
"# not otherwise (particularly, not when used as streams)",
"#",
"# We also allow operators following an opening parenthesis, since",
"# those tend to be macros that deal with operators.",
"match",
"=",
"Search",
"(",
"r'(operator|[^\\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\\s,=<])'",
",",
"line",
")",
"if",
"(",
"match",
"and",
"not",
"(",
"match",
".",
"group",
"(",
"1",
")",
".",
"isdigit",
"(",
")",
"and",
"match",
".",
"group",
"(",
"2",
")",
".",
"isdigit",
"(",
")",
")",
"and",
"not",
"(",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'operator'",
"and",
"match",
".",
"group",
"(",
"2",
")",
"==",
"';'",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around <<'",
")",
"# We allow no-spaces around >> for almost anything. This is because",
"# C++11 allows \">>\" to close nested templates, which accounts for",
"# most cases when \">>\" is not followed by a space.",
"#",
"# We still warn on \">>\" followed by alpha character, because that is",
"# likely due to \">>\" being used for right shifts, e.g.:",
"# value >> alpha",
"#",
"# When \">>\" is used to close templates, the alphanumeric letter that",
"# follows would be part of an identifier, and there should still be",
"# a space separating the template type and the identifier.",
"# type<type<type>> alpha",
"match",
"=",
"Search",
"(",
"r'>>[a-zA-Z_]'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around >>'",
")",
"# There shouldn't be space around unary operators",
"match",
"=",
"Search",
"(",
"r'(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"4",
",",
"'Extra space for operator %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")"
] | https://github.com/lighttransport/nanort/blob/74063967336311f54ede5dffdfa242123825033b/deps/cpplint.py#L3127-L3239 | ||
facebook/ThreatExchange | 31914a51820c73c8a0daffe62ccca29a6e3d359e | hasher-matcher-actioner/hmalib/common/models/models_base.py | python | DynamoDBItem.write_to_table_if_not_found | (self, table: Table) | return True | Write record to DDB if the PK/SK combination does not exist.
Returns:
* True when record was written (did not exist)
* False when record could not be written (PK/SK combo existed) | Write record to DDB if the PK/SK combination does not exist. | [
"Write",
"record",
"to",
"DDB",
"if",
"the",
"PK",
"/",
"SK",
"combination",
"does",
"not",
"exist",
"."
] | def write_to_table_if_not_found(self, table: Table) -> bool:
"""
Write record to DDB if the PK/SK combination does not exist.
Returns:
* True when record was written (did not exist)
* False when record could not be written (PK/SK combo existed)
"""
try:
table.put_item(
Item=self.to_dynamodb_item(),
ConditionExpression="attribute_not_exists(PK) AND attribute_not_exists(SK)",
)
except ClientError as client_error:
# boto3 exception handling https://imgflip.com/i/5f5zfj
if (
client_error.response.get("Error", {"Code", "Unknown"}).get(
"Code", "Unknown"
)
== "ConditionalCheckFailedException"
):
return False
else:
raise client_error
return True | [
"def",
"write_to_table_if_not_found",
"(",
"self",
",",
"table",
":",
"Table",
")",
"->",
"bool",
":",
"try",
":",
"table",
".",
"put_item",
"(",
"Item",
"=",
"self",
".",
"to_dynamodb_item",
"(",
")",
",",
"ConditionExpression",
"=",
"\"attribute_not_exists(PK) AND attribute_not_exists(SK)\"",
",",
")",
"except",
"ClientError",
"as",
"client_error",
":",
"# boto3 exception handling https://imgflip.com/i/5f5zfj",
"if",
"(",
"client_error",
".",
"response",
".",
"get",
"(",
"\"Error\"",
",",
"{",
"\"Code\"",
",",
"\"Unknown\"",
"}",
")",
".",
"get",
"(",
"\"Code\"",
",",
"\"Unknown\"",
")",
"==",
"\"ConditionalCheckFailedException\"",
")",
":",
"return",
"False",
"else",
":",
"raise",
"client_error",
"return",
"True"
] | https://github.com/facebook/ThreatExchange/blob/31914a51820c73c8a0daffe62ccca29a6e3d359e/hasher-matcher-actioner/hmalib/common/models/models_base.py#L22-L46 | |
tfwu/FaceDetection-ConvNet-3D | f9251c48eb40c5aec8fba7455115c355466555be | python/mxnet/ndarray.py | python | _make_ndarray_function | (handle) | return ret_function | Create a NDArray function from the FunctionHandle. | Create a NDArray function from the FunctionHandle. | [
"Create",
"a",
"NDArray",
"function",
"from",
"the",
"FunctionHandle",
"."
] | def _make_ndarray_function(handle):
"""Create a NDArray function from the FunctionHandle."""
NDARRAY_ARG_BEFORE_SCALAR = 1
ACCEPT_EMPTY_MUTATE_TARGET = 1 << 2
# Get the property of NDArray
n_used_vars = mx_uint()
n_scalars = mx_uint()
n_mutate_vars = mx_uint()
type_mask = ctypes.c_int()
check_call(_LIB.MXFuncDescribe(
handle,
ctypes.byref(n_used_vars),
ctypes.byref(n_scalars),
ctypes.byref(n_mutate_vars),
ctypes.byref(type_mask)))
n_mutate_vars = n_mutate_vars.value
n_used_vars = n_used_vars.value
n_scalars = n_scalars.value
type_mask = type_mask.value
accept_empty_mutate = (type_mask & ACCEPT_EMPTY_MUTATE_TARGET) != 0
# infer type of the function
if (type_mask & NDARRAY_ARG_BEFORE_SCALAR) != 0:
use_vars_range = range(0, n_used_vars)
scalar_range = range(n_used_vars, n_used_vars + n_scalars)
else:
scalar_range = range(0, n_scalars)
use_vars_range = range(n_scalars, n_used_vars + n_scalars)
# Get the information from the function
name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXFuncGetInfo(
handle, ctypes.byref(name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(ret_type)))
func_name = py_str(name.value)
param_str = ctypes2docstring(num_args, arg_names, arg_types, arg_descs)
doc_str = ('%s\n\n' +
'%s\n' +
'out : NDArray, optional\n' +
' The output NDArray to hold the result.\n\n'+
'Returns\n' +
'-------\n' +
'out : NDArray\n'+
' The output of binary function.')
doc_str = doc_str % (py_str(desc.value), param_str)
# Definition of internal functions.
def binary_ndarray_function(lhs, rhs, out=None):
"""Internal binary function
"""
if out:
if isinstance(out, NDArray) == False:
raise TypeError('out must be NDArray')
if not out.writable:
raise TypeError('out must be writable')
else:
if not accept_empty_mutate:
raise TypeError('argument out is required to call %s' % func_name)
out = NDArray(_new_empty_handle())
check_call(_LIB.MXFuncInvokeEx(handle,
c_array(NDArrayHandle, (lhs.handle, rhs.handle)),
c_array(mx_float, ()),
c_array(NDArrayHandle, (out.handle,)),
ctypes.c_int(0),
c_array(ctypes.c_char_p, []),
c_array(ctypes.c_char_p, [])))
return out
def unary_ndarray_function(src, out=None):
"""internal NDArray function"""
if out:
if isinstance(out, NDArray) == False:
raise TypeError('out must be NDArray')
if not out.writable:
raise TypeError('out must be writable')
else:
if not accept_empty_mutate:
raise TypeError('argument out is required to call %s' % func_name)
out = NDArray(_new_empty_handle())
check_call(_LIB.MXFuncInvokeEx( \
handle, \
c_array(NDArrayHandle, (src.handle,)), \
c_array(mx_float, ()), \
c_array(NDArrayHandle, (out.handle,)), \
ctypes.c_int(0), \
c_array(ctypes.c_char_p, []), \
c_array(ctypes.c_char_p, [])))
return out
def generic_ndarray_function(*args, **kwargs):
"""Invoke this function by passing in parameters
Parameters
----------
*args
Positional arguments of input scalars and NDArray
out : NDArray or tuple of NDArray, optional
Output NDArray, used to hold the output result.
Returns
-------
out : NDArray
The result NDArray(tuple) of result of computation.
"""
if 'out' in kwargs:
mutate_vars = kwargs['out']
if isinstance(mutate_vars, NDArray):
mutate_vars = (mutate_vars,)
if len(mutate_vars) != n_mutate_vars:
raise TypeError('expect %d out in %s', n_mutate_vars, func_name)
del kwargs['out']
else:
if accept_empty_mutate:
mutate_vars = tuple(
NDArray(_new_empty_handle()) for i in range(n_mutate_vars))
else:
raise TypeError('argument out is required to call %s' % func_name)
check_call(_LIB.MXFuncInvokeEx( \
handle, \
c_array(NDArrayHandle, [args[i].handle for i in use_vars_range]), \
c_array(mx_float, [args[i] for i in scalar_range]), \
c_array(NDArrayHandle, [v.handle for v in mutate_vars]), \
ctypes.c_int(len(kwargs)), \
c_array(ctypes.c_char_p, kwargs.keys()), \
c_array(ctypes.c_char_p, [str(i) for i in kwargs.values()])))
if n_mutate_vars == 1:
return mutate_vars[0]
else:
return mutate_vars
# End of function declaration
if n_mutate_vars == 1 and n_used_vars == 2 and n_scalars == 0:
ret_function = binary_ndarray_function
elif n_mutate_vars == 1 and n_used_vars == 1 and n_scalars == 0:
ret_function = unary_ndarray_function
else:
ret_function = generic_ndarray_function
ret_function.__name__ = func_name
ret_function.__doc__ = doc_str
return ret_function | [
"def",
"_make_ndarray_function",
"(",
"handle",
")",
":",
"NDARRAY_ARG_BEFORE_SCALAR",
"=",
"1",
"ACCEPT_EMPTY_MUTATE_TARGET",
"=",
"1",
"<<",
"2",
"# Get the property of NDArray",
"n_used_vars",
"=",
"mx_uint",
"(",
")",
"n_scalars",
"=",
"mx_uint",
"(",
")",
"n_mutate_vars",
"=",
"mx_uint",
"(",
")",
"type_mask",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXFuncDescribe",
"(",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"n_used_vars",
")",
",",
"ctypes",
".",
"byref",
"(",
"n_scalars",
")",
",",
"ctypes",
".",
"byref",
"(",
"n_mutate_vars",
")",
",",
"ctypes",
".",
"byref",
"(",
"type_mask",
")",
")",
")",
"n_mutate_vars",
"=",
"n_mutate_vars",
".",
"value",
"n_used_vars",
"=",
"n_used_vars",
".",
"value",
"n_scalars",
"=",
"n_scalars",
".",
"value",
"type_mask",
"=",
"type_mask",
".",
"value",
"accept_empty_mutate",
"=",
"(",
"type_mask",
"&",
"ACCEPT_EMPTY_MUTATE_TARGET",
")",
"!=",
"0",
"# infer type of the function",
"if",
"(",
"type_mask",
"&",
"NDARRAY_ARG_BEFORE_SCALAR",
")",
"!=",
"0",
":",
"use_vars_range",
"=",
"range",
"(",
"0",
",",
"n_used_vars",
")",
"scalar_range",
"=",
"range",
"(",
"n_used_vars",
",",
"n_used_vars",
"+",
"n_scalars",
")",
"else",
":",
"scalar_range",
"=",
"range",
"(",
"0",
",",
"n_scalars",
")",
"use_vars_range",
"=",
"range",
"(",
"n_scalars",
",",
"n_used_vars",
"+",
"n_scalars",
")",
"# Get the information from the function",
"name",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"desc",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"num_args",
"=",
"mx_uint",
"(",
")",
"arg_names",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"arg_types",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"arg_descs",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"ret_type",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXFuncGetInfo",
"(",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"name",
")",
",",
"ctypes",
".",
"byref",
"(",
"desc",
")",
",",
"ctypes",
".",
"byref",
"(",
"num_args",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_names",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_types",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_descs",
")",
",",
"ctypes",
".",
"byref",
"(",
"ret_type",
")",
")",
")",
"func_name",
"=",
"py_str",
"(",
"name",
".",
"value",
")",
"param_str",
"=",
"ctypes2docstring",
"(",
"num_args",
",",
"arg_names",
",",
"arg_types",
",",
"arg_descs",
")",
"doc_str",
"=",
"(",
"'%s\\n\\n'",
"+",
"'%s\\n'",
"+",
"'out : NDArray, optional\\n'",
"+",
"' The output NDArray to hold the result.\\n\\n'",
"+",
"'Returns\\n'",
"+",
"'-------\\n'",
"+",
"'out : NDArray\\n'",
"+",
"' The output of binary function.'",
")",
"doc_str",
"=",
"doc_str",
"%",
"(",
"py_str",
"(",
"desc",
".",
"value",
")",
",",
"param_str",
")",
"# Definition of internal functions.",
"def",
"binary_ndarray_function",
"(",
"lhs",
",",
"rhs",
",",
"out",
"=",
"None",
")",
":",
"\"\"\"Internal binary function\n \"\"\"",
"if",
"out",
":",
"if",
"isinstance",
"(",
"out",
",",
"NDArray",
")",
"==",
"False",
":",
"raise",
"TypeError",
"(",
"'out must be NDArray'",
")",
"if",
"not",
"out",
".",
"writable",
":",
"raise",
"TypeError",
"(",
"'out must be writable'",
")",
"else",
":",
"if",
"not",
"accept_empty_mutate",
":",
"raise",
"TypeError",
"(",
"'argument out is required to call %s'",
"%",
"func_name",
")",
"out",
"=",
"NDArray",
"(",
"_new_empty_handle",
"(",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXFuncInvokeEx",
"(",
"handle",
",",
"c_array",
"(",
"NDArrayHandle",
",",
"(",
"lhs",
".",
"handle",
",",
"rhs",
".",
"handle",
")",
")",
",",
"c_array",
"(",
"mx_float",
",",
"(",
")",
")",
",",
"c_array",
"(",
"NDArrayHandle",
",",
"(",
"out",
".",
"handle",
",",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"0",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_char_p",
",",
"[",
"]",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_char_p",
",",
"[",
"]",
")",
")",
")",
"return",
"out",
"def",
"unary_ndarray_function",
"(",
"src",
",",
"out",
"=",
"None",
")",
":",
"\"\"\"internal NDArray function\"\"\"",
"if",
"out",
":",
"if",
"isinstance",
"(",
"out",
",",
"NDArray",
")",
"==",
"False",
":",
"raise",
"TypeError",
"(",
"'out must be NDArray'",
")",
"if",
"not",
"out",
".",
"writable",
":",
"raise",
"TypeError",
"(",
"'out must be writable'",
")",
"else",
":",
"if",
"not",
"accept_empty_mutate",
":",
"raise",
"TypeError",
"(",
"'argument out is required to call %s'",
"%",
"func_name",
")",
"out",
"=",
"NDArray",
"(",
"_new_empty_handle",
"(",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXFuncInvokeEx",
"(",
"handle",
",",
"c_array",
"(",
"NDArrayHandle",
",",
"(",
"src",
".",
"handle",
",",
")",
")",
",",
"c_array",
"(",
"mx_float",
",",
"(",
")",
")",
",",
"c_array",
"(",
"NDArrayHandle",
",",
"(",
"out",
".",
"handle",
",",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"0",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_char_p",
",",
"[",
"]",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_char_p",
",",
"[",
"]",
")",
")",
")",
"return",
"out",
"def",
"generic_ndarray_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Invoke this function by passing in parameters\n\n Parameters\n ----------\n *args\n Positional arguments of input scalars and NDArray\n out : NDArray or tuple of NDArray, optional\n Output NDArray, used to hold the output result.\n\n Returns\n -------\n out : NDArray\n The result NDArray(tuple) of result of computation.\n \"\"\"",
"if",
"'out'",
"in",
"kwargs",
":",
"mutate_vars",
"=",
"kwargs",
"[",
"'out'",
"]",
"if",
"isinstance",
"(",
"mutate_vars",
",",
"NDArray",
")",
":",
"mutate_vars",
"=",
"(",
"mutate_vars",
",",
")",
"if",
"len",
"(",
"mutate_vars",
")",
"!=",
"n_mutate_vars",
":",
"raise",
"TypeError",
"(",
"'expect %d out in %s'",
",",
"n_mutate_vars",
",",
"func_name",
")",
"del",
"kwargs",
"[",
"'out'",
"]",
"else",
":",
"if",
"accept_empty_mutate",
":",
"mutate_vars",
"=",
"tuple",
"(",
"NDArray",
"(",
"_new_empty_handle",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n_mutate_vars",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'argument out is required to call %s'",
"%",
"func_name",
")",
"check_call",
"(",
"_LIB",
".",
"MXFuncInvokeEx",
"(",
"handle",
",",
"c_array",
"(",
"NDArrayHandle",
",",
"[",
"args",
"[",
"i",
"]",
".",
"handle",
"for",
"i",
"in",
"use_vars_range",
"]",
")",
",",
"c_array",
"(",
"mx_float",
",",
"[",
"args",
"[",
"i",
"]",
"for",
"i",
"in",
"scalar_range",
"]",
")",
",",
"c_array",
"(",
"NDArrayHandle",
",",
"[",
"v",
".",
"handle",
"for",
"v",
"in",
"mutate_vars",
"]",
")",
",",
"ctypes",
".",
"c_int",
"(",
"len",
"(",
"kwargs",
")",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_char_p",
",",
"kwargs",
".",
"keys",
"(",
")",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_char_p",
",",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"kwargs",
".",
"values",
"(",
")",
"]",
")",
")",
")",
"if",
"n_mutate_vars",
"==",
"1",
":",
"return",
"mutate_vars",
"[",
"0",
"]",
"else",
":",
"return",
"mutate_vars",
"# End of function declaration",
"if",
"n_mutate_vars",
"==",
"1",
"and",
"n_used_vars",
"==",
"2",
"and",
"n_scalars",
"==",
"0",
":",
"ret_function",
"=",
"binary_ndarray_function",
"elif",
"n_mutate_vars",
"==",
"1",
"and",
"n_used_vars",
"==",
"1",
"and",
"n_scalars",
"==",
"0",
":",
"ret_function",
"=",
"unary_ndarray_function",
"else",
":",
"ret_function",
"=",
"generic_ndarray_function",
"ret_function",
".",
"__name__",
"=",
"func_name",
"ret_function",
".",
"__doc__",
"=",
"doc_str",
"return",
"ret_function"
] | https://github.com/tfwu/FaceDetection-ConvNet-3D/blob/f9251c48eb40c5aec8fba7455115c355466555be/python/mxnet/ndarray.py#L930-L1078 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/propgrid.py | python | PGChoices.GetId | (*args, **kwargs) | return _propgrid.PGChoices_GetId(*args, **kwargs) | GetId(self) -> PGChoicesId | GetId(self) -> PGChoicesId | [
"GetId",
"(",
"self",
")",
"-",
">",
"PGChoicesId"
] | def GetId(*args, **kwargs):
"""GetId(self) -> PGChoicesId"""
return _propgrid.PGChoices_GetId(*args, **kwargs) | [
"def",
"GetId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PGChoices_GetId",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L267-L269 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/docs/bcdoc/docstringparser.py | python | LineItemNode._lstrip | (self, node) | return False | Traverses the tree, stripping out whitespace until text data is found
:param node: The node to strip
:return: True if non-whitespace data was found, False otherwise | Traverses the tree, stripping out whitespace until text data is found
:param node: The node to strip
:return: True if non-whitespace data was found, False otherwise | [
"Traverses",
"the",
"tree",
"stripping",
"out",
"whitespace",
"until",
"text",
"data",
"is",
"found",
":",
"param",
"node",
":",
"The",
"node",
"to",
"strip",
":",
"return",
":",
"True",
"if",
"non",
"-",
"whitespace",
"data",
"was",
"found",
"False",
"otherwise"
] | def _lstrip(self, node):
"""
Traverses the tree, stripping out whitespace until text data is found
:param node: The node to strip
:return: True if non-whitespace data was found, False otherwise
"""
for child in node.children:
if isinstance(child, DataNode):
child.lstrip()
if child.data:
return True
else:
found = self._lstrip(child)
if found:
return True
return False | [
"def",
"_lstrip",
"(",
"self",
",",
"node",
")",
":",
"for",
"child",
"in",
"node",
".",
"children",
":",
"if",
"isinstance",
"(",
"child",
",",
"DataNode",
")",
":",
"child",
".",
"lstrip",
"(",
")",
"if",
"child",
".",
"data",
":",
"return",
"True",
"else",
":",
"found",
"=",
"self",
".",
"_lstrip",
"(",
"child",
")",
"if",
"found",
":",
"return",
"True",
"return",
"False"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/docs/bcdoc/docstringparser.py#L154-L170 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/ftplib.py | python | FTP.connect | (self, host='', port=0, timeout=-999, source_address=None) | return self.welcome | Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
- timeout: the timeout to set against the ftp socket(s)
- source_address: a 2-tuple (host, port) for the socket to bind
to as its source address before connecting. | Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
- timeout: the timeout to set against the ftp socket(s)
- source_address: a 2-tuple (host, port) for the socket to bind
to as its source address before connecting. | [
"Connect",
"to",
"host",
".",
"Arguments",
"are",
":",
"-",
"host",
":",
"hostname",
"to",
"connect",
"to",
"(",
"string",
"default",
"previous",
"host",
")",
"-",
"port",
":",
"port",
"to",
"connect",
"to",
"(",
"integer",
"default",
"previous",
"port",
")",
"-",
"timeout",
":",
"the",
"timeout",
"to",
"set",
"against",
"the",
"ftp",
"socket",
"(",
"s",
")",
"-",
"source_address",
":",
"a",
"2",
"-",
"tuple",
"(",
"host",
"port",
")",
"for",
"the",
"socket",
"to",
"bind",
"to",
"as",
"its",
"source",
"address",
"before",
"connecting",
"."
] | def connect(self, host='', port=0, timeout=-999, source_address=None):
'''Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
- timeout: the timeout to set against the ftp socket(s)
- source_address: a 2-tuple (host, port) for the socket to bind
to as its source address before connecting.
'''
if host != '':
self.host = host
if port > 0:
self.port = port
if timeout != -999:
self.timeout = timeout
if source_address is not None:
self.source_address = source_address
self.sock = socket.create_connection((self.host, self.port), self.timeout,
source_address=self.source_address)
self.af = self.sock.family
self.file = self.sock.makefile('r', encoding=self.encoding)
self.welcome = self.getresp()
return self.welcome | [
"def",
"connect",
"(",
"self",
",",
"host",
"=",
"''",
",",
"port",
"=",
"0",
",",
"timeout",
"=",
"-",
"999",
",",
"source_address",
"=",
"None",
")",
":",
"if",
"host",
"!=",
"''",
":",
"self",
".",
"host",
"=",
"host",
"if",
"port",
">",
"0",
":",
"self",
".",
"port",
"=",
"port",
"if",
"timeout",
"!=",
"-",
"999",
":",
"self",
".",
"timeout",
"=",
"timeout",
"if",
"source_address",
"is",
"not",
"None",
":",
"self",
".",
"source_address",
"=",
"source_address",
"self",
".",
"sock",
"=",
"socket",
".",
"create_connection",
"(",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
",",
"self",
".",
"timeout",
",",
"source_address",
"=",
"self",
".",
"source_address",
")",
"self",
".",
"af",
"=",
"self",
".",
"sock",
".",
"family",
"self",
".",
"file",
"=",
"self",
".",
"sock",
".",
"makefile",
"(",
"'r'",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"self",
".",
"welcome",
"=",
"self",
".",
"getresp",
"(",
")",
"return",
"self",
".",
"welcome"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/ftplib.py#L135-L156 | |
gimli-org/gimli | 17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | pygimli/physics/ert/ertScheme.py | python | DataSchemeWennerBeta.createData | (self, **kwargs) | return self.data_ | Create a Wenner-beta dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='wb', **kwargs) instead. | Create a Wenner-beta dataset. | [
"Create",
"a",
"Wenner",
"-",
"beta",
"dataset",
"."
] | def createData(self, **kwargs):
"""Create a Wenner-beta dataset.
Don't use directly .. call create from DataSchemeManager or
ert.createData(elecs, schemeName='wb', **kwargs) instead.
"""
nElectrodes = self.nElectrodes_
maxSep = nElectrodes - 2
if self.maxSeparation < maxSep:
maxSep = self.maxSeparation
# reserve a couple more than nesseccary ###
self.data_.resize((nElectrodes * nElectrodes))
count = 0
for sep in range(1, maxSep + 1):
for i in range((nElectrodes - 2) - sep):
a = i
b = a + sep
m = b + sep
n = m + sep
count = self.createDatum_(a, b, m, n, count)
self.data_.removeInvalid()
return self.data_ | [
"def",
"createData",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"nElectrodes",
"=",
"self",
".",
"nElectrodes_",
"maxSep",
"=",
"nElectrodes",
"-",
"2",
"if",
"self",
".",
"maxSeparation",
"<",
"maxSep",
":",
"maxSep",
"=",
"self",
".",
"maxSeparation",
"# reserve a couple more than nesseccary ###",
"self",
".",
"data_",
".",
"resize",
"(",
"(",
"nElectrodes",
"*",
"nElectrodes",
")",
")",
"count",
"=",
"0",
"for",
"sep",
"in",
"range",
"(",
"1",
",",
"maxSep",
"+",
"1",
")",
":",
"for",
"i",
"in",
"range",
"(",
"(",
"nElectrodes",
"-",
"2",
")",
"-",
"sep",
")",
":",
"a",
"=",
"i",
"b",
"=",
"a",
"+",
"sep",
"m",
"=",
"b",
"+",
"sep",
"n",
"=",
"m",
"+",
"sep",
"count",
"=",
"self",
".",
"createDatum_",
"(",
"a",
",",
"b",
",",
"m",
",",
"n",
",",
"count",
")",
"self",
".",
"data_",
".",
"removeInvalid",
"(",
")",
"return",
"self",
".",
"data_"
] | https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/physics/ert/ertScheme.py#L607-L634 | |
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/contrib/mixed_precision/fp16_utils.py | python | update_role_var_grad | (main_prog, params_grads) | Update op_role_var attr for some ops to make sure the gradients
transferred across GPUs is FP16.
1. Check whether the op that outputs gradient is cast or not.
2. If op is cast and gradient is FP32, remove the op_role_var
and find the prev op which outputs FP16 gradient
3. Update the op_role_var of the prev op.
Args:
main_prog (Program): The main program for training.
params_grads (list): A list of params and grads. | Update op_role_var attr for some ops to make sure the gradients
transferred across GPUs is FP16.
1. Check whether the op that outputs gradient is cast or not.
2. If op is cast and gradient is FP32, remove the op_role_var
and find the prev op which outputs FP16 gradient
3. Update the op_role_var of the prev op. | [
"Update",
"op_role_var",
"attr",
"for",
"some",
"ops",
"to",
"make",
"sure",
"the",
"gradients",
"transferred",
"across",
"GPUs",
"is",
"FP16",
".",
"1",
".",
"Check",
"whether",
"the",
"op",
"that",
"outputs",
"gradient",
"is",
"cast",
"or",
"not",
".",
"2",
".",
"If",
"op",
"is",
"cast",
"and",
"gradient",
"is",
"FP32",
"remove",
"the",
"op_role_var",
"and",
"find",
"the",
"prev",
"op",
"which",
"outputs",
"FP16",
"gradient",
"3",
".",
"Update",
"the",
"op_role_var",
"of",
"the",
"prev",
"op",
"."
] | def update_role_var_grad(main_prog, params_grads):
"""
Update op_role_var attr for some ops to make sure the gradients
transferred across GPUs is FP16.
1. Check whether the op that outputs gradient is cast or not.
2. If op is cast and gradient is FP32, remove the op_role_var
and find the prev op which outputs FP16 gradient
3. Update the op_role_var of the prev op.
Args:
main_prog (Program): The main program for training.
params_grads (list): A list of params and grads.
"""
block = main_prog.global_block()
block._sync_with_cpp()
BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward
OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize
for p, g in params_grads:
op = g.op
if g.dtype == core.VarDesc.VarType.FP32 and op.type == 'cast':
role = op.attr('op_role')
if role & int(BACKWARD) and op.has_attr('op_role_var'):
op._remove_attr("op_role_var")
else:
raise ValueError("The cast op {0} must be in BACKWARD role "
"and have op_role_var attr.".format(op))
fp16_grad_name = op.input(op.input_names[0])[0]
op_for_fp16_grad = find_true_prev_op(block.ops, op, fp16_grad_name)
op_role_var_attr_name = \
core.op_proto_and_checker_maker.kOpRoleVarAttrName()
attr_val = [p.name, fp16_grad_name]
if op_for_fp16_grad.has_attr(op_role_var_attr_name):
attr_val.extend(op_for_fp16_grad.attr(op_role_var_attr_name))
op_for_fp16_grad._set_attr(op_role_var_attr_name, attr_val)
# Maximize the all_reduce overlap, and perform the cast
# operation after gradients transfer.
op._set_attr('op_role', OPTIMIZE)
# optimize op should stay behind forward and backward ops
if op == block.ops[-1]:
continue
post_ops = find_true_post_op(block.ops, op, g.name)
if post_ops:
raise ValueError("The cast op {0}'s output should not be"
"used by a non-optimize op, however, it"
"is used by {1}".format(op, post_ops[0]))
# add new op in the python and cpp at the same time
new_op_desc = block.desc.append_op()
new_op_desc.copy_from(op.desc)
new_op = framework.Operator(
block=block,
desc=new_op_desc,
type=None,
inputs=None,
outputs=None,
attrs=None)
block.ops.append(new_op)
op_idx = find_op_index(block.desc, op.desc)
if op_idx == -1:
raise ValueError("The op {0} is not in program".format(op))
block._remove_op(op_idx, sync=False)
block._sync_with_cpp() | [
"def",
"update_role_var_grad",
"(",
"main_prog",
",",
"params_grads",
")",
":",
"block",
"=",
"main_prog",
".",
"global_block",
"(",
")",
"block",
".",
"_sync_with_cpp",
"(",
")",
"BACKWARD",
"=",
"core",
".",
"op_proto_and_checker_maker",
".",
"OpRole",
".",
"Backward",
"OPTIMIZE",
"=",
"core",
".",
"op_proto_and_checker_maker",
".",
"OpRole",
".",
"Optimize",
"for",
"p",
",",
"g",
"in",
"params_grads",
":",
"op",
"=",
"g",
".",
"op",
"if",
"g",
".",
"dtype",
"==",
"core",
".",
"VarDesc",
".",
"VarType",
".",
"FP32",
"and",
"op",
".",
"type",
"==",
"'cast'",
":",
"role",
"=",
"op",
".",
"attr",
"(",
"'op_role'",
")",
"if",
"role",
"&",
"int",
"(",
"BACKWARD",
")",
"and",
"op",
".",
"has_attr",
"(",
"'op_role_var'",
")",
":",
"op",
".",
"_remove_attr",
"(",
"\"op_role_var\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"The cast op {0} must be in BACKWARD role \"",
"\"and have op_role_var attr.\"",
".",
"format",
"(",
"op",
")",
")",
"fp16_grad_name",
"=",
"op",
".",
"input",
"(",
"op",
".",
"input_names",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"op_for_fp16_grad",
"=",
"find_true_prev_op",
"(",
"block",
".",
"ops",
",",
"op",
",",
"fp16_grad_name",
")",
"op_role_var_attr_name",
"=",
"core",
".",
"op_proto_and_checker_maker",
".",
"kOpRoleVarAttrName",
"(",
")",
"attr_val",
"=",
"[",
"p",
".",
"name",
",",
"fp16_grad_name",
"]",
"if",
"op_for_fp16_grad",
".",
"has_attr",
"(",
"op_role_var_attr_name",
")",
":",
"attr_val",
".",
"extend",
"(",
"op_for_fp16_grad",
".",
"attr",
"(",
"op_role_var_attr_name",
")",
")",
"op_for_fp16_grad",
".",
"_set_attr",
"(",
"op_role_var_attr_name",
",",
"attr_val",
")",
"# Maximize the all_reduce overlap, and perform the cast",
"# operation after gradients transfer.",
"op",
".",
"_set_attr",
"(",
"'op_role'",
",",
"OPTIMIZE",
")",
"# optimize op should stay behind forward and backward ops",
"if",
"op",
"==",
"block",
".",
"ops",
"[",
"-",
"1",
"]",
":",
"continue",
"post_ops",
"=",
"find_true_post_op",
"(",
"block",
".",
"ops",
",",
"op",
",",
"g",
".",
"name",
")",
"if",
"post_ops",
":",
"raise",
"ValueError",
"(",
"\"The cast op {0}'s output should not be\"",
"\"used by a non-optimize op, however, it\"",
"\"is used by {1}\"",
".",
"format",
"(",
"op",
",",
"post_ops",
"[",
"0",
"]",
")",
")",
"# add new op in the python and cpp at the same time",
"new_op_desc",
"=",
"block",
".",
"desc",
".",
"append_op",
"(",
")",
"new_op_desc",
".",
"copy_from",
"(",
"op",
".",
"desc",
")",
"new_op",
"=",
"framework",
".",
"Operator",
"(",
"block",
"=",
"block",
",",
"desc",
"=",
"new_op_desc",
",",
"type",
"=",
"None",
",",
"inputs",
"=",
"None",
",",
"outputs",
"=",
"None",
",",
"attrs",
"=",
"None",
")",
"block",
".",
"ops",
".",
"append",
"(",
"new_op",
")",
"op_idx",
"=",
"find_op_index",
"(",
"block",
".",
"desc",
",",
"op",
".",
"desc",
")",
"if",
"op_idx",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"The op {0} is not in program\"",
".",
"format",
"(",
"op",
")",
")",
"block",
".",
"_remove_op",
"(",
"op_idx",
",",
"sync",
"=",
"False",
")",
"block",
".",
"_sync_with_cpp",
"(",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/contrib/mixed_precision/fp16_utils.py#L641-L703 | ||
emscripten-core/emscripten | 0d413d3c5af8b28349682496edc14656f5700c2f | third_party/WebIDL.py | python | Parser.p_ArgumentsEmpty | (self, p) | Arguments : | Arguments : | [
"Arguments",
":"
] | def p_ArgumentsEmpty(self, p):
"""
Arguments :
"""
p[0] = [] | [
"def",
"p_ArgumentsEmpty",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"[",
"]"
] | https://github.com/emscripten-core/emscripten/blob/0d413d3c5af8b28349682496edc14656f5700c2f/third_party/WebIDL.py#L4348-L4352 | ||
nasa/fprime | 595cf3682d8365943d86c1a6fe7c78f0a116acf0 | Autocoders/Python/src/fprime_ac/generators/visitors/PortHVisitor.py | python | PortHVisitor._get_args_sum_string | (self, obj) | return arg_str | Return a string of sizeof calls that sum to the port
buffer size for use within the input ports. | Return a string of sizeof calls that sum to the port
buffer size for use within the input ports. | [
"Return",
"a",
"string",
"of",
"sizeof",
"calls",
"that",
"sum",
"to",
"the",
"port",
"buffer",
"size",
"for",
"use",
"within",
"the",
"input",
"ports",
"."
] | def _get_args_sum_string(self, obj):
"""
Return a string of sizeof calls that sum to the port
buffer size for use within the input ports.
"""
args = obj.get_args()
arg_str = ""
# empty list then void args
if len(args) == 0:
arg_str = "0"
return arg_str
for arg in args:
t = arg.get_type()
if t == "string":
t = arg.get_name() + "String"
if t == "buffer":
t = arg.get_name() + "Buffer"
#
# Make size for pointer modifier here...
if arg.get_modifier() == "pointer":
cl = " *)"
else:
cl = ")"
#
if isinstance(t, tuple):
if t[0][0].upper() == "ENUM":
t = "sizeof(NATIVE_INT_TYPE)"
else:
PRINT.info(
"ERROR: Ill formed enumeration type...(name: %s, type: %s"
% (arg.get_name(), arg.get_type())
)
sys.exit(-1)
elif t in [
"U8",
"U16",
"U32",
"U64",
"I8",
"I16",
"I32",
"I64",
"F32",
"F64",
"bool",
"FwOpcodeType",
"FwChanIdType",
"FwEventIdType",
"FwPrmIdType",
"NATIVE_INT_TYPE",
"NATIVE_UINT_TYPE",
"POINTER_CAST",
]:
t = "sizeof(" + t + cl
else:
if arg.get_modifier() == "pointer":
t = "sizeof(" + t + "*)"
else:
t = t + "::SERIALIZED_SIZE"
arg_str += t
arg_str += " + "
arg_str = arg_str.strip(" + ")
return arg_str | [
"def",
"_get_args_sum_string",
"(",
"self",
",",
"obj",
")",
":",
"args",
"=",
"obj",
".",
"get_args",
"(",
")",
"arg_str",
"=",
"\"\"",
"# empty list then void args",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"arg_str",
"=",
"\"0\"",
"return",
"arg_str",
"for",
"arg",
"in",
"args",
":",
"t",
"=",
"arg",
".",
"get_type",
"(",
")",
"if",
"t",
"==",
"\"string\"",
":",
"t",
"=",
"arg",
".",
"get_name",
"(",
")",
"+",
"\"String\"",
"if",
"t",
"==",
"\"buffer\"",
":",
"t",
"=",
"arg",
".",
"get_name",
"(",
")",
"+",
"\"Buffer\"",
"#",
"# Make size for pointer modifier here...",
"if",
"arg",
".",
"get_modifier",
"(",
")",
"==",
"\"pointer\"",
":",
"cl",
"=",
"\" *)\"",
"else",
":",
"cl",
"=",
"\")\"",
"#",
"if",
"isinstance",
"(",
"t",
",",
"tuple",
")",
":",
"if",
"t",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"==",
"\"ENUM\"",
":",
"t",
"=",
"\"sizeof(NATIVE_INT_TYPE)\"",
"else",
":",
"PRINT",
".",
"info",
"(",
"\"ERROR: Ill formed enumeration type...(name: %s, type: %s\"",
"%",
"(",
"arg",
".",
"get_name",
"(",
")",
",",
"arg",
".",
"get_type",
"(",
")",
")",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"elif",
"t",
"in",
"[",
"\"U8\"",
",",
"\"U16\"",
",",
"\"U32\"",
",",
"\"U64\"",
",",
"\"I8\"",
",",
"\"I16\"",
",",
"\"I32\"",
",",
"\"I64\"",
",",
"\"F32\"",
",",
"\"F64\"",
",",
"\"bool\"",
",",
"\"FwOpcodeType\"",
",",
"\"FwChanIdType\"",
",",
"\"FwEventIdType\"",
",",
"\"FwPrmIdType\"",
",",
"\"NATIVE_INT_TYPE\"",
",",
"\"NATIVE_UINT_TYPE\"",
",",
"\"POINTER_CAST\"",
",",
"]",
":",
"t",
"=",
"\"sizeof(\"",
"+",
"t",
"+",
"cl",
"else",
":",
"if",
"arg",
".",
"get_modifier",
"(",
")",
"==",
"\"pointer\"",
":",
"t",
"=",
"\"sizeof(\"",
"+",
"t",
"+",
"\"*)\"",
"else",
":",
"t",
"=",
"t",
"+",
"\"::SERIALIZED_SIZE\"",
"arg_str",
"+=",
"t",
"arg_str",
"+=",
"\" + \"",
"arg_str",
"=",
"arg_str",
".",
"strip",
"(",
"\" + \"",
")",
"return",
"arg_str"
] | https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/generators/visitors/PortHVisitor.py#L131-L195 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/mailbox.py | python | Maildir.remove | (self, key) | Remove the keyed message; raise KeyError if it doesn't exist. | Remove the keyed message; raise KeyError if it doesn't exist. | [
"Remove",
"the",
"keyed",
"message",
";",
"raise",
"KeyError",
"if",
"it",
"doesn",
"t",
"exist",
"."
] | def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key))) | [
"def",
"remove",
"(",
"self",
",",
"key",
")",
":",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"self",
".",
"_lookup",
"(",
"key",
")",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/mailbox.py#L308-L310 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/html2.py | python | WebView.Create | (*args, **kwargs) | return _html2.WebView_Create(*args, **kwargs) | Create(self, Window parent, int id=ID_ANY, String url=wxWebViewDefaultURLStr,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, String name=wxWebViewNameStr) -> bool | Create(self, Window parent, int id=ID_ANY, String url=wxWebViewDefaultURLStr,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, String name=wxWebViewNameStr) -> bool | [
"Create",
"(",
"self",
"Window",
"parent",
"int",
"id",
"=",
"ID_ANY",
"String",
"url",
"=",
"wxWebViewDefaultURLStr",
"Point",
"pos",
"=",
"DefaultPosition",
"Size",
"size",
"=",
"DefaultSize",
"long",
"style",
"=",
"0",
"String",
"name",
"=",
"wxWebViewNameStr",
")",
"-",
">",
"bool"
] | def Create(*args, **kwargs):
"""
Create(self, Window parent, int id=ID_ANY, String url=wxWebViewDefaultURLStr,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, String name=wxWebViewNameStr) -> bool
"""
return _html2.WebView_Create(*args, **kwargs) | [
"def",
"Create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_html2",
".",
"WebView_Create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/html2.py#L130-L136 | |
okex/V3-Open-API-SDK | c5abb0db7e2287718e0055e17e57672ce0ec7fd9 | okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/utils/misc.py | python | dist_is_local | (dist) | return is_local(dist_location(dist)) | Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv. | Return True if given Distribution object is installed locally
(i.e. within current virtualenv). | [
"Return",
"True",
"if",
"given",
"Distribution",
"object",
"is",
"installed",
"locally",
"(",
"i",
".",
"e",
".",
"within",
"current",
"virtualenv",
")",
"."
] | def dist_is_local(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist)) | [
"def",
"dist_is_local",
"(",
"dist",
")",
":",
"# type: (Distribution) -> bool",
"return",
"is_local",
"(",
"dist_location",
"(",
"dist",
")",
")"
] | https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/utils/misc.py#L326-L335 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/_LWPCookieJar.py | python | lwp_cookie_str | (cookie) | return join_header_words([h]) | Return string representation of Cookie in the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring. | Return string representation of Cookie in the LWP cookie file format. | [
"Return",
"string",
"representation",
"of",
"Cookie",
"in",
"the",
"LWP",
"cookie",
"file",
"format",
"."
] | def lwp_cookie_str(cookie):
"""Return string representation of Cookie in the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
keys = cookie._rest.keys()
keys.sort()
for k in keys:
h.append((k, str(cookie._rest[k])))
h.append(("version", str(cookie.version)))
return join_header_words([h]) | [
"def",
"lwp_cookie_str",
"(",
"cookie",
")",
":",
"h",
"=",
"[",
"(",
"cookie",
".",
"name",
",",
"cookie",
".",
"value",
")",
",",
"(",
"\"path\"",
",",
"cookie",
".",
"path",
")",
",",
"(",
"\"domain\"",
",",
"cookie",
".",
"domain",
")",
"]",
"if",
"cookie",
".",
"port",
"is",
"not",
"None",
":",
"h",
".",
"append",
"(",
"(",
"\"port\"",
",",
"cookie",
".",
"port",
")",
")",
"if",
"cookie",
".",
"path_specified",
":",
"h",
".",
"append",
"(",
"(",
"\"path_spec\"",
",",
"None",
")",
")",
"if",
"cookie",
".",
"port_specified",
":",
"h",
".",
"append",
"(",
"(",
"\"port_spec\"",
",",
"None",
")",
")",
"if",
"cookie",
".",
"domain_initial_dot",
":",
"h",
".",
"append",
"(",
"(",
"\"domain_dot\"",
",",
"None",
")",
")",
"if",
"cookie",
".",
"secure",
":",
"h",
".",
"append",
"(",
"(",
"\"secure\"",
",",
"None",
")",
")",
"if",
"cookie",
".",
"expires",
":",
"h",
".",
"append",
"(",
"(",
"\"expires\"",
",",
"time2isoz",
"(",
"float",
"(",
"cookie",
".",
"expires",
")",
")",
")",
")",
"if",
"cookie",
".",
"discard",
":",
"h",
".",
"append",
"(",
"(",
"\"discard\"",
",",
"None",
")",
")",
"if",
"cookie",
".",
"comment",
":",
"h",
".",
"append",
"(",
"(",
"\"comment\"",
",",
"cookie",
".",
"comment",
")",
")",
"if",
"cookie",
".",
"comment_url",
":",
"h",
".",
"append",
"(",
"(",
"\"commenturl\"",
",",
"cookie",
".",
"comment_url",
")",
")",
"keys",
"=",
"cookie",
".",
"_rest",
".",
"keys",
"(",
")",
"keys",
".",
"sort",
"(",
")",
"for",
"k",
"in",
"keys",
":",
"h",
".",
"append",
"(",
"(",
"k",
",",
"str",
"(",
"cookie",
".",
"_rest",
"[",
"k",
"]",
")",
")",
")",
"h",
".",
"append",
"(",
"(",
"\"version\"",
",",
"str",
"(",
"cookie",
".",
"version",
")",
")",
")",
"return",
"join_header_words",
"(",
"[",
"h",
"]",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/_LWPCookieJar.py#L20-L47 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/feature_column/feature_column_v2.py | python | crossed_column | (keys, hash_bucket_size, hash_key=None) | return CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key) | Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
then crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
vertical_id = categorical_column_with_identity('vertical_id', 10K)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
columns = [vertical_id_x_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
To use crossed column in DNN model, you need to add it in an embedding column
as in this example:
```python
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int > 1. The number of buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
ValueError: If any of the keys is `HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`. | Returns a column for performing crosses of categorical features. | [
"Returns",
"a",
"column",
"for",
"performing",
"crosses",
"of",
"categorical",
"features",
"."
] | def crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
then crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
vertical_id = categorical_column_with_identity('vertical_id', 10K)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
columns = [vertical_id_x_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
To use crossed column in DNN model, you need to add it in an embedding column
as in this example:
```python
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int > 1. The number of buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
ValueError: If any of the keys is `HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
if not hash_bucket_size or hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be > 1. '
'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))): # pylint: disable=protected-access
raise ValueError(
'Unsupported key type. All keys must be either string, or '
'categorical column except HashedCategoricalColumn. '
'Given: {}'.format(key))
if isinstance(key,
(HashedCategoricalColumn, fc_old._HashedCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key) | [
"def",
"crossed_column",
"(",
"keys",
",",
"hash_bucket_size",
",",
"hash_key",
"=",
"None",
")",
":",
"if",
"not",
"hash_bucket_size",
"or",
"hash_bucket_size",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'hash_bucket_size must be > 1. '",
"'hash_bucket_size: {}'",
".",
"format",
"(",
"hash_bucket_size",
")",
")",
"if",
"not",
"keys",
"or",
"len",
"(",
"keys",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'keys must be a list with length > 1. Given: {}'",
".",
"format",
"(",
"keys",
")",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"(",
"not",
"isinstance",
"(",
"key",
",",
"six",
".",
"string_types",
")",
"and",
"not",
"isinstance",
"(",
"key",
",",
"(",
"CategoricalColumn",
",",
"fc_old",
".",
"_CategoricalColumn",
")",
")",
")",
":",
"# pylint: disable=protected-access",
"raise",
"ValueError",
"(",
"'Unsupported key type. All keys must be either string, or '",
"'categorical column except HashedCategoricalColumn. '",
"'Given: {}'",
".",
"format",
"(",
"key",
")",
")",
"if",
"isinstance",
"(",
"key",
",",
"(",
"HashedCategoricalColumn",
",",
"fc_old",
".",
"_HashedCategoricalColumn",
")",
")",
":",
"# pylint: disable=protected-access",
"raise",
"ValueError",
"(",
"'categorical_column_with_hash_bucket is not supported for crossing. '",
"'Hashing before crossing will increase probability of collision. '",
"'Instead, use the feature name as a string. Given: {}'",
".",
"format",
"(",
"key",
")",
")",
"return",
"CrossedColumn",
"(",
"keys",
"=",
"tuple",
"(",
"keys",
")",
",",
"hash_bucket_size",
"=",
"hash_bucket_size",
",",
"hash_key",
"=",
"hash_key",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/feature_column/feature_column_v2.py#L1982-L2106 | |
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/xgboost/python-package/xgboost/core.py | python | _load_lib | () | return lib | Load xgboost Library. | Load xgboost Library. | [
"Load",
"xgboost",
"Library",
"."
] | def _load_lib():
"""Load xgboost Library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.XGBGetLastError.restype = ctypes.c_char_p
return lib | [
"def",
"_load_lib",
"(",
")",
":",
"lib_path",
"=",
"find_lib_path",
"(",
")",
"if",
"len",
"(",
"lib_path",
")",
"==",
"0",
":",
"return",
"None",
"lib",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"lib_path",
"[",
"0",
"]",
")",
"lib",
".",
"XGBGetLastError",
".",
"restype",
"=",
"ctypes",
".",
"c_char_p",
"return",
"lib"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/xgboost/python-package/xgboost/core.py#L81-L88 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.