nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ChenglongChen/kaggle-CrowdFlower | a72eabb2e53c308c345905eff79c3c8e3a389239 | Code/Feat/feat_utils.py | python | dump_feat_name | (feat_names, feat_name_file) | save feat_names to feat_name_file | save feat_names to feat_name_file | [
"save",
"feat_names",
"to",
"feat_name_file"
] | def dump_feat_name(feat_names, feat_name_file):
"""
save feat_names to feat_name_file
"""
with open(feat_name_file, "wb") as f:
for i,feat_name in enumerate(feat_names):
if feat_name.startswith("count") or feat_name.startswith("pos_of"):
f.write("('%s', SimpleTransform(config.count_feat_transform)),\n" % feat_name)
else:
f.write("('%s', SimpleTransform()),\n" % feat_name) | [
"def",
"dump_feat_name",
"(",
"feat_names",
",",
"feat_name_file",
")",
":",
"with",
"open",
"(",
"feat_name_file",
",",
"\"wb\"",
")",
"as",
"f",
":",
"for",
"i",
",",
"feat_name",
"in",
"enumerate",
"(",
"feat_names",
")",
":",
"if",
"feat_name",
".",
... | https://github.com/ChenglongChen/kaggle-CrowdFlower/blob/a72eabb2e53c308c345905eff79c3c8e3a389239/Code/Feat/feat_utils.py#L43-L52 | ||
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | deps/src/libxml2-2.9.1/python/libxml2.py | python | xmlDoc.stringGetNodeList | (self, value) | return __tmp | Parse the value string and build the node list associated.
Should produce a flat tree with only TEXTs and ENTITY_REFs. | Parse the value string and build the node list associated.
Should produce a flat tree with only TEXTs and ENTITY_REFs. | [
"Parse",
"the",
"value",
"string",
"and",
"build",
"the",
"node",
"list",
"associated",
".",
"Should",
"produce",
"a",
"flat",
"tree",
"with",
"only",
"TEXTs",
"and",
"ENTITY_REFs",
"."
] | def stringGetNodeList(self, value):
"""Parse the value string and build the node list associated.
Should produce a flat tree with only TEXTs and ENTITY_REFs. """
ret = libxml2mod.xmlStringGetNodeList(self._o, value)
if ret is None:raise treeError('xmlStringGetNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | [
"def",
"stringGetNodeList",
"(",
"self",
",",
"value",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlStringGetNodeList",
"(",
"self",
".",
"_o",
",",
"value",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlStringGetNodeList() failed'",
"... | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L4573-L4579 | |
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/plan/robotoptimize.py | python | KlamptVariable.bind | (self,obj) | Binds all Variables associated with this to the value of Klamp't object obj | Binds all Variables associated with this to the value of Klamp't object obj | [
"Binds",
"all",
"Variables",
"associated",
"with",
"this",
"to",
"the",
"value",
"of",
"Klamp",
"t",
"object",
"obj"
] | def bind(self,obj):
"""Binds all Variables associated with this to the value of Klamp't object obj"""
if self.type in ['Config','Vector','Vector3','Point']:
self.variables[0].bind(obj)
elif self.type == 'Configs':
assert len(obj) == len(self.variables),"Invalid number of configs in Configs object"
for i,v in enumerate(obj):
self.variables[i].bind(v)
elif self.type == 'Rotation':
if self.encoder is None:
self.variables[0].bind(obj)
else:
self.variables[0].bind(self.encoder(obj))
elif self.type == 'RigidTransform':
if self.encoder is None:
self.variables[0].bind(obj[0])
self.variables[1].bind(obj[1])
else:
T = self.encoder(obj)
self.variables[0].bind(T[0])
self.variables[1].bind(T[1])
else:
raise ValueError("Unsupported object type "+self.type) | [
"def",
"bind",
"(",
"self",
",",
"obj",
")",
":",
"if",
"self",
".",
"type",
"in",
"[",
"'Config'",
",",
"'Vector'",
",",
"'Vector3'",
",",
"'Point'",
"]",
":",
"self",
".",
"variables",
"[",
"0",
"]",
".",
"bind",
"(",
"obj",
")",
"elif",
"self"... | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/plan/robotoptimize.py#L30-L52 | ||
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Code/Tools/waf-1.7.13/crywaflib/mscv_helper.py | python | copy_redist_files | (self, package, filter_list) | return any_files | Copies redistributable files matching a given filter list from a redistributable package
The function returns True if at least one files will be copied, False otherwise
Note: If the filter list is an empty list, all files from the package are copied | Copies redistributable files matching a given filter list from a redistributable package
The function returns True if at least one files will be copied, False otherwise
Note: If the filter list is an empty list, all files from the package are copied | [
"Copies",
"redistributable",
"files",
"matching",
"a",
"given",
"filter",
"list",
"from",
"a",
"redistributable",
"package",
"The",
"function",
"returns",
"True",
"if",
"at",
"least",
"one",
"files",
"will",
"be",
"copied",
"False",
"otherwise",
"Note",
":",
"... | def copy_redist_files(self, package, filter_list):
"""
Copies redistributable files matching a given filter list from a redistributable package
The function returns True if at least one files will be copied, False otherwise
Note: If the filter list is an empty list, all files from the package are copied
"""
folder = ''
if 'REDIST_PATH' in self.env:
if package in self.env['REDIST_PATH']:
folder = self.env['REDIST_PATH'][package]
if len(folder) == 0:
return False
try:
files = os.listdir(folder)
except:
return False
output_folder = self.bld.get_output_folders(self.bld.env['PLATFORM'], self.bld.env['CONFIGURATION'])[0]
if hasattr(self, 'output_sub_folder'):
if os.path.isabs(self.output_sub_folder):
output_folder = self.bld.root.make_node(self.output_sub_folder)
else:
output_folder = output_folder.make_node(self.output_sub_folder)
any_files = False
for file in files:
copy = False
for filter in filter_list:
if filter in file.lower():
copy = True
if len(filter_list) == 0:
copy = True
if copy and os.path.isfile(os.path.join(folder, file)):
self.create_task('copy_outputs', self.bld.root.make_node(os.path.join(folder, file)), output_folder.make_node(file))
any_files = True
return any_files | [
"def",
"copy_redist_files",
"(",
"self",
",",
"package",
",",
"filter_list",
")",
":",
"folder",
"=",
"''",
"if",
"'REDIST_PATH'",
"in",
"self",
".",
"env",
":",
"if",
"package",
"in",
"self",
".",
"env",
"[",
"'REDIST_PATH'",
"]",
":",
"folder",
"=",
... | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/crywaflib/mscv_helper.py#L1231-L1268 | |
mozilla/DeepSpeech | aa1d28530d531d0d92289bf5f11a49fe516fdc86 | native_client/ctcdecode/__init__.py | python | UTF8Alphabet.EncodeSingle | (self, input) | return super(UTF8Alphabet, self).EncodeSingle(input.encode('utf-8')) | Encode a single character/output class into a label. Character must be in
the alphabet, this method will assert that. Use `CanEncodeSingle` to test. | Encode a single character/output class into a label. Character must be in
the alphabet, this method will assert that. Use `CanEncodeSingle` to test. | [
"Encode",
"a",
"single",
"character",
"/",
"output",
"class",
"into",
"a",
"label",
".",
"Character",
"must",
"be",
"in",
"the",
"alphabet",
"this",
"method",
"will",
"assert",
"that",
".",
"Use",
"CanEncodeSingle",
"to",
"test",
"."
] | def EncodeSingle(self, input):
'''
Encode a single character/output class into a label. Character must be in
the alphabet, this method will assert that. Use `CanEncodeSingle` to test.
'''
return super(UTF8Alphabet, self).EncodeSingle(input.encode('utf-8')) | [
"def",
"EncodeSingle",
"(",
"self",
",",
"input",
")",
":",
"return",
"super",
"(",
"UTF8Alphabet",
",",
"self",
")",
".",
"EncodeSingle",
"(",
"input",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] | https://github.com/mozilla/DeepSpeech/blob/aa1d28530d531d0d92289bf5f11a49fe516fdc86/native_client/ctcdecode/__init__.py#L113-L118 | |
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/nn/modules/module.py | python | Module.xpu | (self: T, device: Optional[Union[int, device]] = None) | return self._apply(lambda t: t.xpu(device)) | r"""Moves all model parameters and buffers to the XPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on XPU while being optimized.
.. note::
This method modifies the module in-place.
Arguments:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self | r"""Moves all model parameters and buffers to the XPU. | [
"r",
"Moves",
"all",
"model",
"parameters",
"and",
"buffers",
"to",
"the",
"XPU",
"."
] | def xpu(self: T, device: Optional[Union[int, device]] = None) -> T:
r"""Moves all model parameters and buffers to the XPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on XPU while being optimized.
.. note::
This method modifies the module in-place.
Arguments:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
return self._apply(lambda t: t.xpu(device)) | [
"def",
"xpu",
"(",
"self",
":",
"T",
",",
"device",
":",
"Optional",
"[",
"Union",
"[",
"int",
",",
"device",
"]",
"]",
"=",
"None",
")",
"->",
"T",
":",
"return",
"self",
".",
"_apply",
"(",
"lambda",
"t",
":",
"t",
".",
"xpu",
"(",
"device",
... | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/nn/modules/module.py#L691-L708 | |
NERSC/timemory | 431912b360ff50d1a160d7826e2eea04fbd1037f | timemory/profiler/profiler.py | python | FakeProfiler.__call__ | (self, func) | return function_wrapper | Decorator | Decorator | [
"Decorator"
] | def __call__(self, func):
"""Decorator"""
@wraps(func)
def function_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return function_wrapper | [
"def",
"__call__",
"(",
"self",
",",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"function_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"fu... | https://github.com/NERSC/timemory/blob/431912b360ff50d1a160d7826e2eea04fbd1037f/timemory/profiler/profiler.py#L357-L364 | |
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | python/configobj/validate.py | python | is_mixed_list | (value, *args) | Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
You specify each member as a positional argument specifying type
Each type should be one of the following strings :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So you can specify a list of two strings, followed by
two integers as :
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
This test requires an elaborate setup, because of a change in error string
output from the interpreter between Python 2.2 and 2.3 .
>>> res_seq = (
... 'passed an incorrect value "',
... 'yoda',
... '" for parameter "mixed_list".',
... )
>>> res_str = "'".join(res_seq)
>>> try:
... vtor.check('mixed_list("yoda")', ('a'))
... except VdtParamError, err:
... str(err) == res_str
1 | Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
You specify each member as a positional argument specifying type
Each type should be one of the following strings :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So you can specify a list of two strings, followed by
two integers as :
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
This test requires an elaborate setup, because of a change in error string
output from the interpreter between Python 2.2 and 2.3 .
>>> res_seq = (
... 'passed an incorrect value "',
... 'yoda',
... '" for parameter "mixed_list".',
... )
>>> res_str = "'".join(res_seq)
>>> try:
... vtor.check('mixed_list("yoda")', ('a'))
... except VdtParamError, err:
... str(err) == res_str
1 | [
"Check",
"that",
"the",
"value",
"is",
"a",
"list",
".",
"Allow",
"specifying",
"the",
"type",
"of",
"each",
"member",
".",
"Work",
"on",
"lists",
"of",
"specific",
"lengths",
".",
"You",
"specify",
"each",
"member",
"as",
"a",
"positional",
"argument",
... | def is_mixed_list(value, *args):
"""
Check that the value is a list.
Allow specifying the type of each member.
Work on lists of specific lengths.
You specify each member as a positional argument specifying type
Each type should be one of the following strings :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So you can specify a list of two strings, followed by
two integers as :
mixed_list('string', 'string', 'integer', 'integer')
The length of the list must match the number of positional
arguments you supply.
>>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
>>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
>>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
1
>>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
Traceback (most recent call last):
VdtTypeError: the value "b" is of the wrong type.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
Traceback (most recent call last):
VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
Traceback (most recent call last):
VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
>>> vtor.check(mix_str, 0)
Traceback (most recent call last):
VdtTypeError: the value "0" is of the wrong type.
This test requires an elaborate setup, because of a change in error string
output from the interpreter between Python 2.2 and 2.3 .
>>> res_seq = (
... 'passed an incorrect value "',
... 'yoda',
... '" for parameter "mixed_list".',
... )
>>> res_str = "'".join(res_seq)
>>> try:
... vtor.check('mixed_list("yoda")', ('a'))
... except VdtParamError, err:
... str(err) == res_str
1
"""
try:
length = len(value)
except TypeError:
raise VdtTypeError(value)
if length < len(args):
raise VdtValueTooShortError(value)
elif length > len(args):
raise VdtValueTooLongError(value)
try:
return [fun_dict[arg](val) for arg, val in zip(args, value)]
except KeyError, e:
raise VdtParamError('mixed_list', e) | [
"def",
"is_mixed_list",
"(",
"value",
",",
"*",
"args",
")",
":",
"try",
":",
"length",
"=",
"len",
"(",
"value",
")",
"except",
"TypeError",
":",
"raise",
"VdtTypeError",
"(",
"value",
")",
"if",
"length",
"<",
"len",
"(",
"args",
")",
":",
"raise",... | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/configobj/validate.py#L1231-L1296 | ||
rdkit/rdkit | ede860ae316d12d8568daf5ee800921c3389c84e | rdkit/Chem/Scaffolds/MurckoScaffold.py | python | MakeScaffoldGeneric | (mol) | return Chem.RemoveHs(res) | Makes a Murcko scaffold generic (i.e. all atom types->C and all bonds ->single
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('c1ccccc1')))
'C1CCCCC1'
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('c1ncccc1')))
'C1CCCCC1'
The following were associated with sf.net issue 246
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('c1[nH]ccc1')))
'C1CCCC1'
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('C1[NH2+]C1')))
'C1CC1'
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('C1[C@](Cl)(F)O1')))
'CC1(C)CC1' | Makes a Murcko scaffold generic (i.e. all atom types->C and all bonds ->single | [
"Makes",
"a",
"Murcko",
"scaffold",
"generic",
"(",
"i",
".",
"e",
".",
"all",
"atom",
"types",
"-",
">",
"C",
"and",
"all",
"bonds",
"-",
">",
"single"
] | def MakeScaffoldGeneric(mol):
""" Makes a Murcko scaffold generic (i.e. all atom types->C and all bonds ->single
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('c1ccccc1')))
'C1CCCCC1'
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('c1ncccc1')))
'C1CCCCC1'
The following were associated with sf.net issue 246
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('c1[nH]ccc1')))
'C1CCCC1'
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('C1[NH2+]C1')))
'C1CC1'
>>> Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles('C1[C@](Cl)(F)O1')))
'CC1(C)CC1'
"""
res = Chem.Mol(mol)
for atom in res.GetAtoms():
if atom.GetAtomicNum() != 1:
atom.SetAtomicNum(6)
atom.SetIsAromatic(False)
atom.SetFormalCharge(0)
atom.SetChiralTag(Chem.ChiralType.CHI_UNSPECIFIED)
atom.SetNoImplicit(0)
atom.SetNumExplicitHs(0)
for bond in res.GetBonds():
bond.SetBondType(Chem.BondType.SINGLE)
bond.SetIsAromatic(False)
return Chem.RemoveHs(res) | [
"def",
"MakeScaffoldGeneric",
"(",
"mol",
")",
":",
"res",
"=",
"Chem",
".",
"Mol",
"(",
"mol",
")",
"for",
"atom",
"in",
"res",
".",
"GetAtoms",
"(",
")",
":",
"if",
"atom",
".",
"GetAtomicNum",
"(",
")",
"!=",
"1",
":",
"atom",
".",
"SetAtomicNum... | https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/Chem/Scaffolds/MurckoScaffold.py#L18-L47 | |
macchina-io/macchina.io | ef24ba0e18379c3dd48fb84e6dbf991101cb8db0 | platform/JS/V8/v8/tools/dev/v8gen.py | python | GenerateGnArgs._goma_args | (self) | Gn args for using goma. | Gn args for using goma. | [
"Gn",
"args",
"for",
"using",
"goma",
"."
] | def _goma_args(self):
"""Gn args for using goma."""
# Specify goma args if we want to use goma and if goma isn't specified
# via command line already. The command-line always has precedence over
# any other specification.
if (self._use_goma and
not any(re.match(r'use_goma\s*=.*', x) for x in self._gn_args)):
if self._need_goma_dir:
return 'use_goma=true\ngoma_dir="%s"' % self._goma_dir
else:
return 'use_goma=true'
else:
return '' | [
"def",
"_goma_args",
"(",
"self",
")",
":",
"# Specify goma args if we want to use goma and if goma isn't specified",
"# via command line already. The command-line always has precedence over",
"# any other specification.",
"if",
"(",
"self",
".",
"_use_goma",
"and",
"not",
"any",
"... | https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/tools/dev/v8gen.py#L244-L256 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/credentials.py | python | AssumeRoleWithWebIdentityCredentialFetcher._get_credentials | (self) | return client.assume_role_with_web_identity(**kwargs) | Get credentials by calling assume role. | Get credentials by calling assume role. | [
"Get",
"credentials",
"by",
"calling",
"assume",
"role",
"."
] | def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
# Assume role with web identity does not require credentials other than
# the token, explicitly configure the client to not sign requests.
config = Config(signature_version=UNSIGNED)
client = self._client_creator('sts', config=config)
return client.assume_role_with_web_identity(**kwargs) | [
"def",
"_get_credentials",
"(",
"self",
")",
":",
"kwargs",
"=",
"self",
".",
"_assume_role_kwargs",
"(",
")",
"# Assume role with web identity does not require credentials other than",
"# the token, explicitly configure the client to not sign requests.",
"config",
"=",
"Config",
... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/credentials.py#L856-L863 | |
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/cpp.py | python | _FunctionState.begin | (self, function_name, function_name_start_position, body_start_position, end_position,
parameter_start_position, parameter_end_position, clean_lines) | Start analyzing function body.
Args:
function_name: The name of the function being tracked.
function_name_start_position: Position in elided where the function name starts.
body_start_position: Position in elided of the { or the ; for a prototype.
end_position: Position in elided just after the final } (or ; is.
parameter_start_position: Position in elided of the '(' for the parameters.
parameter_end_position: Position in elided just after the ')' for the parameters.
clean_lines: A CleansedLines instance containing the file. | Start analyzing function body. | [
"Start",
"analyzing",
"function",
"body",
"."
] | def begin(self, function_name, function_name_start_position, body_start_position, end_position,
parameter_start_position, parameter_end_position, clean_lines):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
function_name_start_position: Position in elided where the function name starts.
body_start_position: Position in elided of the { or the ; for a prototype.
end_position: Position in elided just after the final } (or ; is.
parameter_start_position: Position in elided of the '(' for the parameters.
parameter_end_position: Position in elided just after the ')' for the parameters.
clean_lines: A CleansedLines instance containing the file.
"""
self.in_a_function = True
self.lines_in_function = -1 # Don't count the open brace line.
self.current_function = function_name
self.function_name_start_position = function_name_start_position
self.body_start_position = body_start_position
self.end_position = end_position
self.is_declaration = clean_lines.elided[body_start_position.row][body_start_position.column] == ';'
self.parameter_start_position = parameter_start_position
self.parameter_end_position = parameter_end_position
self.is_pure = False
if self.is_declaration:
characters_after_parameters = SingleLineView(clean_lines.elided, parameter_end_position, body_start_position).single_line
self.is_pure = bool(match(r'\s*=\s*0\s*', characters_after_parameters))
self._clean_lines = clean_lines
self._parameter_list = None | [
"def",
"begin",
"(",
"self",
",",
"function_name",
",",
"function_name_start_position",
",",
"body_start_position",
",",
"end_position",
",",
"parameter_start_position",
",",
"parameter_end_position",
",",
"clean_lines",
")",
":",
"self",
".",
"in_a_function",
"=",
"T... | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/cpp.py#L539-L566 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/idlelib/tooltip.py | python | TooltipBase.showtip | (self) | display the tooltip | display the tooltip | [
"display",
"the",
"tooltip"
] | def showtip(self):
"""display the tooltip"""
if self.tipwindow:
return
self.tipwindow = tw = Toplevel(self.anchor_widget)
# show no border on the top level window
tw.wm_overrideredirect(1)
try:
# This command is only needed and available on Tk >= 8.4.0 for OSX.
# Without it, call tips intrude on the typing process by grabbing
# the focus.
tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
"help", "noActivates")
except TclError:
pass
self.position_window()
self.showcontents()
self.tipwindow.update_idletasks() # Needed on MacOS -- see #34275.
self.tipwindow.lift() | [
"def",
"showtip",
"(",
"self",
")",
":",
"if",
"self",
".",
"tipwindow",
":",
"return",
"self",
".",
"tipwindow",
"=",
"tw",
"=",
"Toplevel",
"(",
"self",
".",
"anchor_widget",
")",
"# show no border on the top level window",
"tw",
".",
"wm_overrideredirect",
... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/idlelib/tooltip.py#L26-L45 | ||
PX4/PX4-Autopilot | 0b9f60a0370be53d683352c63fd92db3d6586e18 | Tools/ecl_ekf/plotting/data_plots.py | python | get_min_arg_time_value | (
time_series_data: np.ndarray, data_time: np.ndarray) | return (min_arg, min_value, min_time) | :param time_series_data:
:param data_time:
:return: | :param time_series_data:
:param data_time:
:return: | [
":",
"param",
"time_series_data",
":",
":",
"param",
"data_time",
":",
":",
"return",
":"
] | def get_min_arg_time_value(
time_series_data: np.ndarray, data_time: np.ndarray) -> Tuple[int, float, float]:
"""
:param time_series_data:
:param data_time:
:return:
"""
min_arg = np.argmin(time_series_data)
min_time = data_time[min_arg]
min_value = np.amin(time_series_data)
return (min_arg, min_value, min_time) | [
"def",
"get_min_arg_time_value",
"(",
"time_series_data",
":",
"np",
".",
"ndarray",
",",
"data_time",
":",
"np",
".",
"ndarray",
")",
"->",
"Tuple",
"[",
"int",
",",
"float",
",",
"float",
"]",
":",
"min_arg",
"=",
"np",
".",
"argmin",
"(",
"time_series... | https://github.com/PX4/PX4-Autopilot/blob/0b9f60a0370be53d683352c63fd92db3d6586e18/Tools/ecl_ekf/plotting/data_plots.py#L14-L24 | |
papyrussolution/OpenPapyrus | bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91 | Src/OSF/protobuf-3.19.1/python/google/protobuf/service.py | python | RpcController.Failed | (self) | Returns true if the call failed.
After a call has finished, returns true if the call failed. The possible
reasons for failure depend on the RPC implementation. Failed() must not
be called before a call has finished. If Failed() returns true, the
contents of the response message are undefined. | Returns true if the call failed. | [
"Returns",
"true",
"if",
"the",
"call",
"failed",
"."
] | def Failed(self):
"""Returns true if the call failed.
After a call has finished, returns true if the call failed. The possible
reasons for failure depend on the RPC implementation. Failed() must not
be called before a call has finished. If Failed() returns true, the
contents of the response message are undefined.
"""
raise NotImplementedError | [
"def",
"Failed",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/papyrussolution/OpenPapyrus/blob/bbfb5ec2ea2109b8e2f125edd838e12eaf7b8b91/Src/OSF/protobuf-3.19.1/python/google/protobuf/service.py#L142-L150 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/throbber.py | python | Throbber.SetRest | (self, rest) | Set rest image | Set rest image | [
"Set",
"rest",
"image"
] | def SetRest(self, rest):
"""Set rest image"""
self.rest = rest | [
"def",
"SetRest",
"(",
"self",
",",
"rest",
")",
":",
"self",
".",
"rest",
"=",
"rest"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/throbber.py#L234-L236 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/recfunctions.py | python | find_duplicates | (a, key=None, ignoremask=True, return_index=False) | Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
(masked_array(data=[(1,), (1,), (2,), (2,)],
mask=[(False,), (False,), (False,), (False,)],
fill_value=(999999,),
dtype=[('a', '<i8')]), array([0, 1, 3, 4])) | Find the duplicates in a structured array along a given key | [
"Find",
"the",
"duplicates",
"in",
"a",
"structured",
"array",
"along",
"a",
"given",
"key"
] | def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
(masked_array(data=[(1,), (1,), (2,), (2,)],
mask=[(False,), (False,), (False,), (False,)],
fill_value=(999999,),
dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates | [
"def",
"find_duplicates",
"(",
"a",
",",
"key",
"=",
"None",
",",
"ignoremask",
"=",
"True",
",",
"return_index",
"=",
"False",
")",
":",
"a",
"=",
"np",
".",
"asanyarray",
"(",
"a",
")",
".",
"ravel",
"(",
")",
"# Get a dictionary of fields",
"fields",
... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/recfunctions.py#L1348-L1402 | ||
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/utils/vim-lldb/python-vim-lldb/vim_panes.py | python | RegistersPane.get_frame_content | (self, frame) | return result | Returns a list of key-value pairs ("name", "value") of registers in frame | Returns a list of key-value pairs ("name", "value") of registers in frame | [
"Returns",
"a",
"list",
"of",
"key",
"-",
"value",
"pairs",
"(",
"name",
"value",
")",
"of",
"registers",
"in",
"frame"
] | def get_frame_content(self, frame):
""" Returns a list of key-value pairs ("name", "value") of registers in frame """
result = []
for register_sets in frame.GetRegisters():
# hack the register group name into the list of registers...
result.append((" = = %s =" % register_sets.GetName(), ""))
for reg in register_sets:
result.append(self.format_register(reg))
return result | [
"def",
"get_frame_content",
"(",
"self",
",",
"frame",
")",
":",
"result",
"=",
"[",
"]",
"for",
"register_sets",
"in",
"frame",
".",
"GetRegisters",
"(",
")",
":",
"# hack the register group name into the list of registers...",
"result",
".",
"append",
"(",
"(",
... | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/utils/vim-lldb/python-vim-lldb/vim_panes.py#L519-L529 | |
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/contrib/graph_editor/edit.py | python | detach_control_outputs | (sgv, control_outputs) | Detach all the external control outputs of the subgraph sgv.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
control_outputs: a util.ControlOutputs instance. | Detach all the external control outputs of the subgraph sgv. | [
"Detach",
"all",
"the",
"external",
"control",
"outputs",
"of",
"the",
"subgraph",
"sgv",
"."
] | def detach_control_outputs(sgv, control_outputs):
"""Detach all the external control outputs of the subgraph sgv.
Args:
sgv: the subgraph view to be detached. This argument is converted to a
subgraph using the same rules as the function subgraph.make_view.
control_outputs: a util.ControlOutputs instance.
"""
if not isinstance(control_outputs, util.ControlOutputs):
raise TypeError("Expected a util.ControlOutputs, got: {}",
type(control_outputs))
control_outputs.update()
sgv = subgraph.make_view(sgv)
for op in sgv.ops:
for cop in control_outputs.get(op):
if cop not in sgv.ops:
reroute.remove_control_inputs(cop, op) | [
"def",
"detach_control_outputs",
"(",
"sgv",
",",
"control_outputs",
")",
":",
"if",
"not",
"isinstance",
"(",
"control_outputs",
",",
"util",
".",
"ControlOutputs",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected a util.ControlOutputs, got: {}\"",
",",
"type",
"("... | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/graph_editor/edit.py#L42-L58 | ||
oracle/graaljs | 36a56e8e993d45fc40939a3a4d9c0c24990720f1 | graal-nodejs/tools/inspector_protocol/jinja2/bccache.py | python | BytecodeCache.set_bucket | (self, bucket) | Put the bucket into the cache. | Put the bucket into the cache. | [
"Put",
"the",
"bucket",
"into",
"the",
"cache",
"."
] | def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket) | [
"def",
"set_bucket",
"(",
"self",
",",
"bucket",
")",
":",
"self",
".",
"dump_bytecode",
"(",
"bucket",
")"
] | https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/inspector_protocol/jinja2/bccache.py#L190-L192 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | TextAttr.HasAlignment | (*args, **kwargs) | return _controls_.TextAttr_HasAlignment(*args, **kwargs) | HasAlignment(self) -> bool | HasAlignment(self) -> bool | [
"HasAlignment",
"(",
"self",
")",
"-",
">",
"bool"
] | def HasAlignment(*args, **kwargs):
"""HasAlignment(self) -> bool"""
return _controls_.TextAttr_HasAlignment(*args, **kwargs) | [
"def",
"HasAlignment",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TextAttr_HasAlignment",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L1776-L1778 | |
h2oai/deepwater | 80e345c582e6ef912a31f42707a2f31c01b064da | tensorflow/src/main/resources/deepwater/train.py | python | ImageClassificationTrainStrategy.accuracy | (self) | return self._accuracy | Returns the tensor containing the accuracy | Returns the tensor containing the accuracy | [
"Returns",
"the",
"tensor",
"containing",
"the",
"accuracy"
] | def accuracy(self):
""" Returns the tensor containing the accuracy"""
return self._accuracy | [
"def",
"accuracy",
"(",
"self",
")",
":",
"return",
"self",
".",
"_accuracy"
] | https://github.com/h2oai/deepwater/blob/80e345c582e6ef912a31f42707a2f31c01b064da/tensorflow/src/main/resources/deepwater/train.py#L116-L118 | |
SpenceKonde/megaTinyCore | 1c4a70b18a149fe6bcb551dfa6db11ca50b8997b | megaavr/tools/libs/pymcuprog/nvmmzeroplus.py | python | NvmAccessProviderCmsisDapMZeroPlus.read | (self, memory_info, offset, numbytes) | return self.sam.read_flash(address=offset, numbytes=numbytes) | Read the memory in chunks
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
:param offset: relative offset in the memory type
:param numbytes: number of bytes to read
:return: array of bytes read | Read the memory in chunks | [
"Read",
"the",
"memory",
"in",
"chunks"
] | def read(self, memory_info, offset, numbytes):
"""
Read the memory in chunks
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
:param offset: relative offset in the memory type
:param numbytes: number of bytes to read
:return: array of bytes read
"""
offset += memory_info[DeviceMemoryInfoKeys.ADDRESS]
return self.sam.read_flash(address=offset, numbytes=numbytes) | [
"def",
"read",
"(",
"self",
",",
"memory_info",
",",
"offset",
",",
"numbytes",
")",
":",
"offset",
"+=",
"memory_info",
"[",
"DeviceMemoryInfoKeys",
".",
"ADDRESS",
"]",
"return",
"self",
".",
"sam",
".",
"read_flash",
"(",
"address",
"=",
"offset",
",",
... | https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pymcuprog/nvmmzeroplus.py#L51-L62 | |
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqt/mantidqt/widgets/jupyterconsole.py | python | InProcessJupyterConsole.__init__ | (self, *args, **kwargs) | A constructor matching that of RichJupyterWidget
:param args: Positional arguments passed directly to RichJupyterWidget
:param kwargs: Keyword arguments. The following keywords are understood by this widget:
- startup_code: A code snippet to run on startup.
the rest are passed to RichJupyterWidget | A constructor matching that of RichJupyterWidget
:param args: Positional arguments passed directly to RichJupyterWidget
:param kwargs: Keyword arguments. The following keywords are understood by this widget: | [
"A",
"constructor",
"matching",
"that",
"of",
"RichJupyterWidget",
":",
"param",
"args",
":",
"Positional",
"arguments",
"passed",
"directly",
"to",
"RichJupyterWidget",
":",
"param",
"kwargs",
":",
"Keyword",
"arguments",
".",
"The",
"following",
"keywords",
"are... | def __init__(self, *args, **kwargs):
"""
A constructor matching that of RichJupyterWidget
:param args: Positional arguments passed directly to RichJupyterWidget
:param kwargs: Keyword arguments. The following keywords are understood by this widget:
- startup_code: A code snippet to run on startup.
the rest are passed to RichJupyterWidget
"""
startup_code = kwargs.pop("startup_code", "")
super(InProcessJupyterConsole, self).__init__(*args, **kwargs)
# create an in-process kernel
kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel()
kernel = kernel_manager.kernel
kernel.gui = 'qt'
# use a separate thread for execution
shell = kernel.shell
shell.run_code = async_wrapper(shell.run_code, shell)
# attach channels, start kernel and run any startup code
kernel_client = kernel_manager.client()
kernel_client.start_channels()
if startup_code:
shell.ex(startup_code)
self.kernel_manager = kernel_manager
self.kernel_client = kernel_client
# Override python input to raise a QInputDialog.
kernel.raw_input = QAppThreadCall(input_qinputdialog) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"startup_code",
"=",
"kwargs",
".",
"pop",
"(",
"\"startup_code\"",
",",
"\"\"",
")",
"super",
"(",
"InProcessJupyterConsole",
",",
"self",
")",
".",
"__init__",
"(",
... | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/jupyterconsole.py#L35-L68 | ||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/python/training/session_manager.py | python | SessionManager._model_not_ready | (self, sess) | Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
`None` if the model is ready, a `String` with the reason why it is not
ready otherwise. | Checks if the model is ready or not. | [
"Checks",
"if",
"the",
"model",
"is",
"ready",
"or",
"not",
"."
] | def _model_not_ready(self, sess):
"""Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
`None` if the model is ready, a `String` with the reason why it is not
ready otherwise.
"""
if self._ready_op is None:
return None
else:
try:
ready_value = sess.run(self._ready_op)
# The model is considered ready if ready_op returns an empty 1-D tensor.
# Also compare to `None` and dtype being int32 for backward
# compatibility.
if (ready_value is None or ready_value.dtype == np.int32 or
ready_value.size == 0):
return None
else:
# TODO(sherrym): If a custom ready_op returns other types of tensor,
# or strings other than variable names, this message could be
# confusing.
non_initialized_varnames = ", ".join(
[i.decode("utf-8") for i in ready_value])
return "Variables not initialized: " + non_initialized_varnames
except errors.FailedPreconditionError as e:
if "uninitialized" not in str(e):
logging.warning("Model not ready raised: %s", str(e))
raise e
return str(e) | [
"def",
"_model_not_ready",
"(",
"self",
",",
"sess",
")",
":",
"if",
"self",
".",
"_ready_op",
"is",
"None",
":",
"return",
"None",
"else",
":",
"try",
":",
"ready_value",
"=",
"sess",
".",
"run",
"(",
"self",
".",
"_ready_op",
")",
"# The model is consi... | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/training/session_manager.py#L312-L344 | ||
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | python/mxnet/ndarray_doc.py | python | _build_doc | (func_name,
desc,
arg_names,
arg_types,
arg_desc,
key_var_num_args=None,
ret_type=None) | return doc_str | Build docstring for imperative functions. | Build docstring for imperative functions. | [
"Build",
"docstring",
"for",
"imperative",
"functions",
"."
] | def _build_doc(func_name,
desc,
arg_names,
arg_types,
arg_desc,
key_var_num_args=None,
ret_type=None):
"""Build docstring for imperative functions."""
param_str = _build_param_doc(arg_names, arg_types, arg_desc)
# if key_var_num_args:
# desc += '\nThis function support variable length of positional input.'
doc_str = ('%s\n\n' +
'%s\n' +
'out : NDArray, optional\n' +
' The output NDArray to hold the result.\n\n'+
'Returns\n' +
'-------\n' +
'out : NDArray or list of NDArrays\n' +
' The output of this function.')
doc_str = doc_str % (desc, param_str)
extra_doc = "\n" + '\n'.join([x.__doc__ for x in type.__subclasses__(NDArrayDoc)
if x.__name__ == '%sDoc' % func_name])
doc_str += _re.sub(_re.compile(" "), "", extra_doc)
doc_str = _re.sub('NDArray-or-Symbol', 'NDArray', doc_str)
return doc_str | [
"def",
"_build_doc",
"(",
"func_name",
",",
"desc",
",",
"arg_names",
",",
"arg_types",
",",
"arg_desc",
",",
"key_var_num_args",
"=",
"None",
",",
"ret_type",
"=",
"None",
")",
":",
"param_str",
"=",
"_build_param_doc",
"(",
"arg_names",
",",
"arg_types",
"... | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/ndarray_doc.py#L132-L157 | |
PaddlePaddle/Anakin | 5fd68a6cc4c4620cd1a30794c1bf06eebd3f4730 | tools/external_converter_v2/parser/tensorflow/parse_med_2_ak.py | python | MedTransAK.map_med_2_ak | (self, ak_node, med_node) | entrance for trans med graph 2 ak graph
:param ak_node:
:param med_node:
:return: | entrance for trans med graph 2 ak graph
:param ak_node:
:param med_node:
:return: | [
"entrance",
"for",
"trans",
"med",
"graph",
"2",
"ak",
"graph",
":",
"param",
"ak_node",
":",
":",
"param",
"med_node",
":",
":",
"return",
":"
] | def map_med_2_ak(self, ak_node, med_node):
'''
entrance for trans med graph 2 ak graph
:param ak_node:
:param med_node:
:return:
'''
type_name = med_node['ak_type']
func = getattr(self, type_name, None)
param = OpsParam()
ak_op = OpsProtoIO()
med_attr = med_node['ak_attr']
# print('nodename = ', med_node['name'])
func(med_attr, param)
param.feed_node_attr(ak_node)
ak_op.set_name(med_node['ak_type'])
ak_node.set_op(ak_op())
[ak_node.add_in(i['name']) for i in med_node['input']]
[ak_node.add_out(i['name']) for i in med_node['output']] | [
"def",
"map_med_2_ak",
"(",
"self",
",",
"ak_node",
",",
"med_node",
")",
":",
"type_name",
"=",
"med_node",
"[",
"'ak_type'",
"]",
"func",
"=",
"getattr",
"(",
"self",
",",
"type_name",
",",
"None",
")",
"param",
"=",
"OpsParam",
"(",
")",
"ak_op",
"=... | https://github.com/PaddlePaddle/Anakin/blob/5fd68a6cc4c4620cd1a30794c1bf06eebd3f4730/tools/external_converter_v2/parser/tensorflow/parse_med_2_ak.py#L235-L254 | ||
bingwin/MicroChat | 81d9a71a212c1cbca5bba497ec42659a7d25dccf | mars/lint/cpplint.py | python | NestingState.InClassDeclaration | (self) | return self.stack and isinstance(self.stack[-1], _ClassInfo) | Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise. | Check if we are currently one level inside a class or struct declaration. | [
"Check",
"if",
"we",
"are",
"currently",
"one",
"level",
"inside",
"a",
"class",
"or",
"struct",
"declaration",
"."
] | def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo) | [
"def",
"InClassDeclaration",
"(",
"self",
")",
":",
"return",
"self",
".",
"stack",
"and",
"isinstance",
"(",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
",",
"_ClassInfo",
")"
] | https://github.com/bingwin/MicroChat/blob/81d9a71a212c1cbca5bba497ec42659a7d25dccf/mars/lint/cpplint.py#L2255-L2261 | |
lmb-freiburg/flownet2 | b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc | scripts/cpp_lint.py | python | FileInfo.FullName | (self) | return os.path.abspath(self._filename).replace('\\', '/') | Make Windows paths like Unix. | Make Windows paths like Unix. | [
"Make",
"Windows",
"paths",
"like",
"Unix",
"."
] | def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/') | [
"def",
"FullName",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"_filename",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")"
] | https://github.com/lmb-freiburg/flownet2/blob/b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc/scripts/cpp_lint.py#L881-L883 | |
cvmfs/cvmfs | 4637bdb5153178eadf885c1acf37bdc5c685bf8a | cpplint.py | python | GetLineWidth | (line) | Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters. | Determines the width of the line in column positions. | [
"Determines",
"the",
"width",
"of",
"the",
"line",
"in",
"column",
"positions",
"."
] | def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line) | [
"def",
"GetLineWidth",
"(",
"line",
")",
":",
"if",
"isinstance",
"(",
"line",
",",
"unicode",
")",
":",
"width",
"=",
"0",
"for",
"uc",
"in",
"unicodedata",
".",
"normalize",
"(",
"'NFC'",
",",
"line",
")",
":",
"if",
"unicodedata",
".",
"east_asian_w... | https://github.com/cvmfs/cvmfs/blob/4637bdb5153178eadf885c1acf37bdc5c685bf8a/cpplint.py#L4354-L4373 | ||
chromiumembedded/cef | 80caf947f3fe2210e5344713c5281d8af9bdc295 | tools/crash_server.py | python | CrashHTTPRequestHandler.do_HEAD | (self) | Default empty implementation for handling HEAD requests. | Default empty implementation for handling HEAD requests. | [
"Default",
"empty",
"implementation",
"for",
"handling",
"HEAD",
"requests",
"."
] | def do_HEAD(self):
""" Default empty implementation for handling HEAD requests. """
self._send_default_response_headers() | [
"def",
"do_HEAD",
"(",
"self",
")",
":",
"self",
".",
"_send_default_response_headers",
"(",
")"
] | https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/crash_server.py#L220-L222 | ||
llvm/llvm-project | ffa6262cb4e2a335d26416fad39a581b4f98c5f4 | lldb/third_party/Python/module/pexpect-4.6/pexpect/FSM.py | python | FSM.add_transition | (self, input_symbol, state, action=None, next_state=None) | This adds a transition that associates:
(input_symbol, current_state) --> (action, next_state)
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged.
You can also set transitions for a list of symbols by using
add_transition_list(). | This adds a transition that associates: | [
"This",
"adds",
"a",
"transition",
"that",
"associates",
":"
] | def add_transition (self, input_symbol, state, action=None, next_state=None):
'''This adds a transition that associates:
(input_symbol, current_state) --> (action, next_state)
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged.
You can also set transitions for a list of symbols by using
add_transition_list(). '''
if next_state is None:
next_state = state
self.state_transitions[(input_symbol, state)] = (action, next_state) | [
"def",
"add_transition",
"(",
"self",
",",
"input_symbol",
",",
"state",
",",
"action",
"=",
"None",
",",
"next_state",
"=",
"None",
")",
":",
"if",
"next_state",
"is",
"None",
":",
"next_state",
"=",
"state",
"self",
".",
"state_transitions",
"[",
"(",
... | https://github.com/llvm/llvm-project/blob/ffa6262cb4e2a335d26416fad39a581b4f98c5f4/lldb/third_party/Python/module/pexpect-4.6/pexpect/FSM.py#L131-L146 | ||
Kitware/ParaView | f760af9124ff4634b23ebbeab95a4f56e0261955 | Wrapping/Python/paraview/simple.py | python | demo1 | () | Simple demo that create the following pipeline::
sphere - shrink +
cone + > append | Simple demo that create the following pipeline:: | [
"Simple",
"demo",
"that",
"create",
"the",
"following",
"pipeline",
"::"
] | def demo1():
"""
Simple demo that create the following pipeline::
sphere - shrink +
cone + > append
"""
# Create a sphere of radius = 2, theta res. = 32
# This object becomes the active source.
ss = Sphere(Radius=2, ThetaResolution=32)
# Apply the shrink filter. The Input property is optional. If Input
# is not specified, the filter is applied to the active source.
shr = Shrink(Input=ss)
# Create a cone source.
cs = Cone()
# Append cone and shrink
app = AppendDatasets()
app.Input = [shr, cs]
# Show the output of the append filter. The argument is optional
# as the app filter is now the active object.
Show(app)
# Render the default view.
Render() | [
"def",
"demo1",
"(",
")",
":",
"# Create a sphere of radius = 2, theta res. = 32",
"# This object becomes the active source.",
"ss",
"=",
"Sphere",
"(",
"Radius",
"=",
"2",
",",
"ThetaResolution",
"=",
"32",
")",
"# Apply the shrink filter. The Input property is optional. If In... | https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Wrapping/Python/paraview/simple.py#L2549-L2572 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/build/waf-1.7.13/platforms/compile_settings_android.py | python | load_android_common_settings | (conf) | Setup all compiler and linker settings shared over all android configurations | Setup all compiler and linker settings shared over all android configurations | [
"Setup",
"all",
"compiler",
"and",
"linker",
"settings",
"shared",
"over",
"all",
"android",
"configurations"
] | def load_android_common_settings(conf):
"""
Setup all compiler and linker settings shared over all android configurations
"""
env = conf.env
ndk_root = env['ANDROID_NDK_HOME']
defines = []
if env['ANDROID_NDK_REV_MAJOR'] < 19:
defines += [
'__ANDROID_API__={}'.format(env['ANDROID_NDK_PLATFORM_NUMBER']),
]
append_to_unique_list(env['DEFINES'], defines)
# Pattern to transform outputs
env['cprogram_PATTERN'] = env['cxxprogram_PATTERN'] = '%s'
env['cshlib_PATTERN'] = env['cxxshlib_PATTERN'] = 'lib%s.so'
env['cstlib_PATTERN'] = env['cxxstlib_PATTERN'] = 'lib%s.a'
env['RPATH_ST'] = '-Wl,-rpath,%s'
env['SONAME_ST'] = '-Wl,-soname,%s' # sets the DT_SONAME field in the shared object, used for ELF object loading
# frameworks aren't supported on Android, disable it
env['FRAMEWORK'] = []
env['FRAMEWORK_ST'] = ''
env['FRAMEWORKPATH'] = []
env['FRAMEWORKPATH_ST'] = ''
# java settings
env['JAVA_VERSION'] = '1.7'
env['CLASSPATH'] = []
platform = os.path.join(env['ANDROID_SDK_HOME'], 'platforms', env['ANDROID_SDK_VERSION'])
android_jar = os.path.join(platform, 'android.jar')
env['JAVACFLAGS'] = [
'-encoding', 'UTF-8',
'-bootclasspath', android_jar,
'-target', env['JAVA_VERSION'],
]
# android interface processing
env['AIDL_PREPROC_ST'] = '-p%s'
env['AIDL_PREPROCESSES'] = [ os.path.join(platform, 'framework.aidl') ]
# aapt settings
env['AAPT_ASSETS_ST'] = [ '-A' ]
env['AAPT_ASSETS'] = []
env['AAPT_RESOURCE_ST'] = [ '-S' ]
env['AAPT_RESOURCES'] = []
env['AAPT_INLC_ST'] = [ '-I' ]
env['AAPT_INCLUDES'] = [ android_jar ]
env['AAPT_PACKAGE_FLAGS'] = [ '--auto-add-overlay' ]
# apk packaging settings
env['ANDROID_MANIFEST'] = ''
env['ANDROID_DEBUG_MODE'] = ''
# manifest merger settings
tools_path = os.path.join(env['ANDROID_SDK_HOME'], 'tools', 'lib')
tools_contents = os.listdir(tools_path)
tools_jars = [ entry for entry in tools_contents if entry.lower().endswith('.jar') ]
manifest_merger_lib_names = [
# entry point for the merger
'manifest-merger',
# dependent libs
'sdk-common',
'common'
]
manifest_merger_libs = []
for jar in tools_jars:
if any(lib_name for lib_name in manifest_merger_lib_names if jar.lower().startswith(lib_name)):
manifest_merger_libs.append(jar)
if len(manifest_merger_libs) < len(manifest_merger_lib_names):
conf.fatal('[ERROR] Failed to find the required file(s) for the Manifest Merger. Please use the Android SDK Manager to update to the latest SDK Tools version and run the configure command again.')
env['MANIFEST_MERGER_CLASSPATH'] = os.pathsep.join([ os.path.join(tools_path, jar_file) for jar_file in manifest_merger_libs ])
# zipalign settings
env['ZIPALIGN_SIZE'] = '4' # alignment in bytes, e.g. '4' provides 32-bit alignment (has to be a string)
# jarsigner settings
env['KEYSTORE_ALIAS'] = conf.get_android_env_keystore_alias()
env['KEYSTORE'] = conf.get_android_env_keystore_path() | [
"def",
"load_android_common_settings",
"(",
"conf",
")",
":",
"env",
"=",
"conf",
".",
"env",
"ndk_root",
"=",
"env",
"[",
"'ANDROID_NDK_HOME'",
"]",
"defines",
"=",
"[",
"]",
"if",
"env",
"[",
"'ANDROID_NDK_REV_MAJOR'",
"]",
"<",
"19",
":",
"defines",
"+=... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/platforms/compile_settings_android.py#L88-L181 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_core.py | python | ImageHistogram.MakeKey | (*args, **kwargs) | return _core_.ImageHistogram_MakeKey(*args, **kwargs) | MakeKey(byte r, byte g, byte b) -> unsigned long
Get the key in the histogram for the given RGB values | MakeKey(byte r, byte g, byte b) -> unsigned long | [
"MakeKey",
"(",
"byte",
"r",
"byte",
"g",
"byte",
"b",
")",
"-",
">",
"unsigned",
"long"
] | def MakeKey(*args, **kwargs):
"""
MakeKey(byte r, byte g, byte b) -> unsigned long
Get the key in the histogram for the given RGB values
"""
return _core_.ImageHistogram_MakeKey(*args, **kwargs) | [
"def",
"MakeKey",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"ImageHistogram_MakeKey",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L2747-L2753 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py2/setuptools/_vendor/pyparsing.py | python | ParseResults.pprint | (self, *args, **kwargs) | Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']] | Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) | [
"Pretty",
"-",
"printer",
"for",
"parsed",
"results",
"as",
"a",
"list",
"using",
"the",
"C",
"{",
"pprint",
"}",
"module",
".",
"Accepts",
"additional",
"positional",
"or",
"keyword",
"args",
"as",
"defined",
"for",
"the",
"C",
"{",
"pprint",
".",
"ppri... | def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs) | [
"def",
"pprint",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"pprint",
".",
"pprint",
"(",
"self",
".",
"asList",
"(",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/_vendor/pyparsing.py#L916-L937 | ||
mapnik/mapnik | f3da900c355e1d15059c4a91b00203dcc9d9f0ef | scons/scons-local-4.1.0/SCons/Tool/aixlink.py | python | generate | (env) | Add Builders and construction variables for Visual Age linker to
an Environment. | Add Builders and construction variables for Visual Age linker to
an Environment. | [
"Add",
"Builders",
"and",
"construction",
"variables",
"for",
"Visual",
"Age",
"linker",
"to",
"an",
"Environment",
"."
] | def generate(env):
"""
Add Builders and construction variables for Visual Age linker to
an Environment.
"""
link.generate(env)
env['SMARTLINKFLAGS'] = smart_linkflags
env['LINKFLAGS'] = SCons.Util.CLVar('$SMARTLINKFLAGS')
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -qmkshrobj -qsuppress=1501-218')
env['SHLIBSUFFIX'] = '.a' | [
"def",
"generate",
"(",
"env",
")",
":",
"link",
".",
"generate",
"(",
"env",
")",
"env",
"[",
"'SMARTLINKFLAGS'",
"]",
"=",
"smart_linkflags",
"env",
"[",
"'LINKFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'$SMARTLINKFLAGS'",
")",
"env",... | https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Tool/aixlink.py#L51-L61 | ||
eric612/MobileNet-YOLO | 69b4441cb3ec8d553fbdef788ad033e246f901bd | scripts/cpp_lint.py | python | _CppLintState.ResetErrorCounts | (self) | Sets the module's error statistic back to zero. | Sets the module's error statistic back to zero. | [
"Sets",
"the",
"module",
"s",
"error",
"statistic",
"back",
"to",
"zero",
"."
] | def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {} | [
"def",
"ResetErrorCounts",
"(",
"self",
")",
":",
"self",
".",
"error_count",
"=",
"0",
"self",
".",
"errors_by_category",
"=",
"{",
"}"
] | https://github.com/eric612/MobileNet-YOLO/blob/69b4441cb3ec8d553fbdef788ad033e246f901bd/scripts/cpp_lint.py#L746-L749 | ||
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/src/robotsim.py | python | RobotModel.getComJacobian | (self) | return _robotsim.RobotModel_getComJacobian(self) | getComJacobian(RobotModel self)
Returns the Jacobian matrix of the current center of mass.
Returns:
(list of 3 lists): a 3xn matrix J such that np.dot(J,dq) gives the
COM velocity at the currene configuration | getComJacobian(RobotModel self) | [
"getComJacobian",
"(",
"RobotModel",
"self",
")"
] | def getComJacobian(self):
"""
getComJacobian(RobotModel self)
Returns the Jacobian matrix of the current center of mass.
Returns:
(list of 3 lists): a 3xn matrix J such that np.dot(J,dq) gives the
COM velocity at the currene configuration
"""
return _robotsim.RobotModel_getComJacobian(self) | [
"def",
"getComJacobian",
"(",
"self",
")",
":",
"return",
"_robotsim",
".",
"RobotModel_getComJacobian",
"(",
"self",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/src/robotsim.py#L4842-L4856 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | Window.InheritsBackgroundColour | (*args, **kwargs) | return _core_.Window_InheritsBackgroundColour(*args, **kwargs) | InheritsBackgroundColour(self) -> bool | InheritsBackgroundColour(self) -> bool | [
"InheritsBackgroundColour",
"(",
"self",
")",
"-",
">",
"bool"
] | def InheritsBackgroundColour(*args, **kwargs):
"""InheritsBackgroundColour(self) -> bool"""
return _core_.Window_InheritsBackgroundColour(*args, **kwargs) | [
"def",
"InheritsBackgroundColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Window_InheritsBackgroundColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L10902-L10904 | |
sdhash/sdhash | b9eff63e4e5867e910f41fd69032bbb1c94a2a5e | sdhash-ui/serverui/__init__.py | python | we_are_frozen | () | return hasattr(sys, "frozen") | Returns whether we are frozen via py2exe.
This will affect how we find out where we are located. | Returns whether we are frozen via py2exe.
This will affect how we find out where we are located. | [
"Returns",
"whether",
"we",
"are",
"frozen",
"via",
"py2exe",
".",
"This",
"will",
"affect",
"how",
"we",
"find",
"out",
"where",
"we",
"are",
"located",
"."
] | def we_are_frozen():
"""Returns whether we are frozen via py2exe.
This will affect how we find out where we are located."""
return hasattr(sys, "frozen") | [
"def",
"we_are_frozen",
"(",
")",
":",
"return",
"hasattr",
"(",
"sys",
",",
"\"frozen\"",
")"
] | https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/serverui/__init__.py#L16-L19 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/ribbon/toolbar.py | python | RibbonToolBar.GetToolState | (self, tool_id) | return tool.state & RIBBON_TOOLBAR_TOOL_TOGGLED | Gets the on/off state of a toggle tool.
:param `tool_id`: id of the tool in question, as passed to :meth:`~RibbonToolBar.AddTool`.
:return: ``True`` if the tool is toggled on, ``False`` otherwise.
:see: :meth:`~RibbonToolBar.ToggleTool`
.. versionadded:: 0.9.5 | Gets the on/off state of a toggle tool. | [
"Gets",
"the",
"on",
"/",
"off",
"state",
"of",
"a",
"toggle",
"tool",
"."
] | def GetToolState(self, tool_id):
"""
Gets the on/off state of a toggle tool.
:param `tool_id`: id of the tool in question, as passed to :meth:`~RibbonToolBar.AddTool`.
:return: ``True`` if the tool is toggled on, ``False`` otherwise.
:see: :meth:`~RibbonToolBar.ToggleTool`
.. versionadded:: 0.9.5
"""
tool = self.FindById(tool_id)
if tool is None:
raise Exception("Invalid tool id")
return tool.state & RIBBON_TOOLBAR_TOOL_TOGGLED | [
"def",
"GetToolState",
"(",
"self",
",",
"tool_id",
")",
":",
"tool",
"=",
"self",
".",
"FindById",
"(",
"tool_id",
")",
"if",
"tool",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Invalid tool id\"",
")",
"return",
"tool",
".",
"state",
"&",
"RIBBON_... | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ribbon/toolbar.py#L722-L739 | |
gklz1982/caffe-yolov2 | ebb27029db4ddc0d40e520634633b0fa9cdcc10d | scripts/cpp_lint.py | python | _NestingState.InNamespaceBody | (self) | return self.stack and isinstance(self.stack[-1], _NamespaceInfo) | Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise. | Check if we are currently one level inside a namespace body. | [
"Check",
"if",
"we",
"are",
"currently",
"one",
"level",
"inside",
"a",
"namespace",
"body",
"."
] | def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo) | [
"def",
"InNamespaceBody",
"(",
"self",
")",
":",
"return",
"self",
".",
"stack",
"and",
"isinstance",
"(",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
",",
"_NamespaceInfo",
")"
] | https://github.com/gklz1982/caffe-yolov2/blob/ebb27029db4ddc0d40e520634633b0fa9cdcc10d/scripts/cpp_lint.py#L1940-L1946 | |
BitMEX/api-connectors | 37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812 | auto-generated/python/swagger_client/models/chat.py | python | Chat.user | (self, user) | Sets the user of this Chat.
:param user: The user of this Chat. # noqa: E501
:type: str | Sets the user of this Chat. | [
"Sets",
"the",
"user",
"of",
"this",
"Chat",
"."
] | def user(self, user):
"""Sets the user of this Chat.
:param user: The user of this Chat. # noqa: E501
:type: str
"""
if user is None:
raise ValueError("Invalid value for `user`, must not be `None`") # noqa: E501
self._user = user | [
"def",
"user",
"(",
"self",
",",
"user",
")",
":",
"if",
"user",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `user`, must not be `None`\"",
")",
"# noqa: E501",
"self",
".",
"_user",
"=",
"user"
] | https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/chat.py#L131-L141 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/dtypes/cast.py | python | infer_dtype_from | (val, pandas_dtype: bool = False) | return infer_dtype_from_array(val, pandas_dtype=pandas_dtype) | Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, scalar/array belongs to pandas extension types is inferred as
object | Interpret the dtype from a scalar or array. | [
"Interpret",
"the",
"dtype",
"from",
"a",
"scalar",
"or",
"array",
"."
] | def infer_dtype_from(val, pandas_dtype: bool = False) -> tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, scalar/array belongs to pandas extension types is inferred as
object
"""
if not is_list_like(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype) | [
"def",
"infer_dtype_from",
"(",
"val",
",",
"pandas_dtype",
":",
"bool",
"=",
"False",
")",
"->",
"tuple",
"[",
"DtypeObj",
",",
"Any",
"]",
":",
"if",
"not",
"is_list_like",
"(",
"val",
")",
":",
"return",
"infer_dtype_from_scalar",
"(",
"val",
",",
"pa... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/dtypes/cast.py#L672-L686 | |
Vipermdl/OCR_detection_IC15 | 8eebd353d6fac97f5832a138d7af3bd3071670db | model/modules/pspnet.py | python | ResNet.__mean_image_subtraction | (self, images, means = [123.68, 116.78, 103.94]) | return images | image normalization
:param images: bs * w * h * channel
:param means:
:return: | image normalization
:param images: bs * w * h * channel
:param means:
:return: | [
"image",
"normalization",
":",
"param",
"images",
":",
"bs",
"*",
"w",
"*",
"h",
"*",
"channel",
":",
"param",
"means",
":",
":",
"return",
":"
] | def __mean_image_subtraction(self, images, means = [123.68, 116.78, 103.94]):
'''
image normalization
:param images: bs * w * h * channel
:param means:
:return:
'''
num_channels = images.data.shape[1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
for i in range(num_channels):
images.data[:, i, :, :] -= means[i]
return images | [
"def",
"__mean_image_subtraction",
"(",
"self",
",",
"images",
",",
"means",
"=",
"[",
"123.68",
",",
"116.78",
",",
"103.94",
"]",
")",
":",
"num_channels",
"=",
"images",
".",
"data",
".",
"shape",
"[",
"1",
"]",
"if",
"len",
"(",
"means",
")",
"!=... | https://github.com/Vipermdl/OCR_detection_IC15/blob/8eebd353d6fac97f5832a138d7af3bd3071670db/model/modules/pspnet.py#L175-L188 | |
plumonito/dtslam | 5994bb9cf7a11981b830370db206bceb654c085d | 3rdparty/opencv-git/3rdparty/jinja2/ext.py | python | InternationalizationExtension._parse_block | (self, parser, allow_pluralize) | return referenced, concat(buf) | Parse until the next block tag with a given name. | Parse until the next block tag with a given name. | [
"Parse",
"until",
"the",
"next",
"block",
"tag",
"with",
"a",
"given",
"name",
"."
] | def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf) | [
"def",
"_parse_block",
"(",
"self",
",",
"parser",
",",
"allow_pluralize",
")",
":",
"referenced",
"=",
"[",
"]",
"buf",
"=",
"[",
"]",
"while",
"1",
":",
"if",
"parser",
".",
"stream",
".",
"current",
".",
"type",
"==",
"'data'",
":",
"buf",
".",
... | https://github.com/plumonito/dtslam/blob/5994bb9cf7a11981b830370db206bceb654c085d/3rdparty/opencv-git/3rdparty/jinja2/ext.py#L309-L339 | |
may0324/DeepCompression-caffe | 0aff6c1287bda4cfc7f378ed8a16524e1afabd8c | scripts/cpp_lint.py | python | ParseArguments | (args) | return filenames | Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint. | Parses the command line arguments. | [
"Parses",
"the",
"command",
"line",
"arguments",
"."
] | def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be comma seperated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames | [
"def",
"ParseArguments",
"(",
"args",
")",
":",
"try",
":",
"(",
"opts",
",",
"filenames",
")",
"=",
"getopt",
".",
"getopt",
"(",
"args",
",",
"''",
",",
"[",
"'help'",
",",
"'output='",
",",
"'verbose='",
",",
"'counting='",
",",
"'filter='",
",",
... | https://github.com/may0324/DeepCompression-caffe/blob/0aff6c1287bda4cfc7f378ed8a16524e1afabd8c/scripts/cpp_lint.py#L4779-L4846 | |
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | GeneratorInterface/Herwig7Interface/scripts/parallelization.py | python | uint | (string) | return value | Unsigned int type | Unsigned int type | [
"Unsigned",
"int",
"type"
] | def uint(string):
"""Unsigned int type"""
value = int(string)
if value < 0:
msg = '{0} is negative'.format(string)
raise argparse.ArgumentTypeError(msg)
return value | [
"def",
"uint",
"(",
"string",
")",
":",
"value",
"=",
"int",
"(",
"string",
")",
"if",
"value",
"<",
"0",
":",
"msg",
"=",
"'{0} is negative'",
".",
"format",
"(",
"string",
")",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"msg",
")",
"return",... | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/GeneratorInterface/Herwig7Interface/scripts/parallelization.py#L50-L56 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/sparse/linalg/matfuncs.py | python | _smart_matrix_product | (A, B, alpha=None, structure=None) | return out | A matrix product that knows about sparse and structured matrices.
Parameters
----------
A : 2d ndarray
First matrix.
B : 2d ndarray
Second matrix.
alpha : float
The matrix product will be scaled by this constant.
structure : str, optional
A string describing the structure of both matrices `A` and `B`.
Only `upper_triangular` is currently supported.
Returns
-------
M : 2d ndarray
Matrix product of A and B. | A matrix product that knows about sparse and structured matrices. | [
"A",
"matrix",
"product",
"that",
"knows",
"about",
"sparse",
"and",
"structured",
"matrices",
"."
] | def _smart_matrix_product(A, B, alpha=None, structure=None):
"""
A matrix product that knows about sparse and structured matrices.
Parameters
----------
A : 2d ndarray
First matrix.
B : 2d ndarray
Second matrix.
alpha : float
The matrix product will be scaled by this constant.
structure : str, optional
A string describing the structure of both matrices `A` and `B`.
Only `upper_triangular` is currently supported.
Returns
-------
M : 2d ndarray
Matrix product of A and B.
"""
if len(A.shape) != 2:
raise ValueError('expected A to be a rectangular matrix')
if len(B.shape) != 2:
raise ValueError('expected B to be a rectangular matrix')
f = None
if structure == UPPER_TRIANGULAR:
if not isspmatrix(A) and not isspmatrix(B):
f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
if f is not None:
if alpha is None:
alpha = 1.
out = f(alpha, A, B)
else:
if alpha is None:
out = A.dot(B)
else:
out = alpha * A.dot(B)
return out | [
"def",
"_smart_matrix_product",
"(",
"A",
",",
"B",
",",
"alpha",
"=",
"None",
",",
"structure",
"=",
"None",
")",
":",
"if",
"len",
"(",
"A",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'expected A to be a rectangular matrix'",
")",
... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/sparse/linalg/matfuncs.py#L145-L184 | |
quantOS-org/DataCore | e2ef9bd2c22ee9e2845675b6435a14fa607f3551 | mdlink/deps/windows/protobuf-2.5.0/python/google/protobuf/internal/containers.py | python | RepeatedCompositeFieldContainer.__delslice__ | (self, start, stop) | Deletes the subset of items from between the specified indices. | Deletes the subset of items from between the specified indices. | [
"Deletes",
"the",
"subset",
"of",
"items",
"from",
"between",
"the",
"specified",
"indices",
"."
] | def __delslice__(self, start, stop):
"""Deletes the subset of items from between the specified indices."""
del self._values[start:stop]
self._message_listener.Modified() | [
"def",
"__delslice__",
"(",
"self",
",",
"start",
",",
"stop",
")",
":",
"del",
"self",
".",
"_values",
"[",
"start",
":",
"stop",
"]",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | https://github.com/quantOS-org/DataCore/blob/e2ef9bd2c22ee9e2845675b6435a14fa607f3551/mdlink/deps/windows/protobuf-2.5.0/python/google/protobuf/internal/containers.py#L257-L260 | ||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/backend.py | python | softsign | (x) | return nn.softsign(x) | Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor. | Softsign of a tensor. | [
"Softsign",
"of",
"a",
"tensor",
"."
] | def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x) | [
"def",
"softsign",
"(",
"x",
")",
":",
"return",
"nn",
".",
"softsign",
"(",
"x",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/backend.py#L4309-L4318 | |
microsoft/TSS.MSR | 0f2516fca2cd9929c31d5450e39301c9bde43688 | TSS.Py/src/TpmTypes.py | python | CertifyResponse.signatureSigAlg | (self) | return signature.GetUnionSelector() if signature else TPM_ALG_ID.NULL | Selector of the algorithm used to construct the signature | Selector of the algorithm used to construct the signature | [
"Selector",
"of",
"the",
"algorithm",
"used",
"to",
"construct",
"the",
"signature"
] | def signatureSigAlg(self): # TPM_ALG_ID
""" Selector of the algorithm used to construct the signature """
return signature.GetUnionSelector() if signature else TPM_ALG_ID.NULL | [
"def",
"signatureSigAlg",
"(",
"self",
")",
":",
"# TPM_ALG_ID",
"return",
"signature",
".",
"GetUnionSelector",
"(",
")",
"if",
"signature",
"else",
"TPM_ALG_ID",
".",
"NULL"
] | https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L12473-L12475 | |
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py | python | TarFile.next | (self) | return tarinfo | Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available. | Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available. | [
"Return",
"the",
"next",
"member",
"of",
"the",
"archive",
"as",
"a",
"TarInfo",
"object",
"when",
"TarFile",
"is",
"opened",
"for",
"reading",
".",
"Return",
"None",
"if",
"there",
"is",
"no",
"more",
"available",
"."
] | def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo | [
"def",
"next",
"(",
"self",
")",
":",
"self",
".",
"_check",
"(",
"\"ra\"",
")",
"if",
"self",
".",
"firstmember",
"is",
"not",
"None",
":",
"m",
"=",
"self",
".",
"firstmember",
"self",
".",
"firstmember",
"=",
"None",
"return",
"m",
"# Read the next ... | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L2414-L2458 | |
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/docbook/__init__.py | python | DocbookHtml | (env, target, source=None, *args, **kw) | return result | A pseudo-Builder, providing a Docbook toolchain for HTML output. | A pseudo-Builder, providing a Docbook toolchain for HTML output. | [
"A",
"pseudo",
"-",
"Builder",
"providing",
"a",
"Docbook",
"toolchain",
"for",
"HTML",
"output",
"."
] | def DocbookHtml(env, target, source=None, *args, **kw):
"""
A pseudo-Builder, providing a Docbook toolchain for HTML output.
"""
# Init list of targets/sources
target, source = __extend_targets_sources(target, source)
# Init XSL stylesheet
__init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTML', ['html','docbook.xsl'])
# Setup builder
__builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
# Create targets
result = []
for t,s in zip(target,source):
r = __builder.__call__(env, __ensure_suffix(t,'.html'), s, **kw)
env.Depends(r, kw['DOCBOOK_XSL'])
result.extend(r)
return result | [
"def",
"DocbookHtml",
"(",
"env",
",",
"target",
",",
"source",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"# Init list of targets/sources",
"target",
",",
"source",
"=",
"__extend_targets_sources",
"(",
"target",
",",
"source",
")",
"# ... | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/docbook/__init__.py#L549-L569 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/py/sliceshell.py | python | SlicesShell.readlines | (self) | return lines | Replacement for stdin.readlines(). | Replacement for stdin.readlines(). | [
"Replacement",
"for",
"stdin",
".",
"readlines",
"()",
"."
] | def readlines(self):
"""Replacement for stdin.readlines()."""
lines = []
while lines[-1:] != ['\n']:
lines.append(self.readline())
return lines | [
"def",
"readlines",
"(",
"self",
")",
":",
"lines",
"=",
"[",
"]",
"while",
"lines",
"[",
"-",
"1",
":",
"]",
"!=",
"[",
"'\\n'",
"]",
":",
"lines",
".",
"append",
"(",
"self",
".",
"readline",
"(",
")",
")",
"return",
"lines"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/py/sliceshell.py#L2990-L2995 | |
lmnt-com/haste | 5f704f6132c4aacf2310120b7a1c8d0eea441ab9 | frameworks/tf/layer_norm_gru.py | python | LayerNormGRU.__init__ | (self, num_units, direction='unidirectional', **kwargs) | Initialize the parameters of the GRU layer.
Arguments:
num_units: int, the number of units in the GRU cell.
direction: string, 'unidirectional' or 'bidirectional'.
**kwargs: Dict, keyword arguments (see below).
Keyword Arguments:
kernel_initializer: (optional) the initializer to use for the input
matrix weights. Defaults to `glorot_uniform`.
recurrent_initializer: (optional) the initializer to use for the
recurrent matrix weights. Defaults to `orthogonal`.
bias_initializer: (optional) the initializer to use for input bias
vectors. Defaults to `zeros`.
recurrent_bias_initializer: (optional) the initializer to use for
recurrent bias vectors. Defaults to `zeros`.
kernel_transform: (optional) a function with signature
`(kernel: Tensor) -> Tensor` that transforms the kernel before it is
used. Defaults to the identity function.
recurrent_transform: (optional) a function with signature
`(recurrent_kernel: Tensor) -> Tensor` that transforms the recurrent
kernel before it is used. Defaults to the identity function.
bias_transform: (optional) a function with signature
`(bias: Tensor) -> Tensor` that transforms the bias before it is used.
Defaults to the identity function.
recurrent_bias_transform: (optional) a function with signature
`(recurrent_bias: Tensor) -> Tensor` that transforms the recurrent bias
before it is used. Defaults to the identity function.
dropout: (optional) float, sets the dropout rate for DropConnect
regularization on the recurrent matrix. Defaults to 0.
zoneout: (optional) float, sets the zoneout rate for Zoneout
regularization. Defaults to 0.
dtype: (optional) the data type for this layer. Defaults to `tf.float32`.
name: (optional) string, the name for this layer. | Initialize the parameters of the GRU layer. | [
"Initialize",
"the",
"parameters",
"of",
"the",
"GRU",
"layer",
"."
] | def __init__(self, num_units, direction='unidirectional', **kwargs):
"""
Initialize the parameters of the GRU layer.
Arguments:
num_units: int, the number of units in the GRU cell.
direction: string, 'unidirectional' or 'bidirectional'.
**kwargs: Dict, keyword arguments (see below).
Keyword Arguments:
kernel_initializer: (optional) the initializer to use for the input
matrix weights. Defaults to `glorot_uniform`.
recurrent_initializer: (optional) the initializer to use for the
recurrent matrix weights. Defaults to `orthogonal`.
bias_initializer: (optional) the initializer to use for input bias
vectors. Defaults to `zeros`.
recurrent_bias_initializer: (optional) the initializer to use for
recurrent bias vectors. Defaults to `zeros`.
kernel_transform: (optional) a function with signature
`(kernel: Tensor) -> Tensor` that transforms the kernel before it is
used. Defaults to the identity function.
recurrent_transform: (optional) a function with signature
`(recurrent_kernel: Tensor) -> Tensor` that transforms the recurrent
kernel before it is used. Defaults to the identity function.
bias_transform: (optional) a function with signature
`(bias: Tensor) -> Tensor` that transforms the bias before it is used.
Defaults to the identity function.
recurrent_bias_transform: (optional) a function with signature
`(recurrent_bias: Tensor) -> Tensor` that transforms the recurrent bias
before it is used. Defaults to the identity function.
dropout: (optional) float, sets the dropout rate for DropConnect
regularization on the recurrent matrix. Defaults to 0.
zoneout: (optional) float, sets the zoneout rate for Zoneout
regularization. Defaults to 0.
dtype: (optional) the data type for this layer. Defaults to `tf.float32`.
name: (optional) string, the name for this layer.
"""
super().__init__(LayerNormGRULayer, num_units, direction, 'gru_cell', **kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"num_units",
",",
"direction",
"=",
"'unidirectional'",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"LayerNormGRULayer",
",",
"num_units",
",",
"direction",
",",
"'gru_cell'",
",",
"*",
... | https://github.com/lmnt-com/haste/blob/5f704f6132c4aacf2310120b7a1c8d0eea441ab9/frameworks/tf/layer_norm_gru.py#L194-L231 | ||
abforce/xposed_art_n | ec3fbe417d74d4664cec053d91dd4e3881176374 | tools/cpplint.py | python | FindPreviousMatchingAngleBracket | (clean_lines, linenum, init_prefix) | return False | Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists. | Find the corresponding < that started a template. | [
"Find",
"the",
"corresponding",
"<",
"that",
"started",
"a",
"template",
"."
] | def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False | [
"def",
"FindPreviousMatchingAngleBracket",
"(",
"clean_lines",
",",
"linenum",
",",
"init_prefix",
")",
":",
"line",
"=",
"init_prefix",
"nesting_stack",
"=",
"[",
"'>'",
"]",
"while",
"True",
":",
"# Find the previous operator",
"match",
"=",
"Search",
"(",
"r'^(... | https://github.com/abforce/xposed_art_n/blob/ec3fbe417d74d4664cec053d91dd4e3881176374/tools/cpplint.py#L2178-L2232 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fastparquet/api.py | python | sorted_partitioned_columns | (pf, filters=None) | return out | The columns that are known to be sorted partition-by-partition
They may not be sorted within each partition, but all elements in one
row group are strictly greater than all elements in previous row groups.
Examples
--------
>>> sorted_partitioned_columns(pf)
{'id': {'min': [1, 5, 10], 'max': [4, 9, 20]}}
Returns
-------
A set of column names
See Also
--------
statistics | The columns that are known to be sorted partition-by-partition | [
"The",
"columns",
"that",
"are",
"known",
"to",
"be",
"sorted",
"partition",
"-",
"by",
"-",
"partition"
] | def sorted_partitioned_columns(pf, filters=None):
"""
The columns that are known to be sorted partition-by-partition
They may not be sorted within each partition, but all elements in one
row group are strictly greater than all elements in previous row groups.
Examples
--------
>>> sorted_partitioned_columns(pf)
{'id': {'min': [1, 5, 10], 'max': [4, 9, 20]}}
Returns
-------
A set of column names
See Also
--------
statistics
"""
s = statistics(pf)
if (filters is not None) & (filters != []):
idx_list = [i for i, rg in enumerate(pf.row_groups) if
not(filter_out_stats(rg, filters, pf.schema)) and
not(filter_out_cats(rg, filters))]
for stat in s.keys():
for col in s[stat].keys():
s[stat][col] = [s[stat][col][i] for i in idx_list]
columns = pf.columns
out = dict()
for c in columns:
min, max = s['min'][c], s['max'][c]
if any(x is None for x in min + max):
continue
try:
if (sorted(min) == min and
sorted(max) == max and
all(mx < mn for mx, mn in zip(max[:-1], min[1:]))):
out[c] = {'min': min, 'max': max}
except TypeError:
# because some types, e.g., dicts cannot be sorted/compared
continue
return out | [
"def",
"sorted_partitioned_columns",
"(",
"pf",
",",
"filters",
"=",
"None",
")",
":",
"s",
"=",
"statistics",
"(",
"pf",
")",
"if",
"(",
"filters",
"is",
"not",
"None",
")",
"&",
"(",
"filters",
"!=",
"[",
"]",
")",
":",
"idx_list",
"=",
"[",
"i",... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fastparquet/api.py#L758-L800 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/stc.py | python | StyledTextCtrl.ChangeLexerState | (*args, **kwargs) | return _stc.StyledTextCtrl_ChangeLexerState(*args, **kwargs) | ChangeLexerState(self, int start, int end) -> int | ChangeLexerState(self, int start, int end) -> int | [
"ChangeLexerState",
"(",
"self",
"int",
"start",
"int",
"end",
")",
"-",
">",
"int"
] | def ChangeLexerState(*args, **kwargs):
"""ChangeLexerState(self, int start, int end) -> int"""
return _stc.StyledTextCtrl_ChangeLexerState(*args, **kwargs) | [
"def",
"ChangeLexerState",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_ChangeLexerState",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/stc.py#L6333-L6335 | |
LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/node-10.15.3/deps/v8/third_party/jinja2/sandbox.py | python | SandboxedEnvironment.is_safe_attribute | (self, obj, attr, value) | return not (attr.startswith('_') or is_internal_attribute(obj, attr)) | The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function. | The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function. | [
"The",
"sandboxed",
"environment",
"will",
"call",
"this",
"method",
"to",
"check",
"if",
"the",
"attribute",
"of",
"an",
"object",
"is",
"safe",
"to",
"access",
".",
"Per",
"default",
"all",
"attributes",
"starting",
"with",
"an",
"underscore",
"are",
"cons... | def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith('_') or is_internal_attribute(obj, attr)) | [
"def",
"is_safe_attribute",
"(",
"self",
",",
"obj",
",",
"attr",
",",
"value",
")",
":",
"return",
"not",
"(",
"attr",
".",
"startswith",
"(",
"'_'",
")",
"or",
"is_internal_attribute",
"(",
"obj",
",",
"attr",
")",
")"
] | https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/v8/third_party/jinja2/sandbox.py#L323-L330 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/propgrid.py | python | PGProperty.GetDisplayedCommonValueCount | (*args, **kwargs) | return _propgrid.PGProperty_GetDisplayedCommonValueCount(*args, **kwargs) | GetDisplayedCommonValueCount(self) -> int | GetDisplayedCommonValueCount(self) -> int | [
"GetDisplayedCommonValueCount",
"(",
"self",
")",
"-",
">",
"int"
] | def GetDisplayedCommonValueCount(*args, **kwargs):
"""GetDisplayedCommonValueCount(self) -> int"""
return _propgrid.PGProperty_GetDisplayedCommonValueCount(*args, **kwargs) | [
"def",
"GetDisplayedCommonValueCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PGProperty_GetDisplayedCommonValueCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L504-L506 | |
rethinkdb/rethinkdb_rebirth | 54a76551512bebfe1ab1071d9b19dec2cd9c40e6 | packaging/osx/ds_store/store.py | python | DSStoreEntry.read | (cls, block) | return DSStoreEntry(filename, code, typecode, value) | Read a ``.DS_Store`` entry from the containing Block | Read a ``.DS_Store`` entry from the containing Block | [
"Read",
"a",
".",
"DS_Store",
"entry",
"from",
"the",
"containing",
"Block"
] | def read(cls, block):
"""Read a ``.DS_Store`` entry from the containing Block"""
# First read the filename
nlen = block.read(b'>I')[0]
filename = block.read(2 * nlen).decode('utf-16be')
# Next, read the code and type
code, typecode = block.read(b'>4s4s')
# Finally, read the data
if typecode == b'bool':
value = block.read(b'>?')[0]
elif typecode == b'long' or typecode == b'shor':
value = block.read(b'>I')[0]
elif typecode == b'blob':
vlen = block.read(b'>I')[0]
value = block.read(vlen)
codec = codecs.get(code, None)
if codec:
value = codec.decode(value)
typecode = codec
elif typecode == b'ustr':
vlen = block.read(b'>I')[0]
value = block.read(2 * vlen).decode('utf-16be')
elif typecode == b'type':
value = block.read(b'>4s')[0]
elif typecode == b'comp' or typecode == b'dutc':
value = block.read(b'>Q')[0]
else:
raise ValueError('Unknown type code "%s"' % typecode)
return DSStoreEntry(filename, code, typecode, value) | [
"def",
"read",
"(",
"cls",
",",
"block",
")",
":",
"# First read the filename",
"nlen",
"=",
"block",
".",
"read",
"(",
"b'>I'",
")",
"[",
"0",
"]",
"filename",
"=",
"block",
".",
"read",
"(",
"2",
"*",
"nlen",
")",
".",
"decode",
"(",
"'utf-16be'",
... | https://github.com/rethinkdb/rethinkdb_rebirth/blob/54a76551512bebfe1ab1071d9b19dec2cd9c40e6/packaging/osx/ds_store/store.py#L74-L106 | |
kamyu104/LeetCode-Solutions | 77605708a927ea3b85aee5a479db733938c7c211 | Python/find-k-pairs-with-smallest-sums.py | python | Solution2.kSmallestPairs | (self, nums1, nums2, k) | return nsmallest(k, product(nums1, nums2), key=sum) | :type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]] | :type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]] | [
":",
"type",
"nums1",
":",
"List",
"[",
"int",
"]",
":",
"type",
"nums2",
":",
"List",
"[",
"int",
"]",
":",
"type",
"k",
":",
"int",
":",
"rtype",
":",
"List",
"[",
"List",
"[",
"int",
"]]"
] | def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
return nsmallest(k, product(nums1, nums2), key=sum) | [
"def",
"kSmallestPairs",
"(",
"self",
",",
"nums1",
",",
"nums2",
",",
"k",
")",
":",
"return",
"nsmallest",
"(",
"k",
",",
"product",
"(",
"nums1",
",",
"nums2",
")",
",",
"key",
"=",
"sum",
")"
] | https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/find-k-pairs-with-smallest-sums.py#L43-L50 | |
intel-iot-devkit/how-to-code-samples | b4ea616f36bbfa2e042beb1698f968cfd651d79f | robot-arm/python/iot_robot_arm/hardware/board.py | python | Board.remove_event_handler | (self, event, handler) | Remove hardware event handler. | Remove hardware event handler. | [
"Remove",
"hardware",
"event",
"handler",
"."
] | def remove_event_handler(self, event, handler):
"""
Remove hardware event handler.
"""
self.emitter.remove(event, handler) | [
"def",
"remove_event_handler",
"(",
"self",
",",
"event",
",",
"handler",
")",
":",
"self",
".",
"emitter",
".",
"remove",
"(",
"event",
",",
"handler",
")"
] | https://github.com/intel-iot-devkit/how-to-code-samples/blob/b4ea616f36bbfa2e042beb1698f968cfd651d79f/robot-arm/python/iot_robot_arm/hardware/board.py#L86-L92 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/dtypes/concat.py | python | union_categoricals | (
to_union, sort_categories: bool = False, ignore_order: bool = False
) | return Categorical(new_codes, categories=categories, ordered=ordered, fastpath=True) | Combine list-like of Categorical-like, unioning categories.
All categories must have the same dtype.
Parameters
----------
to_union : list-like
Categorical, CategoricalIndex, or Series with dtype='category'.
sort_categories : bool, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
ignore_order : bool, default False
If true, the ordered attribute of the Categoricals will be ignored.
Results in an unordered categorical.
Returns
-------
Categorical
Raises
------
TypeError
- all inputs do not have the same dtype
- all inputs do not have the same ordered property
- all inputs are ordered and their categories are not identical
- sort_categories=True and Categoricals are ordered
ValueError
Empty list of categoricals passed
Notes
-----
To learn more about categories, see `link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__
Examples
--------
>>> from pandas.api.types import union_categoricals
If you want to combine categoricals that do not necessarily have
the same categories, `union_categoricals` will combine a list-like
of categoricals. The new categories will be the union of the
categories being combined.
>>> a = pd.Categorical(["b", "c"])
>>> b = pd.Categorical(["a", "b"])
>>> union_categoricals([a, b])
['b', 'c', 'a', 'b']
Categories (3, object): ['b', 'c', 'a']
By default, the resulting categories will be ordered as they appear
in the `categories` of the data. If you want the categories to be
lexsorted, use `sort_categories=True` argument.
>>> union_categoricals([a, b], sort_categories=True)
['b', 'c', 'a', 'b']
Categories (3, object): ['a', 'b', 'c']
`union_categoricals` also works with the case of combining two
categoricals of the same categories and order information (e.g. what
you could also `append` for).
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "a"], ordered=True)
>>> union_categoricals([a, b])
['a', 'b', 'a', 'b', 'a']
Categories (2, object): ['a' < 'b']
Raises `TypeError` because the categories are ordered and not identical.
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "c"], ordered=True)
>>> union_categoricals([a, b])
Traceback (most recent call last):
...
TypeError: to union ordered Categoricals, all categories must be the same
New in version 0.20.0
Ordered categoricals with different categories or orderings can be
combined by using the `ignore_ordered=True` argument.
>>> a = pd.Categorical(["a", "b", "c"], ordered=True)
>>> b = pd.Categorical(["c", "b", "a"], ordered=True)
>>> union_categoricals([a, b], ignore_order=True)
['a', 'b', 'c', 'c', 'b', 'a']
Categories (3, object): ['a', 'b', 'c']
`union_categoricals` also works with a `CategoricalIndex`, or `Series`
containing categorical data, but note that the resulting array will
always be a plain `Categorical`
>>> a = pd.Series(["b", "c"], dtype='category')
>>> b = pd.Series(["a", "b"], dtype='category')
>>> union_categoricals([a, b])
['b', 'c', 'a', 'b']
Categories (3, object): ['b', 'c', 'a'] | Combine list-like of Categorical-like, unioning categories. | [
"Combine",
"list",
"-",
"like",
"of",
"Categorical",
"-",
"like",
"unioning",
"categories",
"."
] | def union_categoricals(
to_union, sort_categories: bool = False, ignore_order: bool = False
):
"""
Combine list-like of Categorical-like, unioning categories.
All categories must have the same dtype.
Parameters
----------
to_union : list-like
Categorical, CategoricalIndex, or Series with dtype='category'.
sort_categories : bool, default False
If true, resulting categories will be lexsorted, otherwise
they will be ordered as they appear in the data.
ignore_order : bool, default False
If true, the ordered attribute of the Categoricals will be ignored.
Results in an unordered categorical.
Returns
-------
Categorical
Raises
------
TypeError
- all inputs do not have the same dtype
- all inputs do not have the same ordered property
- all inputs are ordered and their categories are not identical
- sort_categories=True and Categoricals are ordered
ValueError
Empty list of categoricals passed
Notes
-----
To learn more about categories, see `link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__
Examples
--------
>>> from pandas.api.types import union_categoricals
If you want to combine categoricals that do not necessarily have
the same categories, `union_categoricals` will combine a list-like
of categoricals. The new categories will be the union of the
categories being combined.
>>> a = pd.Categorical(["b", "c"])
>>> b = pd.Categorical(["a", "b"])
>>> union_categoricals([a, b])
['b', 'c', 'a', 'b']
Categories (3, object): ['b', 'c', 'a']
By default, the resulting categories will be ordered as they appear
in the `categories` of the data. If you want the categories to be
lexsorted, use `sort_categories=True` argument.
>>> union_categoricals([a, b], sort_categories=True)
['b', 'c', 'a', 'b']
Categories (3, object): ['a', 'b', 'c']
`union_categoricals` also works with the case of combining two
categoricals of the same categories and order information (e.g. what
you could also `append` for).
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "a"], ordered=True)
>>> union_categoricals([a, b])
['a', 'b', 'a', 'b', 'a']
Categories (2, object): ['a' < 'b']
Raises `TypeError` because the categories are ordered and not identical.
>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "c"], ordered=True)
>>> union_categoricals([a, b])
Traceback (most recent call last):
...
TypeError: to union ordered Categoricals, all categories must be the same
New in version 0.20.0
Ordered categoricals with different categories or orderings can be
combined by using the `ignore_ordered=True` argument.
>>> a = pd.Categorical(["a", "b", "c"], ordered=True)
>>> b = pd.Categorical(["c", "b", "a"], ordered=True)
>>> union_categoricals([a, b], ignore_order=True)
['a', 'b', 'c', 'c', 'b', 'a']
Categories (3, object): ['a', 'b', 'c']
`union_categoricals` also works with a `CategoricalIndex`, or `Series`
containing categorical data, but note that the resulting array will
always be a plain `Categorical`
>>> a = pd.Series(["b", "c"], dtype='category')
>>> b = pd.Series(["a", "b"], dtype='category')
>>> union_categoricals([a, b])
['b', 'c', 'a', 'b']
Categories (3, object): ['b', 'c', 'a']
"""
from pandas import Categorical
from pandas.core.arrays.categorical import recode_for_categories
if len(to_union) == 0:
raise ValueError("No Categoricals to union")
def _maybe_unwrap(x):
if isinstance(x, (ABCCategoricalIndex, ABCSeries)):
return x._values
elif isinstance(x, Categorical):
return x
else:
raise TypeError("all components to combine must be Categorical")
to_union = [_maybe_unwrap(x) for x in to_union]
first = to_union[0]
if not all(
is_dtype_equal(other.categories.dtype, first.categories.dtype)
for other in to_union[1:]
):
raise TypeError("dtype of categories must be the same")
ordered = False
if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]):
# identical categories - fastpath
categories = first.categories
ordered = first.ordered
all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]
new_codes = np.concatenate(all_codes)
if sort_categories and not ignore_order and ordered:
raise TypeError("Cannot use sort_categories=True with ordered Categoricals")
if sort_categories and not categories.is_monotonic_increasing:
categories = categories.sort_values()
indexer = categories.get_indexer(first.categories)
from pandas.core.algorithms import take_nd
new_codes = take_nd(indexer, new_codes, fill_value=-1)
elif ignore_order or all(not c.ordered for c in to_union):
# different categories - union and recode
cats = first.categories.append([c.categories for c in to_union[1:]])
categories = cats.unique()
if sort_categories:
categories = categories.sort_values()
new_codes = [
recode_for_categories(c.codes, c.categories, categories) for c in to_union
]
new_codes = np.concatenate(new_codes)
else:
# ordered - to show a proper error message
if all(c.ordered for c in to_union):
msg = "to union ordered Categoricals, all categories must be the same"
raise TypeError(msg)
else:
raise TypeError("Categorical.ordered must be the same")
if ignore_order:
ordered = False
return Categorical(new_codes, categories=categories, ordered=ordered, fastpath=True) | [
"def",
"union_categoricals",
"(",
"to_union",
",",
"sort_categories",
":",
"bool",
"=",
"False",
",",
"ignore_order",
":",
"bool",
"=",
"False",
")",
":",
"from",
"pandas",
"import",
"Categorical",
"from",
"pandas",
".",
"core",
".",
"arrays",
".",
"categori... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/dtypes/concat.py#L154-L319 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/pydoc.py | python | Doc.document | (self, object, name=None, *args) | return self.docother(*args) | Generate documentation for an object. | Generate documentation for an object. | [
"Generate",
"documentation",
"for",
"an",
"object",
"."
] | def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args) | [
"def",
"document",
"(",
"self",
",",
"object",
",",
"name",
"=",
"None",
",",
"*",
"args",
")",
":",
"args",
"=",
"(",
"object",
",",
"name",
")",
"+",
"args",
"# 'try' clause is to attempt to handle the possibility that inspect",
"# identifies something in a way th... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/pydoc.py#L351-L367 | |
infinit/elle | a8154593c42743f45b9df09daf62b44630c24a02 | drake/src/drake/deprecation.py | python | deprecated | (func) | return f | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emmitted
when the function is used. | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emmitted
when the function is used. | [
"This",
"is",
"a",
"decorator",
"which",
"can",
"be",
"used",
"to",
"mark",
"functions",
"as",
"deprecated",
".",
"It",
"will",
"result",
"in",
"a",
"warning",
"being",
"emmitted",
"when",
"the",
"function",
"is",
"used",
"."
] | def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emmitted
when the function is used.'''
@functools.wraps(func)
def f(*args, **kwargs):
warnings.warn(
'Call to deprecated function {}.'.format(func.__name__),
category=DeprecationWarning,
stacklevel=2)
return func(*args, **kwargs)
return f | [
"def",
"deprecated",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"'Call to deprecated function {}.'",
".",
"format",
"(",
"func"... | https://github.com/infinit/elle/blob/a8154593c42743f45b9df09daf62b44630c24a02/drake/src/drake/deprecation.py#L12-L23 | |
D-X-Y/caffe-faster-rcnn | eb50c97ff48f3df115d0e85fe0a32b0c7e2aa4cb | python/caffe/net_spec.py | python | Top.to_proto | (self) | return to_proto(self) | Generate a NetParameter that contains all layers needed to compute
this top. | Generate a NetParameter that contains all layers needed to compute
this top. | [
"Generate",
"a",
"NetParameter",
"that",
"contains",
"all",
"layers",
"needed",
"to",
"compute",
"this",
"top",
"."
] | def to_proto(self):
"""Generate a NetParameter that contains all layers needed to compute
this top."""
return to_proto(self) | [
"def",
"to_proto",
"(",
"self",
")",
":",
"return",
"to_proto",
"(",
"self",
")"
] | https://github.com/D-X-Y/caffe-faster-rcnn/blob/eb50c97ff48f3df115d0e85fe0a32b0c7e2aa4cb/python/caffe/net_spec.py#L90-L94 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/stc.py | python | StyledTextCtrl.SetEndAtLastLine | (*args, **kwargs) | return _stc.StyledTextCtrl_SetEndAtLastLine(*args, **kwargs) | SetEndAtLastLine(self, bool endAtLastLine)
Sets the scroll range so that maximum scroll position has
the last line at the bottom of the view (default).
Setting this to false allows scrolling one page below the last line. | SetEndAtLastLine(self, bool endAtLastLine) | [
"SetEndAtLastLine",
"(",
"self",
"bool",
"endAtLastLine",
")"
] | def SetEndAtLastLine(*args, **kwargs):
"""
SetEndAtLastLine(self, bool endAtLastLine)
Sets the scroll range so that maximum scroll position has
the last line at the bottom of the view (default).
Setting this to false allows scrolling one page below the last line.
"""
return _stc.StyledTextCtrl_SetEndAtLastLine(*args, **kwargs) | [
"def",
"SetEndAtLastLine",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_SetEndAtLastLine",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L4209-L4217 | |
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/python/training/optimizer.py | python | Optimizer._slot_dict | (self, slot_name) | return named_slots | Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name. | Returns a dict for caching slots created under the given name. | [
"Returns",
"a",
"dict",
"for",
"caching",
"slots",
"created",
"under",
"the",
"given",
"name",
"."
] | def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots | [
"def",
"_slot_dict",
"(",
"self",
",",
"slot_name",
")",
":",
"named_slots",
"=",
"self",
".",
"_slots",
".",
"get",
"(",
"slot_name",
",",
"None",
")",
"if",
"named_slots",
"is",
"None",
":",
"named_slots",
"=",
"{",
"}",
"self",
".",
"_slots",
"[",
... | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/training/optimizer.py#L446-L460 | |
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/framework/graph_util.py | python | extract_sub_graph | (graph_def, dest_nodes) | return out | Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto. | Extract the subgraph that can reach any of the nodes in 'dest_nodes'. | [
"Extract",
"the",
"subgraph",
"that",
"can",
"reach",
"any",
"of",
"the",
"nodes",
"in",
"dest_nodes",
"."
] | def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
edges = {} # Keyed by the dest node name.
name_to_node_map = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
node_seq = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = _node_name(node.name)
name_to_node_map[n] = node
edges[n] = [_node_name(x) for x in node.input]
node_seq[n] = seq
seq += 1
for d in dest_nodes:
assert d in name_to_node_map, "%s is not in graph" % d
nodes_to_keep = set()
# Breadth first search to find all the nodes that we should keep.
next_to_visit = dest_nodes[:]
while next_to_visit:
n = next_to_visit[0]
del next_to_visit[0]
if n in nodes_to_keep:
# Already visited this node.
continue
nodes_to_keep.add(n)
next_to_visit += edges[n]
nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: node_seq[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
out.node.extend([copy.deepcopy(name_to_node_map[n])])
return out | [
"def",
"extract_sub_graph",
"(",
"graph_def",
",",
"dest_nodes",
")",
":",
"if",
"not",
"isinstance",
"(",
"graph_def",
",",
"graph_pb2",
".",
"GraphDef",
")",
":",
"raise",
"TypeError",
"(",
"\"graph_def must be a graph_pb2.GraphDef proto.\"",
")",
"edges",
"=",
... | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/graph_util.py#L127-L178 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/s3transfer/manager.py | python | TransferConfig.__init__ | (self,
multipart_threshold=8 * MB,
multipart_chunksize=8 * MB,
max_request_concurrency=10,
max_submission_concurrency=5,
max_request_queue_size=1000,
max_submission_queue_size=1000,
max_io_queue_size=1000,
io_chunksize=256 * KB,
num_download_attempts=5,
max_in_memory_upload_chunks=10,
max_in_memory_download_chunks=10,
max_bandwidth=None) | Configurations for the transfer mangager
:param multipart_threshold: The threshold for which multipart
transfers occur.
:param max_request_concurrency: The maximum number of S3 API
transfer-related requests that can happen at a time.
:param max_submission_concurrency: The maximum number of threads
processing a call to a TransferManager method. Processing a
call usually entails determining which S3 API requests that need
to be enqueued, but does **not** entail making any of the
S3 API data transfering requests needed to perform the transfer.
The threads controlled by ``max_request_concurrency`` is
responsible for that.
:param multipart_chunksize: The size of each transfer if a request
becomes a multipart transfer.
:param max_request_queue_size: The maximum amount of S3 API requests
that can be queued at a time. A value of zero means that there
is no maximum.
:param max_submission_queue_size: The maximum amount of
TransferManager method calls that can be queued at a time. A value
of zero means that there is no maximum.
:param max_io_queue_size: The maximum amount of read parts that
can be queued to be written to disk per download. A value of zero
means that there is no maximum. The default size for each element
in this queue is 8 KB.
:param io_chunksize: The max size of each chunk in the io queue.
Currently, this is size used when reading from the downloaded
stream as well.
:param num_download_attempts: The number of download attempts that
will be tried upon errors with downloading an object in S3. Note
that these retries account for errors that occur when streamming
down the data from s3 (i.e. socket errors and read timeouts that
occur after recieving an OK response from s3).
Other retryable exceptions such as throttling errors and 5xx errors
are already retried by botocore (this default is 5). The
``num_download_attempts`` does not take into account the
number of exceptions retried by botocore.
:param max_in_memory_upload_chunks: The number of chunks that can
be stored in memory at a time for all ongoing upload requests.
This pertains to chunks of data that need to be stored in memory
during an upload if the data is sourced from a file-like object.
The total maximum memory footprint due to a in-memory upload
chunks is roughly equal to:
max_in_memory_upload_chunks * multipart_chunksize
+ max_submission_concurrency * multipart_chunksize
``max_submission_concurrency`` has an affect on this value because
for each thread pulling data off of a file-like object, they may
be waiting with a single read chunk to be submitted for upload
because the ``max_in_memory_upload_chunks`` value has been reached
by the threads making the upload request.
:param max_in_memory_download_chunks: The number of chunks that can
be buffered in memory and **not** in the io queue at a time for all
ongoing dowload requests. This pertains specifically to file-like
objects that cannot be seeked. The total maximum memory footprint
due to a in-memory download chunks is roughly equal to:
max_in_memory_download_chunks * multipart_chunksize
:param max_bandwidth: The maximum bandwidth that will be consumed
in uploading and downloading file content. The value is in terms of
bytes per second. | Configurations for the transfer mangager | [
"Configurations",
"for",
"the",
"transfer",
"mangager"
] | def __init__(self,
multipart_threshold=8 * MB,
multipart_chunksize=8 * MB,
max_request_concurrency=10,
max_submission_concurrency=5,
max_request_queue_size=1000,
max_submission_queue_size=1000,
max_io_queue_size=1000,
io_chunksize=256 * KB,
num_download_attempts=5,
max_in_memory_upload_chunks=10,
max_in_memory_download_chunks=10,
max_bandwidth=None):
"""Configurations for the transfer mangager
:param multipart_threshold: The threshold for which multipart
transfers occur.
:param max_request_concurrency: The maximum number of S3 API
transfer-related requests that can happen at a time.
:param max_submission_concurrency: The maximum number of threads
processing a call to a TransferManager method. Processing a
call usually entails determining which S3 API requests that need
to be enqueued, but does **not** entail making any of the
S3 API data transfering requests needed to perform the transfer.
The threads controlled by ``max_request_concurrency`` is
responsible for that.
:param multipart_chunksize: The size of each transfer if a request
becomes a multipart transfer.
:param max_request_queue_size: The maximum amount of S3 API requests
that can be queued at a time. A value of zero means that there
is no maximum.
:param max_submission_queue_size: The maximum amount of
TransferManager method calls that can be queued at a time. A value
of zero means that there is no maximum.
:param max_io_queue_size: The maximum amount of read parts that
can be queued to be written to disk per download. A value of zero
means that there is no maximum. The default size for each element
in this queue is 8 KB.
:param io_chunksize: The max size of each chunk in the io queue.
Currently, this is size used when reading from the downloaded
stream as well.
:param num_download_attempts: The number of download attempts that
will be tried upon errors with downloading an object in S3. Note
that these retries account for errors that occur when streamming
down the data from s3 (i.e. socket errors and read timeouts that
occur after recieving an OK response from s3).
Other retryable exceptions such as throttling errors and 5xx errors
are already retried by botocore (this default is 5). The
``num_download_attempts`` does not take into account the
number of exceptions retried by botocore.
:param max_in_memory_upload_chunks: The number of chunks that can
be stored in memory at a time for all ongoing upload requests.
This pertains to chunks of data that need to be stored in memory
during an upload if the data is sourced from a file-like object.
The total maximum memory footprint due to a in-memory upload
chunks is roughly equal to:
max_in_memory_upload_chunks * multipart_chunksize
+ max_submission_concurrency * multipart_chunksize
``max_submission_concurrency`` has an affect on this value because
for each thread pulling data off of a file-like object, they may
be waiting with a single read chunk to be submitted for upload
because the ``max_in_memory_upload_chunks`` value has been reached
by the threads making the upload request.
:param max_in_memory_download_chunks: The number of chunks that can
be buffered in memory and **not** in the io queue at a time for all
ongoing dowload requests. This pertains specifically to file-like
objects that cannot be seeked. The total maximum memory footprint
due to a in-memory download chunks is roughly equal to:
max_in_memory_download_chunks * multipart_chunksize
:param max_bandwidth: The maximum bandwidth that will be consumed
in uploading and downloading file content. The value is in terms of
bytes per second.
"""
self.multipart_threshold = multipart_threshold
self.multipart_chunksize = multipart_chunksize
self.max_request_concurrency = max_request_concurrency
self.max_submission_concurrency = max_submission_concurrency
self.max_request_queue_size = max_request_queue_size
self.max_submission_queue_size = max_submission_queue_size
self.max_io_queue_size = max_io_queue_size
self.io_chunksize = io_chunksize
self.num_download_attempts = num_download_attempts
self.max_in_memory_upload_chunks = max_in_memory_upload_chunks
self.max_in_memory_download_chunks = max_in_memory_download_chunks
self.max_bandwidth = max_bandwidth
self._validate_attrs_are_nonzero() | [
"def",
"__init__",
"(",
"self",
",",
"multipart_threshold",
"=",
"8",
"*",
"MB",
",",
"multipart_chunksize",
"=",
"8",
"*",
"MB",
",",
"max_request_concurrency",
"=",
"10",
",",
"max_submission_concurrency",
"=",
"5",
",",
"max_request_queue_size",
"=",
"1000",
... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/s3transfer/manager.py#L48-L147 | ||
assimp/assimp | 97c7e084c2f7f8c9355ea42f73605890481bddc5 | port/PyAssimp/scripts/transformations.py | python | euler_matrix | (ai, aj, ak, axes='sxyz') | return M | Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes) | Return homogeneous rotation matrix from Euler angles and axis sequence. | [
"Return",
"homogeneous",
"rotation",
"matrix",
"from",
"Euler",
"angles",
"and",
"axis",
"sequence",
"."
] | def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M | [
"def",
"euler_matrix",
"(",
"ai",
",",
"aj",
",",
"ak",
",",
"axes",
"=",
"'sxyz'",
")",
":",
"try",
":",
"firstaxis",
",",
"parity",
",",
"repetition",
",",
"frame",
"=",
"_AXES2TUPLE",
"[",
"axes",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError... | https://github.com/assimp/assimp/blob/97c7e084c2f7f8c9355ea42f73605890481bddc5/port/PyAssimp/scripts/transformations.py#L968-L1028 | |
rdkit/rdkit | ede860ae316d12d8568daf5ee800921c3389c84e | rdkit/Chem/MolStandardize/resonance.py | python | enumerate_resonance_smiles | (smiles) | return {Chem.MolToSmiles(m, isomericSmiles=True) for m in mesomers} | Return a set of resonance forms as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible resonance form.
:rtype: set of strings. | Return a set of resonance forms as SMILES strings, given a SMILES string. | [
"Return",
"a",
"set",
"of",
"resonance",
"forms",
"as",
"SMILES",
"strings",
"given",
"a",
"SMILES",
"string",
"."
] | def enumerate_resonance_smiles(smiles):
"""Return a set of resonance forms as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible resonance form.
:rtype: set of strings.
"""
mol = Chem.MolFromSmiles(smiles)
# Chem.SanitizeMol(mol) # MolFromSmiles does Sanitize by default
mesomers = ResonanceEnumerator().enumerate(mol)
return {Chem.MolToSmiles(m, isomericSmiles=True) for m in mesomers} | [
"def",
"enumerate_resonance_smiles",
"(",
"smiles",
")",
":",
"mol",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"smiles",
")",
"# Chem.SanitizeMol(mol) # MolFromSmiles does Sanitize by default",
"mesomers",
"=",
"ResonanceEnumerator",
"(",
")",
".",
"enumerate",
"(",
"mol... | https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/Chem/MolStandardize/resonance.py#L79-L89 | |
doxygen/doxygen | c5d4b67565a5fadea5d84d28cfe86db605b4593f | doc/translator.py | python | xopen | (fname, mode='r', encoding='utf-8-sig') | Unified file opening for Python 2 an Python 3.
Python 2 does not have the encoding argument. Python 3 has one, and
the default 'utf-8-sig' is used (skips the BOM automatically). | Unified file opening for Python 2 an Python 3. | [
"Unified",
"file",
"opening",
"for",
"Python",
"2",
"an",
"Python",
"3",
"."
] | def xopen(fname, mode='r', encoding='utf-8-sig'):
'''Unified file opening for Python 2 an Python 3.
Python 2 does not have the encoding argument. Python 3 has one, and
the default 'utf-8-sig' is used (skips the BOM automatically).
'''
if sys.version_info[0] == 2:
return open(fname, mode=mode) # Python 2 without encoding
else:
return open(fname, mode=mode, encoding=encoding) | [
"def",
"xopen",
"(",
"fname",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'utf-8-sig'",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"return",
"open",
"(",
"fname",
",",
"mode",
"=",
"mode",
")",
"# Python 2 without ... | https://github.com/doxygen/doxygen/blob/c5d4b67565a5fadea5d84d28cfe86db605b4593f/doc/translator.py#L79-L89 | ||
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Arch/ArchWall.py | python | areSameWallTypes | (walls) | return True | Check if a list of walls have the same height, width and alignment.
Parameters
----------
walls: list of <ArchComponent.Component>
Returns
-------
bool
True if the walls have the same height, width and alignment, False if
otherwise. | Check if a list of walls have the same height, width and alignment. | [
"Check",
"if",
"a",
"list",
"of",
"walls",
"have",
"the",
"same",
"height",
"width",
"and",
"alignment",
"."
] | def areSameWallTypes(walls):
"""Check if a list of walls have the same height, width and alignment.
Parameters
----------
walls: list of <ArchComponent.Component>
Returns
-------
bool
True if the walls have the same height, width and alignment, False if
otherwise.
"""
for att in ["Width","Height","Align"]:
value = None
for w in walls:
if not hasattr(w,att):
return False
if not value:
value = getattr(w,att)
else:
if type(value) == float:
if round(value,Draft.precision()) != round(getattr(w,att),Draft.precision()):
return False
else:
if value != getattr(w,att):
return False
return True | [
"def",
"areSameWallTypes",
"(",
"walls",
")",
":",
"for",
"att",
"in",
"[",
"\"Width\"",
",",
"\"Height\"",
",",
"\"Align\"",
"]",
":",
"value",
"=",
"None",
"for",
"w",
"in",
"walls",
":",
"if",
"not",
"hasattr",
"(",
"w",
",",
"att",
")",
":",
"r... | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Arch/ArchWall.py#L225-L253 | |
TimoSaemann/caffe-segnet-cudnn5 | abcf30dca449245e101bf4ced519f716177f0885 | python/caffe/pycaffe.py | python | _Net_get_id_name | (func, field) | return get_id_name | Generic property that maps func to the layer names into an OrderedDict.
Used for top_names and bottom_names.
Parameters
----------
func: function id -> [id]
field: implementation field name (cache)
Returns
------
A one-parameter function that can be set as a property. | Generic property that maps func to the layer names into an OrderedDict. | [
"Generic",
"property",
"that",
"maps",
"func",
"to",
"the",
"layer",
"names",
"into",
"an",
"OrderedDict",
"."
] | def _Net_get_id_name(func, field):
"""
Generic property that maps func to the layer names into an OrderedDict.
Used for top_names and bottom_names.
Parameters
----------
func: function id -> [id]
field: implementation field name (cache)
Returns
------
A one-parameter function that can be set as a property.
"""
@property
def get_id_name(self):
if not hasattr(self, field):
id_to_name = list(self.blobs)
res = OrderedDict([(self._layer_names[i],
[id_to_name[j] for j in func(self, i)])
for i in range(len(self.layers))])
setattr(self, field, res)
return getattr(self, field)
return get_id_name | [
"def",
"_Net_get_id_name",
"(",
"func",
",",
"field",
")",
":",
"@",
"property",
"def",
"get_id_name",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"field",
")",
":",
"id_to_name",
"=",
"list",
"(",
"self",
".",
"blobs",
")",
"res... | https://github.com/TimoSaemann/caffe-segnet-cudnn5/blob/abcf30dca449245e101bf4ced519f716177f0885/python/caffe/pycaffe.py#L295-L319 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/fractions.py | python | Fraction.__abs__ | (a) | return Fraction(abs(a._numerator), a._denominator, _normalize=False) | abs(a) | abs(a) | [
"abs",
"(",
"a",
")"
] | def __abs__(a):
"""abs(a)"""
return Fraction(abs(a._numerator), a._denominator, _normalize=False) | [
"def",
"__abs__",
"(",
"a",
")",
":",
"return",
"Fraction",
"(",
"abs",
"(",
"a",
".",
"_numerator",
")",
",",
"a",
".",
"_denominator",
",",
"_normalize",
"=",
"False",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/fractions.py#L500-L502 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py | python | get_input_alternatives | (input_ops) | return input_alternatives, features | Obtain all input alternatives using the input_fn output and heuristics. | Obtain all input alternatives using the input_fn output and heuristics. | [
"Obtain",
"all",
"input",
"alternatives",
"using",
"the",
"input_fn",
"output",
"and",
"heuristics",
"."
] | def get_input_alternatives(input_ops):
"""Obtain all input alternatives using the input_fn output and heuristics."""
input_alternatives = {}
if isinstance(input_ops, input_fn_utils.InputFnOps):
features, unused_labels, default_inputs = input_ops
input_alternatives[DEFAULT_INPUT_ALTERNATIVE_KEY] = default_inputs
else:
features, unused_labels = input_ops
if not features:
raise ValueError('Features must be defined.')
# TODO(b/34253951): reinstate the "features" input_signature.
# The "features" input_signature, as written, does not work with
# SparseTensors. It is simply commented out as a stopgap, pending discussion
# on the bug as to the correct solution.
# Add the "features" input_signature in any case.
# Note defensive copy because model_fns alter the features dict.
# input_alternatives[FEATURES_INPUT_ALTERNATIVE_KEY] = (
# copy.copy(features))
return input_alternatives, features | [
"def",
"get_input_alternatives",
"(",
"input_ops",
")",
":",
"input_alternatives",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"input_ops",
",",
"input_fn_utils",
".",
"InputFnOps",
")",
":",
"features",
",",
"unused_labels",
",",
"default_inputs",
"=",
"input_ops",
... | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py#L158-L180 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/optimize/slsqp.py | python | fmin_slsqp | (func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None,
bounds=(), fprime=None, fprime_eqcons=None,
fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6,
iprint=1, disp=None, full_output=0, epsilon=_epsilon,
callback=None) | Minimize a function using Sequential Least SQuares Programming
Python interface function for the SLSQP Optimization subroutine
originally implemented by Dieter Kraft.
Parameters
----------
func : callable f(x,*args)
Objective function. Must return a scalar.
x0 : 1-D ndarray of float
Initial guess for the independent variable(s).
eqcons : list, optional
A list of functions of length n such that
eqcons[j](x,*args) == 0.0 in a successfully optimized
problem.
f_eqcons : callable f(x,*args), optional
Returns a 1-D array in which each element must equal 0.0 in a
successfully optimized problem. If f_eqcons is specified,
eqcons is ignored.
ieqcons : list, optional
A list of functions of length n such that
ieqcons[j](x,*args) >= 0.0 in a successfully optimized
problem.
f_ieqcons : callable f(x,*args), optional
Returns a 1-D ndarray in which each element must be greater or
equal to 0.0 in a successfully optimized problem. If
f_ieqcons is specified, ieqcons is ignored.
bounds : list, optional
A list of tuples specifying the lower and upper bound
for each independent variable [(xl0, xu0),(xl1, xu1),...]
Infinite values will be interpreted as large floating values.
fprime : callable `f(x,*args)`, optional
A function that evaluates the partial derivatives of func.
fprime_eqcons : callable `f(x,*args)`, optional
A function of the form `f(x, *args)` that returns the m by n
array of equality constraint normals. If not provided,
the normals will be approximated. The array returned by
fprime_eqcons should be sized as ( len(eqcons), len(x0) ).
fprime_ieqcons : callable `f(x,*args)`, optional
A function of the form `f(x, *args)` that returns the m by n
array of inequality constraint normals. If not provided,
the normals will be approximated. The array returned by
fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ).
args : sequence, optional
Additional arguments passed to func and fprime.
iter : int, optional
The maximum number of iterations.
acc : float, optional
Requested accuracy.
iprint : int, optional
The verbosity of fmin_slsqp :
* iprint <= 0 : Silent operation
* iprint == 1 : Print summary upon completion (default)
* iprint >= 2 : Print status of each iterate and summary
disp : int, optional
Over-rides the iprint interface (preferred).
full_output : bool, optional
If False, return only the minimizer of func (default).
Otherwise, output final objective function and summary
information.
epsilon : float, optional
The step size for finite-difference derivative estimates.
callback : callable, optional
Called after each iteration, as ``callback(x)``, where ``x`` is the
current parameter vector.
Returns
-------
out : ndarray of float
The final minimizer of func.
fx : ndarray of float, if full_output is true
The final value of the objective function.
its : int, if full_output is true
The number of iterations.
imode : int, if full_output is true
The exit mode from the optimizer (see below).
smode : string, if full_output is true
Message describing the exit mode from the optimizer.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'SLSQP' `method` in particular.
Notes
-----
Exit modes are defined as follows ::
-1 : Gradient evaluation required (g & a)
0 : Optimization terminated successfully.
1 : Function evaluation required (f & c)
2 : More equality constraints than independent variables
3 : More than 3*n iterations in LSQ subproblem
4 : Inequality constraints incompatible
5 : Singular matrix E in LSQ subproblem
6 : Singular matrix C in LSQ subproblem
7 : Rank-deficient equality constraint subproblem HFTI
8 : Positive directional derivative for linesearch
9 : Iteration limit exceeded
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-sqlsp>`. | Minimize a function using Sequential Least SQuares Programming | [
"Minimize",
"a",
"function",
"using",
"Sequential",
"Least",
"SQuares",
"Programming"
] | def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None,
bounds=(), fprime=None, fprime_eqcons=None,
fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6,
iprint=1, disp=None, full_output=0, epsilon=_epsilon,
callback=None):
"""
Minimize a function using Sequential Least SQuares Programming
Python interface function for the SLSQP Optimization subroutine
originally implemented by Dieter Kraft.
Parameters
----------
func : callable f(x,*args)
Objective function. Must return a scalar.
x0 : 1-D ndarray of float
Initial guess for the independent variable(s).
eqcons : list, optional
A list of functions of length n such that
eqcons[j](x,*args) == 0.0 in a successfully optimized
problem.
f_eqcons : callable f(x,*args), optional
Returns a 1-D array in which each element must equal 0.0 in a
successfully optimized problem. If f_eqcons is specified,
eqcons is ignored.
ieqcons : list, optional
A list of functions of length n such that
ieqcons[j](x,*args) >= 0.0 in a successfully optimized
problem.
f_ieqcons : callable f(x,*args), optional
Returns a 1-D ndarray in which each element must be greater or
equal to 0.0 in a successfully optimized problem. If
f_ieqcons is specified, ieqcons is ignored.
bounds : list, optional
A list of tuples specifying the lower and upper bound
for each independent variable [(xl0, xu0),(xl1, xu1),...]
Infinite values will be interpreted as large floating values.
fprime : callable `f(x,*args)`, optional
A function that evaluates the partial derivatives of func.
fprime_eqcons : callable `f(x,*args)`, optional
A function of the form `f(x, *args)` that returns the m by n
array of equality constraint normals. If not provided,
the normals will be approximated. The array returned by
fprime_eqcons should be sized as ( len(eqcons), len(x0) ).
fprime_ieqcons : callable `f(x,*args)`, optional
A function of the form `f(x, *args)` that returns the m by n
array of inequality constraint normals. If not provided,
the normals will be approximated. The array returned by
fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ).
args : sequence, optional
Additional arguments passed to func and fprime.
iter : int, optional
The maximum number of iterations.
acc : float, optional
Requested accuracy.
iprint : int, optional
The verbosity of fmin_slsqp :
* iprint <= 0 : Silent operation
* iprint == 1 : Print summary upon completion (default)
* iprint >= 2 : Print status of each iterate and summary
disp : int, optional
Over-rides the iprint interface (preferred).
full_output : bool, optional
If False, return only the minimizer of func (default).
Otherwise, output final objective function and summary
information.
epsilon : float, optional
The step size for finite-difference derivative estimates.
callback : callable, optional
Called after each iteration, as ``callback(x)``, where ``x`` is the
current parameter vector.
Returns
-------
out : ndarray of float
The final minimizer of func.
fx : ndarray of float, if full_output is true
The final value of the objective function.
its : int, if full_output is true
The number of iterations.
imode : int, if full_output is true
The exit mode from the optimizer (see below).
smode : string, if full_output is true
Message describing the exit mode from the optimizer.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'SLSQP' `method` in particular.
Notes
-----
Exit modes are defined as follows ::
-1 : Gradient evaluation required (g & a)
0 : Optimization terminated successfully.
1 : Function evaluation required (f & c)
2 : More equality constraints than independent variables
3 : More than 3*n iterations in LSQ subproblem
4 : Inequality constraints incompatible
5 : Singular matrix E in LSQ subproblem
6 : Singular matrix C in LSQ subproblem
7 : Rank-deficient equality constraint subproblem HFTI
8 : Positive directional derivative for linesearch
9 : Iteration limit exceeded
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-sqlsp>`.
"""
if disp is not None:
iprint = disp
opts = {'maxiter': iter,
'ftol': acc,
'iprint': iprint,
'disp': iprint != 0,
'eps': epsilon,
'callback': callback}
# Build the constraints as a tuple of dictionaries
cons = ()
# 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take
# the same extra arguments as the objective function.
cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons)
cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons)
# 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian
# (fprime_eqcons, fprime_ieqcons); also take the same extra arguments
# as the objective function.
if f_eqcons:
cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons,
'args': args}, )
if f_ieqcons:
cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons,
'args': args}, )
res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
constraints=cons, **opts)
if full_output:
return res['x'], res['fun'], res['nit'], res['status'], res['message']
else:
return res['x'] | [
"def",
"fmin_slsqp",
"(",
"func",
",",
"x0",
",",
"eqcons",
"=",
"(",
")",
",",
"f_eqcons",
"=",
"None",
",",
"ieqcons",
"=",
"(",
")",
",",
"f_ieqcons",
"=",
"None",
",",
"bounds",
"=",
"(",
")",
",",
"fprime",
"=",
"None",
",",
"fprime_eqcons",
... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/optimize/slsqp.py#L69-L212 | ||
cornell-zhang/heterocl | 6d9e4b4acc2ee2707b2d25b27298c0335bccedfd | python/heterocl/compute_api.py | python | mutate | (domain, fcompute, name=None) | Perform a computation repeatedly in the given mutation domain.
This API allows users to write a loop in a tensorized way, which makes it
easier to exploit the parallelism when performing optimizations. The rules
for the computation function are the same as that of :obj:`compute`.
Parameters
----------
domain : tuple
The mutation domain
fcompute : callable
The computation function that will be performed repeatedly
name : str, optional
The name of the operation
Returns
-------
None
Examples
--------
.. code-block:: python
# this example finds the max two numbers in A and stores it in M
A = hcl.placeholder((10,))
M = hcl.placeholder((2,))
def loop_body(x):
with hcl.if_(A[x] > M[0]):
with hcl.if_(A[x] > M[1]):
M[0] = M[1]
M[1] = A[x]
with hcl.else_():
M[0] = A[x]
hcl.mutate(A.shape, lambda x: loop_body(x)) | Perform a computation repeatedly in the given mutation domain. | [
"Perform",
"a",
"computation",
"repeatedly",
"in",
"the",
"given",
"mutation",
"domain",
"."
def mutate(domain, fcompute, name=None):
    """Perform a computation repeatedly in the given mutation domain.

    This API allows users to write a loop in a tensorized way, which makes it
    easier to exploit the parallelism when performing optimizations. The rules
    for the computation function are the same as that of :obj:`compute`.

    Parameters
    ----------
    domain : tuple
        The mutation domain

    fcompute : callable
        The computation function that will be performed repeatedly

    name : str, optional
        The name of the operation

    Returns
    -------
    None

    Examples
    --------
    .. code-block:: python

        # this example finds the max two numbers in A and stores it in M
        A = hcl.placeholder((10,))
        M = hcl.placeholder((2,))

        def loop_body(x):
            with hcl.if_(A[x] > M[0]):
                with hcl.if_(A[x] > M[1]):
                    M[0] = M[1]
                    M[1] = A[x]
                with hcl.else_():
                    M[0] = A[x]

        hcl.mutate(A.shape, lambda x: loop_body(x))
    """
    # check API correctness
    if not isinstance(domain, tuple):
        raise APIError("The mutation domain must be a tuple")
    name = get_name("mutate", name)
    # prepare the iteration variables: one iteration variable per dimension
    # of the mutation domain, each ranging over [0, domain[n])
    args, nargs = process_fcompute(fcompute, domain)
    indices = [_IterVar((0, domain[n]), args[n], 0) for n in range(0, nargs)]
    var_list = [i.var for i in indices]
    # perform the computation: trace fcompute inside a fresh statement scope,
    # then wrap the collected statements in a loop nest over `indices`
    with Stage(name) as stage:
        stage.stmt_stack.append([])
        fcompute(*var_list)
        body = stage.pop_stmt()
        stage.emit(make_for(indices, body, 0, name))
        stage.axis_list = indices + stage.axis_list
"def",
"mutate",
"(",
"domain",
",",
"fcompute",
",",
"name",
"=",
"None",
")",
":",
"# check API correctness",
"if",
"not",
"isinstance",
"(",
"domain",
",",
"tuple",
")",
":",
"raise",
"APIError",
"(",
"\"The mutation domain must be a tuple\"",
")",
"name",
... | https://github.com/cornell-zhang/heterocl/blob/6d9e4b4acc2ee2707b2d25b27298c0335bccedfd/python/heterocl/compute_api.py#L332-L389 | ||
christinaa/LLVM-VideoCore4 | 7773c3c9e5d22b785d4b96ed0acea37c8aa9c183 | bindings/python/llvm/object.py | python | ObjectFile.get_sections | (self, cache=False) | Obtain the sections in this object file.
This is a generator for llvm.object.Section instances.
Sections are exposed as limited-use objects. See the module's
documentation on iterators for more. | Obtain the sections in this object file. | [
"Obtain",
"the",
"sections",
"in",
"this",
"object",
"file",
"."
def get_sections(self, cache=False):
    """Obtain the sections in this object file.

    This is a generator for llvm.object.Section instances.

    Sections are exposed as limited-use objects. See the module's
    documentation on iterators for more.
    """
    sections = lib.LLVMGetSections(self)
    last = None
    while True:
        if lib.LLVMIsSectionIteratorAtEnd(self, sections):
            break

        last = Section(sections)
        if cache:
            last.cache()

        yield last

        # Advance the iterator, then expire the section we just yielded so
        # callers cannot use it once the iterator has moved past it.
        lib.LLVMMoveToNextSection(sections)
        last.expire()

    # NOTE(review): the section yielded on the final iteration is expired
    # both inside the loop and here — expire() is presumably idempotent,
    # but confirm before relying on that.
    if last is not None:
        last.expire()

    lib.LLVMDisposeSectionIterator(sections)
"def",
"get_sections",
"(",
"self",
",",
"cache",
"=",
"False",
")",
":",
"sections",
"=",
"lib",
".",
"LLVMGetSections",
"(",
"self",
")",
"last",
"=",
"None",
"while",
"True",
":",
"if",
"lib",
".",
"LLVMIsSectionIteratorAtEnd",
"(",
"self",
",",
"sect... | https://github.com/christinaa/LLVM-VideoCore4/blob/7773c3c9e5d22b785d4b96ed0acea37c8aa9c183/bindings/python/llvm/object.py#L123-L149 | ||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/engine/training_utils.py | python | standardize_weights | (y,
sample_weight=None,
class_weight=None,
sample_weight_mode=None) | return None | Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array. If both `sample_weight` and `class_weight` are provided,
the weights are multiplied.
Arguments:
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated
that we expect 2D weight data that will be applied to the last 2
dimensions of the targets (i.e. we are weighting timesteps, not
samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments. | Performs sample weight validation and standardization. | [
"Performs",
"sample",
"weight",
"validation",
"and",
"standardization",
"."
def standardize_weights(y,
                        sample_weight=None,
                        class_weight=None,
                        sample_weight_mode=None):
  """Performs sample weight validation and standardization.

  Everything gets normalized to a single sample-wise (or timestep-wise)
  weight array. If both `sample_weight` and `class_weight` are provided,
  the weights are multiplied.

  Arguments:
      y: Numpy array of model targets to be weighted.
      sample_weight: User-provided `sample_weight` argument.
      class_weight: User-provided `class_weight` argument.
      sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated
        that we expect 2D weight data that will be applied to the last 2
        dimensions of the targets (i.e. we are weighting timesteps, not
        samples).

  Returns:
      A numpy array of target weights, one entry per sample to weight.

  Raises:
      ValueError: In case of invalid user-provided arguments.
  """
  # Iterator may return sample_weight as 1-tuple
  if isinstance(sample_weight, tuple):
    sample_weight = sample_weight[0]
  # Validate the shape of sample_weight against the requested mode:
  # temporal mode needs a 2D weight array and >=3D targets; the default
  # (sample-wise) mode needs a 1D weight array.
  if sample_weight_mode is not None and sample_weight_mode != 'samplewise':
    if sample_weight_mode != 'temporal':
      raise ValueError('"sample_weight_mode '
                       'should be None or "temporal". '
                       'Found: ' + str(sample_weight_mode))
    if len(y.shape) < 3:
      raise ValueError('Found a sample_weight array for '
                       'an input with shape ' + str(y.shape) + '. '
                       'Timestep-wise sample weighting (use of '
                       'sample_weight_mode="temporal") is restricted to '
                       'outputs that are at least 3D, i.e. that have '
                       'a time dimension.')
    if sample_weight is not None and len(sample_weight.shape) != 2:
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + '. '
                       'In order to use timestep-wise sample weighting, '
                       'you should pass a 2D sample_weight array.')
  else:
    if sample_weight is not None and len(sample_weight.shape) != 1:
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + '. '
                       'In order to use timestep-wise sample weights, '
                       'you should specify '
                       'sample_weight_mode="temporal" '
                       'in compile(). If you just mean to use '
                       'sample-wise weights, make sure your '
                       'sample_weight array is 1D.')

  # sample_weight must be broadcastable against y's leading dimensions.
  if sample_weight is not None:
    if len(sample_weight.shape) > len(y.shape):
      raise ValueError('Found a sample_weight with shape' +
                       str(sample_weight.shape) + '.'
                       'Expected sample_weight with rank '
                       'less than or equal to ' + str(len(y.shape)))

    if (not tensor_util.is_tensor(sample_weight) and
        y.shape[:sample_weight.ndim] != sample_weight.shape):
      raise ValueError('Found a sample_weight array with shape ' +
                       str(sample_weight.shape) + ' for an input with shape ' +
                       str(y.shape) + '. '
                       'sample_weight cannot be broadcast.')

  # Class weights applied per-sample: derive each sample's class from y
  # (argmax for one-hot, the value itself for sparse labels) and look it up
  # in the class_weight dict.
  class_sample_weight = None
  if isinstance(class_weight, dict):
    if len(y.shape) > 2:
      raise ValueError('`class_weight` not supported for '
                       '3+ dimensional targets.')

    if len(y.shape) == 2:
      if y.shape[1] > 1:
        y_classes = np.argmax(y, axis=1)
      elif y.shape[1] == 1:
        y_classes = np.reshape(y, y.shape[0])
    else:
      y_classes = y

    class_sample_weight = np.asarray(
        [class_weight[cls] for cls in y_classes if cls in class_weight])

    if len(class_sample_weight) != len(y_classes):
      # subtract the sets to pick all missing classes
      existing_classes = set(y_classes)
      existing_class_weight = set(class_weight.keys())
      raise ValueError(
          '`class_weight` must contain all classes in the data.'
          ' The classes %s exist in the data but not in '
          '`class_weight`.' % (existing_classes - existing_class_weight))

  if class_sample_weight is not None and sample_weight is not None:
    # Multiply weights if both are provided.
    return class_sample_weight * sample_weight
  if sample_weight is not None:
    return sample_weight
  if class_sample_weight is not None:
    return class_sample_weight
  return None
"def",
"standardize_weights",
"(",
"y",
",",
"sample_weight",
"=",
"None",
",",
"class_weight",
"=",
"None",
",",
"sample_weight_mode",
"=",
"None",
")",
":",
"# Iterator may return sample_weight as 1-tuple",
"if",
"isinstance",
"(",
"sample_weight",
",",
"tuple",
"... | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/engine/training_utils.py#L849-L953 | |
0ad/0ad | f58db82e0e925016d83f4e3fa7ca599e3866e2af | source/tools/fontbuilder2/Packer.py | python | CygonRectanglePacker.tryFindBestPlacement | (self, rectangleWidth, rectangleHeight) | Finds the best position for a rectangle of the given dimensions
rectangleWidth: Width of the rectangle to find a position for
rectangleHeight: Height of the rectangle to find a position for
Returns a Point instance if a valid placement for the rectangle could
be found, otherwise returns None | Finds the best position for a rectangle of the given dimensions
rectangleWidth: Width of the rectangle to find a position for
rectangleHeight: Height of the rectangle to find a position for
Returns a Point instance if a valid placement for the rectangle could
be found, otherwise returns None | [
"Finds",
"the",
"best",
"position",
"for",
"a",
"rectangle",
"of",
"the",
"given",
"dimensions",
"rectangleWidth",
":",
"Width",
"of",
"the",
"rectangle",
"to",
"find",
"a",
"position",
"for",
"rectangleHeight",
":",
"Height",
"of",
"the",
"rectangle",
"to",
def tryFindBestPlacement(self, rectangleWidth, rectangleHeight):
    """Finds the best position for a rectangle of the given dimensions.

    Sweeps the rectangle left-to-right across the height slices, placing it
    at each slice start and scoring the placement by the resulting top edge
    (lower is better).

    rectangleWidth: Width of the rectangle to find a position for
    rectangleHeight: Height of the rectangle to find a position for
    Returns a Point instance if a valid placement for the rectangle could
    be found, otherwise returns None"""
    # Slice index, vertical position and score of the best placement we
    # could find
    bestSliceIndex = -1  # Slice index where the best placement was found
    bestSliceY = 0  # Y position of the best placement found
    # lower == better!
    bestScore = self.packingAreaHeight

    # This is the counter for the currently checked position. The search
    # works by skipping from slice to slice, determining the suitability
    # of the location for the placement of the rectangle.
    leftSliceIndex = 0

    # Determine the slice in which the right end of the rectangle is located
    rightSliceIndex = bisect_left(self.heightSlices, Point(rectangleWidth, 0))

    while rightSliceIndex <= len(self.heightSlices):
        # Determine the highest slice within the slices covered by the
        # rectangle at its current placement. We cannot put the rectangle
        # any lower than this without overlapping the other rectangles.
        highest = self.heightSlices[leftSliceIndex].y
        for index in range(leftSliceIndex + 1, rightSliceIndex):
            if self.heightSlices[index].y > highest:
                highest = self.heightSlices[index].y

        # Only process this position if it doesn't leave the packing area
        if highest + rectangleHeight < self.packingAreaHeight:
            score = highest

            if score < bestScore:
                bestSliceIndex = leftSliceIndex
                bestSliceY = highest
                bestScore = score

        # Advance the starting slice to the next slice start
        leftSliceIndex += 1
        if leftSliceIndex >= len(self.heightSlices):
            break

        # Advance the ending slice until we're on the proper slice again,
        # given the new starting position of the rectangle.
        rightRectangleEnd = self.heightSlices[leftSliceIndex].x + rectangleWidth
        while rightSliceIndex <= len(self.heightSlices):
            if rightSliceIndex == len(self.heightSlices):
                rightSliceStart = self.packingAreaWidth
            else:
                rightSliceStart = self.heightSlices[rightSliceIndex].x

            # Is this the slice we're looking for?
            if rightSliceStart > rightRectangleEnd:
                break

            rightSliceIndex += 1

        # If we crossed the end of the slice array, the rectangle's right
        # end has left the packing area, and thus, our search ends.
        if rightSliceIndex > len(self.heightSlices):
            break

    # Return the best placement we found for this rectangle. If the
    # rectangle didn't fit anywhere, the slice index will still have its
    # initialization value of -1 and we can report that no placement
    # could be found.
    if bestSliceIndex == -1:
        return None
    else:
        return Point(self.heightSlices[bestSliceIndex].x, bestSliceY)
"def",
"tryFindBestPlacement",
"(",
"self",
",",
"rectangleWidth",
",",
"rectangleHeight",
")",
":",
"# Slice index, vertical position and score of the best placement we",
"# could find",
"bestSliceIndex",
"=",
"-",
"1",
"# Slice index where the best placement was found",
"bestSlic... | https://github.com/0ad/0ad/blob/f58db82e0e925016d83f4e3fa7ca599e3866e2af/source/tools/fontbuilder2/Packer.py#L151-L224 | ||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/reduction_gui/reduction/diffraction/diffraction_filter_setup_script.py | python | FilterSetupScript.set_default_pars | (self, inst_name) | return | Default parameters | Default parameters | [
"Default",
"parameters"
def set_default_pars(self, inst_name):
    """Apply instrument-specific default parameters.

    Currently a no-op placeholder: no filter-setup defaults depend on the
    instrument name, so the method simply returns.
    """
    return
"def",
"set_default_pars",
"(",
"self",
",",
"inst_name",
")",
":",
"return"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/reduction_gui/reduction/diffraction/diffraction_filter_setup_script.py#L83-L87 | |
apple/swift-lldb | d74be846ef3e62de946df343e8c234bde93a8912 | scripts/Python/static-binding/lldb.py | python | SBCommandReturnObject.GetError | (self, *args) | return _lldb.SBCommandReturnObject_GetError(self, *args) | GetError(SBCommandReturnObject self) -> char const
GetError(SBCommandReturnObject self, bool if_no_immediate) -> char const * | GetError(SBCommandReturnObject self) -> char const
GetError(SBCommandReturnObject self, bool if_no_immediate) -> char const * | [
"GetError",
"(",
"SBCommandReturnObject",
"self",
")",
"-",
">",
"char",
"const",
"GetError",
"(",
"SBCommandReturnObject",
"self",
"bool",
"if_no_immediate",
")",
"-",
">",
"char",
"const",
"*"
def GetError(self, *args):
    """
    GetError(SBCommandReturnObject self) -> char const
    GetError(SBCommandReturnObject self, bool if_no_immediate) -> char const *
    """
    # Thin SWIG wrapper: forward all arguments to the native binding.
    result = _lldb.SBCommandReturnObject_GetError(self, *args)
    return result
"def",
"GetError",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_lldb",
".",
"SBCommandReturnObject_GetError",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L2878-L2883 | |
apiaryio/drafter | 4634ebd07f6c6f257cc656598ccd535492fdfb55 | tools/gyp/pylib/gyp/mac_tool.py | python | MacTool.ExecFilterLibtool | (self, *cmd_list) | return libtoolout.returncode | Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'. | Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'. | [
"Calls",
"libtool",
"and",
"filters",
"out",
"/",
"path",
"/",
"to",
"/",
"libtool",
":",
"file",
":",
"foo",
".",
"o",
"has",
"no",
"symbols",
"."
def ExecFilterLibtool(self, *cmd_list):
  """Calls libtool and filters out '/path/to/libtool: file: foo.o has no
  symbols'."""
  # NOTE(review): Python 2 code (print statement below); do not run under
  # Python 3 without porting.
  libtool_re = re.compile(r'^.*libtool: (?:for architecture: \S* )?'
                          r'file: .* has no symbols$')
  libtool_re5 = re.compile(
      r'^.*libtool: warning for library: ' +
      r'.* the table of contents is empty ' +
      r'\(no object file members in the library define global symbols\)$')
  env = os.environ.copy()
  # Ref:
  # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
  # The problem with this flag is that it resets the file mtime on the file to
  # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
  env['ZERO_AR_DATE'] = '1'
  libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
  _, err = libtoolout.communicate()
  # Echo only the stderr lines that are not known harmless warnings.
  for line in err.splitlines():
    if not libtool_re.match(line) and not libtool_re5.match(line):
      print >>sys.stderr, line
  # Unconditionally touch the output .a file on the command line if present
  # and the command succeeded. A bit hacky.
  # (Restores a sane mtime after ZERO_AR_DATE zeroed it above.)
  if not libtoolout.returncode:
    for i in range(len(cmd_list) - 1):
      if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
        os.utime(cmd_list[i+1], None)
        break
  return libtoolout.returncode
"def",
"ExecFilterLibtool",
"(",
"self",
",",
"*",
"cmd_list",
")",
":",
"libtool_re",
"=",
"re",
".",
"compile",
"(",
"r'^.*libtool: (?:for architecture: \\S* )?'",
"r'file: .* has no symbols$'",
")",
"libtool_re5",
"=",
"re",
".",
"compile",
"(",
"r'^.*libtool: warn... | https://github.com/apiaryio/drafter/blob/4634ebd07f6c6f257cc656598ccd535492fdfb55/tools/gyp/pylib/gyp/mac_tool.py#L246-L273 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/linalg_grad.py | python | _RightShift | (x) | return array_ops.pad(x[..., :-1, :], pad) | Shifts next-to-last dimension to the right, adding zero on the left. | Shifts next-to-last dimension to the right, adding zero on the left. | [
"Shifts",
"next",
"-",
"to",
"-",
"last",
"dimension",
"to",
"the",
"right",
"adding",
"zero",
"on",
"the",
"left",
"."
def _RightShift(x):
  """Shift the next-to-last dimension right by one, zero-filling on the left."""
  # Paddings matrix: zeros for every batch dimension, then a single leading
  # pad of 1 on the next-to-last axis and none on the last axis.
  batch_rank = array_ops.rank(x) - 2
  batch_padding = array_ops.zeros((batch_rank, 2), dtype=dtypes.int32)
  tail_padding = array_ops.constant([[1, 0], [0, 0]])
  paddings = array_ops.concat([batch_padding, tail_padding], axis=0)
  # Drop the last row along the next-to-last axis, then pad on the left.
  return array_ops.pad(x[..., :-1, :], paddings)
"def",
"_RightShift",
"(",
"x",
")",
":",
"rank",
"=",
"array_ops",
".",
"rank",
"(",
"x",
")",
"zeros",
"=",
"array_ops",
".",
"zeros",
"(",
"(",
"rank",
"-",
"2",
",",
"2",
")",
",",
"dtype",
"=",
"dtypes",
".",
"int32",
")",
"pad",
"=",
"arr... | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/linalg_grad.py#L479-L484 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/ipython/py3/IPython/core/compilerop.py | python | CachingCompiler.cache | (self, transformed_code, number=0, raw_code=None) | return name | Make a name for a block of code, and cache the code.
Parameters
----------
transformed_code : str
The executable Python source code to cache and compile.
number : int
A number which forms part of the code's name. Used for the execution
counter.
raw_code : str
The raw code before transformation, if None, set to `transformed_code`.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up. | Make a name for a block of code, and cache the code. | [
"Make",
"a",
"name",
"for",
"a",
"block",
"of",
"code",
"and",
"cache",
"the",
"code",
"."
def cache(self, transformed_code, number=0, raw_code=None):
    """Make a name for a block of code and register it with the linecache.

    Parameters
    ----------
    transformed_code : str
        The executable Python source code to cache and compile.
    number : int
        A number which forms part of the code's name. Used for the execution
        counter.
    raw_code : str
        The raw code before transformation; defaults to `transformed_code`.

    Returns
    -------
    The name of the cached code (as a string). Pass this as the filename
    argument to compilation, so that tracebacks are correctly hooked up.
    """
    if raw_code is None:
        raw_code = transformed_code

    name = self.get_code_name(raw_code, transformed_code, number)
    # linecache entries are (size, mtime, lines, fullname); keep the newline
    # terminators so tracebacks render each source line verbatim.
    source_lines = [line + "\n" for line in transformed_code.splitlines()]
    entry = (len(transformed_code), time.time(), source_lines, name)
    # Mirror the entry into IPython's private cache as well as the global one.
    linecache.cache[name] = entry
    linecache._ipython_cache[name] = entry
    return name
"def",
"cache",
"(",
"self",
",",
"transformed_code",
",",
"number",
"=",
"0",
",",
"raw_code",
"=",
"None",
")",
":",
"if",
"raw_code",
"is",
"None",
":",
"raw_code",
"=",
"transformed_code",
"name",
"=",
"self",
".",
"get_code_name",
"(",
"raw_code",
"... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/core/compilerop.py#L134-L164 | |
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py | python | _report_compression_stats | (bucket, state) | Report compression stats at the frequency of `compression_stats_logging_frequency` specified in PowerSGD state. | Report compression stats at the frequency of `compression_stats_logging_frequency` specified in PowerSGD state. | [
"Report",
"compression",
"stats",
"at",
"the",
"frequency",
"of",
"compression_stats_logging_frequency",
"specified",
"in",
"PowerSGD",
"state",
"."
] | def _report_compression_stats(bucket, state):
"""
Report compression stats at the frequency of `compression_stats_logging_frequency` specified in PowerSGD state.
"""
if (
bucket.is_last()
and state.iter >= state.next_stats_report
):
stats = state.compression_stats()
logging.info(
"Compression stats: iter {}, total before compression {}, total after compression {}, "
"rate {}".format(state.iter, stats[1], stats[2], stats[0])
)
state.next_stats_report = state.iter + state.compression_stats_logging_frequency | [
"def",
"_report_compression_stats",
"(",
"bucket",
",",
"state",
")",
":",
"if",
"(",
"bucket",
".",
"is_last",
"(",
")",
"and",
"state",
".",
"iter",
">=",
"state",
".",
"next_stats_report",
")",
":",
"stats",
"=",
"state",
".",
"compression_stats",
"(",
... | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py#L89-L102 | ||
metashell/metashell | f4177e4854ea00c8dbc722cadab26ef413d798ea | 3rd/templight/clang/tools/scan-build-py/libscanbuild/report.py | python | bug_summary | (output_dir, bug_counter) | return name | Bug summary is a HTML table to give a better overview of the bugs. | Bug summary is a HTML table to give a better overview of the bugs. | [
"Bug",
"summary",
"is",
"a",
"HTML",
"table",
"to",
"give",
"a",
"better",
"overview",
"of",
"the",
"bugs",
"."
def bug_summary(output_dir, bug_counter):
    """ Bug summary is a HTML table to give a better overview of the bugs. """
    # Writes an HTML fragment (one table row per bug type, with per-type
    # show/hide checkboxes) that the report assembler stitches into the
    # final index page. Returns the fragment's file name.
    name = os.path.join(output_dir, 'summary.html.fragment')
    with open(name, 'w') as handle:
        indent = 4
        # Table header plus the "All Bugs" master checkbox row.
        handle.write(reindent("""
        |<h2>Bug Summary</h2>
        |<table>
        | <thead>
        | <tr>
        | <td>Bug Type</td>
        | <td>Quantity</td>
        | <td class="sorttable_nosort">Display?</td>
        | </tr>
        | </thead>
        | <tbody>""", indent))
        handle.write(reindent("""
        | <tr style="font-weight:bold">
        | <td class="SUMM_DESC">All Bugs</td>
        | <td class="Q">{0}</td>
        | <td>
        | <center>
        | <input checked type="checkbox" id="AllBugsCheck"
        | onClick="CopyCheckedStateToCheckButtons(this);"/>
        | </center>
        | </td>
        | </tr>""", indent).format(bug_counter.total))
        # One heading row per category, then one row per bug type in it.
        for category, types in bug_counter.categories.items():
            handle.write(reindent("""
        | <tr>
        | <th>{0}</th><th colspan=2></th>
        | </tr>""", indent).format(category))
            for bug_type in types.values():
                handle.write(reindent("""
        | <tr>
        | <td class="SUMM_DESC">{bug_type}</td>
        | <td class="Q">{bug_count}</td>
        | <td>
        | <center>
        | <input checked type="checkbox"
        | onClick="ToggleDisplay(this,'{bug_type_class}');"/>
        | </center>
        | </td>
        | </tr>""", indent).format(**bug_type))
        handle.write(reindent("""
        | </tbody>
        |</table>""", indent))
        # Marker comment used when splicing fragments into the report.
        handle.write(comment('SUMMARYBUGEND'))
    return name
"def",
"bug_summary",
"(",
"output_dir",
",",
"bug_counter",
")",
":",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'summary.html.fragment'",
")",
"with",
"open",
"(",
"name",
",",
"'w'",
")",
"as",
"handle",
":",
"indent",
"=",
... | https://github.com/metashell/metashell/blob/f4177e4854ea00c8dbc722cadab26ef413d798ea/3rd/templight/clang/tools/scan-build-py/libscanbuild/report.py#L110-L159 | |
danxuhk/ContinuousCRF-CNN | 2b6dcaf179620f118b225ed12c890414ca828e21 | scripts/cpp_lint.py | python | CheckCheck | (filename, clean_lines, linenum, error) | Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Checks the use of CHECK and EXPECT macros. | [
"Checks",
"the",
"use",
"of",
"CHECK",
"and",
"EXPECT",
"macros",
"."
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Suggests the comparison-specific form (e.g. CHECK_EQ) when a CHECK-style
  macro wraps a single relational comparison against a constant literal.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  check_macro = None
  start_pos = -1
  for macro in _CHECK_MACROS:
    i = lines[linenum].find(macro)
    if i >= 0:
      check_macro = macro

      # Find opening parenthesis. Do a regular expression match here
      # to make sure that we are matching the expected CHECK macro, as
      # opposed to some other macro that happens to contain the CHECK
      # substring.
      matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
      if not matched:
        continue
      start_pos = len(matched.group(1))
      break
  if not check_macro or start_pos < 0:
    # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
    return

  # Find end of the boolean expression by matching parentheses
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return
  # Collect the full (possibly multi-line) argument text of the macro.
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]

  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators. This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand. Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible. Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)

  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return

  # Check that rhs do not contain logical operators. We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return

  # At least one of the operands must be a constant literal. This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
"def",
"CheckCheck",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"# Decide the set of replacement macros that should be suggested",
"lines",
"=",
"clean_lines",
".",
"elided",
"check_macro",
"=",
"None",
"start_pos",
"=",
"-",
"1",
"... | https://github.com/danxuhk/ContinuousCRF-CNN/blob/2b6dcaf179620f118b225ed12c890414ca828e21/scripts/cpp_lint.py#L3282-L3406 | ||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | python | VARMA.get_state_transition | (self) | return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0) | Construct state transition matrix from VARMA parameters.
Returns:
the state transition matrix. It has shape
[self.state_dimendion, self.state_dimension]. | Construct state transition matrix from VARMA parameters. | [
"Construct",
"state",
"transition",
"matrix",
"from",
"VARMA",
"parameters",
"."
def get_state_transition(self):
  """Construct the VARMA state transition matrix.

  Returns:
    the state transition matrix. It has shape
    [self.state_dimension, self.state_dimension].
  """
  # Zero-pad any unused AR blocks; the extra state is necessary if
  # ma_order >= ar_order.
  num_unused_blocks = self.state_num_blocks - self.ar_order
  padded_ar = array_ops.pad(
      self.ar_coefs, [[0, 0], [0, 0], [0, num_unused_blocks]])
  top_rows = array_ops.reshape(
      padded_ar, [self.num_features, self.state_dimension])
  # Identity block shifts the previous state blocks down by one feature block.
  identity = linalg_ops.eye(
      (self.state_num_blocks - 1) * self.num_features, dtype=self.dtype)
  bottom_rows = array_ops.pad(identity, [[0, 0], [0, self.num_features]])
  return array_ops.concat([top_rows, bottom_rows], axis=0)
"def",
"get_state_transition",
"(",
"self",
")",
":",
"# Pad any unused AR blocks with zeros. The extra state is necessary if",
"# ma_order >= ar_order.",
"ar_coefs_padded",
"=",
"array_ops",
".",
"reshape",
"(",
"array_ops",
".",
"pad",
"(",
"self",
".",
"ar_coefs",
",",
... | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py#L105-L123 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | ThreadEvent.SetString | (*args, **kwargs) | return _core_.ThreadEvent_SetString(*args, **kwargs) | SetString(self, String string) | SetString(self, String string) | [
"SetString",
"(",
"self",
"String",
"string",
")"
def SetString(*args, **kwargs):
    """SetString(self, String string)"""
    # Thin SWIG wrapper: forward all arguments to the native binding.
    result = _core_.ThreadEvent_SetString(*args, **kwargs)
    return result
"def",
"SetString",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"ThreadEvent_SetString",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L5402-L5404 | |
rodeofx/OpenWalter | 6116fbe3f04f1146c854afbfbdbe944feaee647e | walter/maya/scripts/walterPanel/walterOutliner.py | python | TreeItem.getName | (self) | return "Unknown" | The label of the item. | The label of the item. | [
"The",
"label",
"of",
"the",
"item",
"."
def getName(self):
    """The label of the item."""
    itemType = self.getType()
    if itemType == self.TYPE_ORIGIN:
        return self.originObject
    if itemType == self.TYPE_ALEMBIC:
        # The alembic root has no path component; show a friendly label.
        if self.alembicObject == "/":
            return "Walter Root"
        # Display name is the last component of the alembic path.
        return self.alembicObject.rsplit('/', 1)[-1]
    if itemType == self.TYPE_EXPRESSION:
        return self.expression
    if itemType == self.TYPE_EXPGROUP:
        return self.expressionGroup
    return "Unknown"
"def",
"getName",
"(",
"self",
")",
":",
"if",
"self",
".",
"getType",
"(",
")",
"==",
"self",
".",
"TYPE_ORIGIN",
":",
"return",
"self",
".",
"originObject",
"elif",
"self",
".",
"getType",
"(",
")",
"==",
"self",
".",
"TYPE_ALEMBIC",
":",
"if",
"se... | https://github.com/rodeofx/OpenWalter/blob/6116fbe3f04f1146c854afbfbdbe944feaee647e/walter/maya/scripts/walterPanel/walterOutliner.py#L1787-L1802 | |
SpaceNetChallenge/BuildingDetectors | 3def3c44b5847c744cd2f3356182892d92496579 | qinhaifang/src/lib/datasets/pascal_voc_det.py | python | PascalVOCDet._load_pascal_annotations | (self, index) | return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False} | Load image and bounding boxes info from XML file
in the PASCAL VOC format according to image index | Load image and bounding boxes info from XML file
in the PASCAL VOC format according to image index | [
"Load",
"image",
"and",
"bounding",
"boxes",
"info",
"from",
"XML",
"file",
"in",
"the",
"PASCAL",
"VOC",
"format",
"according",
"to",
"image",
"index"
] | def _load_pascal_annotations(self, index):
"""
Load image and bounding boxes info from XML file
in the PASCAL VOC format according to image index
"""
image_name = self._image_index[index]
filename = os.path.join(self._data_path, 'Annotations', image_name + '.xml')
tree = xmlET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
if len(non_diff_objs) != len(objs):
print 'Removed {} difficult objects'.format(len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros(num_objs, dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
# boxes[ind, :] will be boxes
# gt_classes[ind] will be the associated class name for this box
# overlaps[ind, class] will assign 1.0 to ground truth
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False} | [
"def",
"_load_pascal_annotations",
"(",
"self",
",",
"index",
")",
":",
"image_name",
"=",
"self",
".",
"_image_index",
"[",
"index",
"]",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_data_path",
",",
"'Annotations'",
",",
"image_na... | https://github.com/SpaceNetChallenge/BuildingDetectors/blob/3def3c44b5847c744cd2f3356182892d92496579/qinhaifang/src/lib/datasets/pascal_voc_det.py#L151-L193 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/customtreectrl.py | python | CustomTreeCtrl.SetFont | (self, font) | return True | Sets the :class:`CustomTreeCtrl` font.
:param `font`: a valid :class:`Font` instance.
:note: Overridden from :class:`PyScrolledWindow`. | Sets the :class:`CustomTreeCtrl` font. | [
"Sets",
"the",
":",
"class",
":",
"CustomTreeCtrl",
"font",
"."
] | def SetFont(self, font):
"""
Sets the :class:`CustomTreeCtrl` font.
:param `font`: a valid :class:`Font` instance.
:note: Overridden from :class:`PyScrolledWindow`.
"""
wx.PyScrolledWindow.SetFont(self, font)
self._normalFont = font
family = self._normalFont.GetFamily()
if family == wx.FONTFAMILY_UNKNOWN:
family = wx.FONTFAMILY_SWISS
self._boldFont = wx.Font(self._normalFont.GetPointSize(), family,
self._normalFont.GetStyle(), wx.BOLD, self._normalFont.GetUnderlined(),
self._normalFont.GetFaceName(), self._normalFont.GetEncoding())
self._italicFont = wx.Font(self._normalFont.GetPointSize(), family,
wx.FONTSTYLE_ITALIC, wx.NORMAL, self._normalFont.GetUnderlined(),
self._normalFont.GetFaceName(), self._normalFont.GetEncoding())
self.CalculatePositions()
self.Refresh()
self.AdjustMyScrollbars()
return True | [
"def",
"SetFont",
"(",
"self",
",",
"font",
")",
":",
"wx",
".",
"PyScrolledWindow",
".",
"SetFont",
"(",
"self",
",",
"font",
")",
"self",
".",
"_normalFont",
"=",
"font",
"family",
"=",
"self",
".",
"_normalFont",
".",
"GetFamily",
"(",
")",
"if",
... | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/customtreectrl.py#L3842-L3871 | |
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | Framework/PythonInterface/plugins/algorithms/FindGoniometerFromUB.py | python | getSignMaxAbsValInCol | (mat) | return np.sign(mat) * (abs(mat) == abs(mat).max(axis=0)) | Used to find most likely permutation of axes to provide consistency with reference UB.
:param mat: a 2D array
:return out: sign of largest element in each column of abs(matrix) | Used to find most likely permutation of axes to provide consistency with reference UB.
:param mat: a 2D array
:return out: sign of largest element in each column of abs(matrix) | [
"Used",
"to",
"find",
"most",
"likely",
"permutation",
"of",
"axes",
"to",
"provide",
"consistency",
"with",
"reference",
"UB",
".",
":",
"param",
"mat",
":",
"a",
"2D",
"array",
":",
"return",
"out",
":",
"sign",
"of",
"largest",
"element",
"in",
"each"... | def getSignMaxAbsValInCol(mat):
"""
Used to find most likely permutation of axes to provide consistency with reference UB.
:param mat: a 2D array
:return out: sign of largest element in each column of abs(matrix)
"""
return np.sign(mat) * (abs(mat) == abs(mat).max(axis=0)) | [
"def",
"getSignMaxAbsValInCol",
"(",
"mat",
")",
":",
"return",
"np",
".",
"sign",
"(",
"mat",
")",
"*",
"(",
"abs",
"(",
"mat",
")",
"==",
"abs",
"(",
"mat",
")",
".",
"max",
"(",
"axis",
"=",
"0",
")",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/FindGoniometerFromUB.py#L18-L24 | |
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/chigger/utils/Options.py | python | Option.get | (self) | return self.__value | Get the value of this option. | Get the value of this option. | [
"Get",
"the",
"value",
"of",
"this",
"option",
"."
] | def get(self):
"""
Get the value of this option.
"""
return self.__value | [
"def",
"get",
"(",
"self",
")",
":",
"return",
"self",
".",
"__value"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/chigger/utils/Options.py#L90-L94 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.