repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py | Subvariant.implicit_includes | def implicit_includes (self, feature, target_type):
""" Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implcit-dependecy>properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target.
"""
assert isinstance(feature, basestring)
assert isinstance(target_type, basestring)
if not target_type:
key = feature
else:
key = feature + "-" + target_type
result = self.implicit_includes_cache_.get(key)
if not result:
target_paths = self.all_target_directories(target_type)
target_paths = unique(target_paths)
result = ["<%s>%s" % (feature, p) for p in target_paths]
self.implicit_includes_cache_[key] = result
return result | python | def implicit_includes (self, feature, target_type):
""" Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implcit-dependecy>properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target.
"""
assert isinstance(feature, basestring)
assert isinstance(target_type, basestring)
if not target_type:
key = feature
else:
key = feature + "-" + target_type
result = self.implicit_includes_cache_.get(key)
if not result:
target_paths = self.all_target_directories(target_type)
target_paths = unique(target_paths)
result = ["<%s>%s" % (feature, p) for p in target_paths]
self.implicit_includes_cache_[key] = result
return result | [
"def",
"implicit_includes",
"(",
"self",
",",
"feature",
",",
"target_type",
")",
":",
"assert",
"isinstance",
"(",
"feature",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"target_type",
",",
"basestring",
")",
"if",
"not",
"target_type",
":",
"key",
"=",
"feature",
"else",
":",
"key",
"=",
"feature",
"+",
"\"-\"",
"+",
"target_type",
"result",
"=",
"self",
".",
"implicit_includes_cache_",
".",
"get",
"(",
"key",
")",
"if",
"not",
"result",
":",
"target_paths",
"=",
"self",
".",
"all_target_directories",
"(",
"target_type",
")",
"target_paths",
"=",
"unique",
"(",
"target_paths",
")",
"result",
"=",
"[",
"\"<%s>%s\"",
"%",
"(",
"feature",
",",
"p",
")",
"for",
"p",
"in",
"target_paths",
"]",
"self",
".",
"implicit_includes_cache_",
"[",
"key",
"]",
"=",
"result",
"return",
"result"
] | Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
and subvariants referred by <implcit-dependecy>properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target. | [
"Returns",
"the",
"properties",
"which",
"specify",
"implicit",
"include",
"paths",
"to",
"generated",
"headers",
".",
"This",
"traverses",
"all",
"targets",
"in",
"this",
"subvariant",
"and",
"subvariants",
"referred",
"by",
"<implcit",
"-",
"dependecy",
">",
"properties",
".",
"For",
"all",
"targets",
"which",
"are",
"of",
"type",
"target",
"-",
"type",
"(",
"or",
"for",
"all",
"targets",
"if",
"target_type",
"is",
"not",
"specified",
")",
"the",
"result",
"will",
"contain",
"<$",
"(",
"feature",
")",
">",
"path",
"-",
"to",
"-",
"that",
"-",
"target",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L1131-L1154 | train |
apple/turicreate | src/unity/python/turicreate/meta/asttools/__init__.py | cmp_ast | def cmp_ast(node1, node2):
'''
Compare if two nodes are equal.
'''
if type(node1) != type(node2):
return False
if isinstance(node1, (list, tuple)):
if len(node1) != len(node2):
return False
for left, right in zip(node1, node2):
if not cmp_ast(left, right):
return False
elif isinstance(node1, ast.AST):
for field in node1._fields:
left = getattr(node1, field, Undedined)
right = getattr(node2, field, Undedined)
if not cmp_ast(left, right):
return False
else:
return node1 == node2
return True | python | def cmp_ast(node1, node2):
'''
Compare if two nodes are equal.
'''
if type(node1) != type(node2):
return False
if isinstance(node1, (list, tuple)):
if len(node1) != len(node2):
return False
for left, right in zip(node1, node2):
if not cmp_ast(left, right):
return False
elif isinstance(node1, ast.AST):
for field in node1._fields:
left = getattr(node1, field, Undedined)
right = getattr(node2, field, Undedined)
if not cmp_ast(left, right):
return False
else:
return node1 == node2
return True | [
"def",
"cmp_ast",
"(",
"node1",
",",
"node2",
")",
":",
"if",
"type",
"(",
"node1",
")",
"!=",
"type",
"(",
"node2",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"node1",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"len",
"(",
"node1",
")",
"!=",
"len",
"(",
"node2",
")",
":",
"return",
"False",
"for",
"left",
",",
"right",
"in",
"zip",
"(",
"node1",
",",
"node2",
")",
":",
"if",
"not",
"cmp_ast",
"(",
"left",
",",
"right",
")",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"node1",
",",
"ast",
".",
"AST",
")",
":",
"for",
"field",
"in",
"node1",
".",
"_fields",
":",
"left",
"=",
"getattr",
"(",
"node1",
",",
"field",
",",
"Undedined",
")",
"right",
"=",
"getattr",
"(",
"node2",
",",
"field",
",",
"Undedined",
")",
"if",
"not",
"cmp_ast",
"(",
"left",
",",
"right",
")",
":",
"return",
"False",
"else",
":",
"return",
"node1",
"==",
"node2",
"return",
"True"
] | Compare if two nodes are equal. | [
"Compare",
"if",
"two",
"nodes",
"are",
"equal",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/asttools/__init__.py#L23-L49 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py | create_more_container_files | def create_more_container_files(sourceDir, suffix, maxElements, containers, containers2):
"""Creates additional files for the individual MPL-containers."""
# Create files for each MPL-container with 20 to 'maxElements' elements
# which will be used during generation.
for container in containers:
for i in range(20, maxElements, 10):
# Create copy of "template"-file.
newFile = os.path.join( sourceDir, container, container + str(i+10) + suffix )
shutil.copyfile( os.path.join( sourceDir, container, container + "20" + suffix ), newFile )
# Adjust copy of "template"-file accordingly.
for line in fileinput.input( newFile, inplace=1, mode="rU" ):
line = re.sub(r'20', '%TWENTY%', line.rstrip())
line = re.sub(r'11', '%ELEVEN%', line.rstrip())
line = re.sub(r'10(?![0-9])', '%TEN%', line.rstrip())
line = re.sub(r'%TWENTY%', re.escape(str(i+10)), line.rstrip())
line = re.sub(r'%ELEVEN%', re.escape(str(i + 1)), line.rstrip())
line = re.sub(r'%TEN%', re.escape(str(i)), line.rstrip())
print(line)
for container in containers2:
for i in range(20, maxElements, 10):
# Create copy of "template"-file.
newFile = os.path.join( sourceDir, container, container + str(i+10) + "_c" + suffix )
shutil.copyfile( os.path.join( sourceDir, container, container + "20_c" + suffix ), newFile )
# Adjust copy of "template"-file accordingly.
for line in fileinput.input( newFile, inplace=1, mode="rU" ):
line = re.sub(r'20', '%TWENTY%', line.rstrip())
line = re.sub(r'11', '%ELEVEN%', line.rstrip())
line = re.sub(r'10(?![0-9])', '%TEN%', line.rstrip())
line = re.sub(r'%TWENTY%', re.escape(str(i+10)), line.rstrip())
line = re.sub(r'%ELEVEN%', re.escape(str(i + 1)), line.rstrip())
line = re.sub(r'%TEN%', re.escape(str(i)), line.rstrip())
print(line) | python | def create_more_container_files(sourceDir, suffix, maxElements, containers, containers2):
"""Creates additional files for the individual MPL-containers."""
# Create files for each MPL-container with 20 to 'maxElements' elements
# which will be used during generation.
for container in containers:
for i in range(20, maxElements, 10):
# Create copy of "template"-file.
newFile = os.path.join( sourceDir, container, container + str(i+10) + suffix )
shutil.copyfile( os.path.join( sourceDir, container, container + "20" + suffix ), newFile )
# Adjust copy of "template"-file accordingly.
for line in fileinput.input( newFile, inplace=1, mode="rU" ):
line = re.sub(r'20', '%TWENTY%', line.rstrip())
line = re.sub(r'11', '%ELEVEN%', line.rstrip())
line = re.sub(r'10(?![0-9])', '%TEN%', line.rstrip())
line = re.sub(r'%TWENTY%', re.escape(str(i+10)), line.rstrip())
line = re.sub(r'%ELEVEN%', re.escape(str(i + 1)), line.rstrip())
line = re.sub(r'%TEN%', re.escape(str(i)), line.rstrip())
print(line)
for container in containers2:
for i in range(20, maxElements, 10):
# Create copy of "template"-file.
newFile = os.path.join( sourceDir, container, container + str(i+10) + "_c" + suffix )
shutil.copyfile( os.path.join( sourceDir, container, container + "20_c" + suffix ), newFile )
# Adjust copy of "template"-file accordingly.
for line in fileinput.input( newFile, inplace=1, mode="rU" ):
line = re.sub(r'20', '%TWENTY%', line.rstrip())
line = re.sub(r'11', '%ELEVEN%', line.rstrip())
line = re.sub(r'10(?![0-9])', '%TEN%', line.rstrip())
line = re.sub(r'%TWENTY%', re.escape(str(i+10)), line.rstrip())
line = re.sub(r'%ELEVEN%', re.escape(str(i + 1)), line.rstrip())
line = re.sub(r'%TEN%', re.escape(str(i)), line.rstrip())
print(line) | [
"def",
"create_more_container_files",
"(",
"sourceDir",
",",
"suffix",
",",
"maxElements",
",",
"containers",
",",
"containers2",
")",
":",
"# Create files for each MPL-container with 20 to 'maxElements' elements",
"# which will be used during generation.",
"for",
"container",
"in",
"containers",
":",
"for",
"i",
"in",
"range",
"(",
"20",
",",
"maxElements",
",",
"10",
")",
":",
"# Create copy of \"template\"-file.",
"newFile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"container",
",",
"container",
"+",
"str",
"(",
"i",
"+",
"10",
")",
"+",
"suffix",
")",
"shutil",
".",
"copyfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"container",
",",
"container",
"+",
"\"20\"",
"+",
"suffix",
")",
",",
"newFile",
")",
"# Adjust copy of \"template\"-file accordingly.",
"for",
"line",
"in",
"fileinput",
".",
"input",
"(",
"newFile",
",",
"inplace",
"=",
"1",
",",
"mode",
"=",
"\"rU\"",
")",
":",
"line",
"=",
"re",
".",
"sub",
"(",
"r'20'",
",",
"'%TWENTY%'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'11'",
",",
"'%ELEVEN%'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'10(?![0-9])'",
",",
"'%TEN%'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'%TWENTY%'",
",",
"re",
".",
"escape",
"(",
"str",
"(",
"i",
"+",
"10",
")",
")",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'%ELEVEN%'",
",",
"re",
".",
"escape",
"(",
"str",
"(",
"i",
"+",
"1",
")",
")",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'%TEN%'",
",",
"re",
".",
"escape",
"(",
"str",
"(",
"i",
")",
")",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"print",
"(",
"line",
")",
"for",
"container",
"in",
"containers2",
":",
"for",
"i",
"in",
"range",
"(",
"20",
",",
"maxElements",
",",
"10",
")",
":",
"# Create copy of \"template\"-file.",
"newFile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"container",
",",
"container",
"+",
"str",
"(",
"i",
"+",
"10",
")",
"+",
"\"_c\"",
"+",
"suffix",
")",
"shutil",
".",
"copyfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"container",
",",
"container",
"+",
"\"20_c\"",
"+",
"suffix",
")",
",",
"newFile",
")",
"# Adjust copy of \"template\"-file accordingly.",
"for",
"line",
"in",
"fileinput",
".",
"input",
"(",
"newFile",
",",
"inplace",
"=",
"1",
",",
"mode",
"=",
"\"rU\"",
")",
":",
"line",
"=",
"re",
".",
"sub",
"(",
"r'20'",
",",
"'%TWENTY%'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'11'",
",",
"'%ELEVEN%'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'10(?![0-9])'",
",",
"'%TEN%'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'%TWENTY%'",
",",
"re",
".",
"escape",
"(",
"str",
"(",
"i",
"+",
"10",
")",
")",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'%ELEVEN%'",
",",
"re",
".",
"escape",
"(",
"str",
"(",
"i",
"+",
"1",
")",
")",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"line",
"=",
"re",
".",
"sub",
"(",
"r'%TEN%'",
",",
"re",
".",
"escape",
"(",
"str",
"(",
"i",
")",
")",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"print",
"(",
"line",
")"
] | Creates additional files for the individual MPL-containers. | [
"Creates",
"additional",
"files",
"for",
"the",
"individual",
"MPL",
"-",
"containers",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py#L21-L53 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py | create_input_for_numbered_sequences | def create_input_for_numbered_sequences(headerDir, sourceDir, containers, maxElements):
"""Creates additional source- and header-files for the numbered sequence MPL-containers."""
# Create additional container-list without "map".
containersWithoutMap = containers[:]
try:
containersWithoutMap.remove('map')
except ValueError:
# We can safely ignore if "map" is not contained in 'containers'!
pass
# Create header/source-files.
create_more_container_files(headerDir, ".hpp", maxElements, containers, containersWithoutMap)
create_more_container_files(sourceDir, ".cpp", maxElements, containers, containersWithoutMap) | python | def create_input_for_numbered_sequences(headerDir, sourceDir, containers, maxElements):
"""Creates additional source- and header-files for the numbered sequence MPL-containers."""
# Create additional container-list without "map".
containersWithoutMap = containers[:]
try:
containersWithoutMap.remove('map')
except ValueError:
# We can safely ignore if "map" is not contained in 'containers'!
pass
# Create header/source-files.
create_more_container_files(headerDir, ".hpp", maxElements, containers, containersWithoutMap)
create_more_container_files(sourceDir, ".cpp", maxElements, containers, containersWithoutMap) | [
"def",
"create_input_for_numbered_sequences",
"(",
"headerDir",
",",
"sourceDir",
",",
"containers",
",",
"maxElements",
")",
":",
"# Create additional container-list without \"map\".",
"containersWithoutMap",
"=",
"containers",
"[",
":",
"]",
"try",
":",
"containersWithoutMap",
".",
"remove",
"(",
"'map'",
")",
"except",
"ValueError",
":",
"# We can safely ignore if \"map\" is not contained in 'containers'!",
"pass",
"# Create header/source-files.",
"create_more_container_files",
"(",
"headerDir",
",",
"\".hpp\"",
",",
"maxElements",
",",
"containers",
",",
"containersWithoutMap",
")",
"create_more_container_files",
"(",
"sourceDir",
",",
"\".cpp\"",
",",
"maxElements",
",",
"containers",
",",
"containersWithoutMap",
")"
] | Creates additional source- and header-files for the numbered sequence MPL-containers. | [
"Creates",
"additional",
"source",
"-",
"and",
"header",
"-",
"files",
"for",
"the",
"numbered",
"sequence",
"MPL",
"-",
"containers",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py#L56-L67 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py | adjust_container_limits_for_variadic_sequences | def adjust_container_limits_for_variadic_sequences(headerDir, containers, maxElements):
"""Adjusts the limits of variadic sequence MPL-containers."""
for container in containers:
headerFile = os.path.join( headerDir, "limits", container + ".hpp" )
regexMatch = r'(define\s+BOOST_MPL_LIMIT_' + container.upper() + r'_SIZE\s+)[0-9]+'
regexReplace = r'\g<1>' + re.escape( str(maxElements) )
for line in fileinput.input( headerFile, inplace=1, mode="rU" ):
line = re.sub(regexMatch, regexReplace, line.rstrip())
print(line) | python | def adjust_container_limits_for_variadic_sequences(headerDir, containers, maxElements):
"""Adjusts the limits of variadic sequence MPL-containers."""
for container in containers:
headerFile = os.path.join( headerDir, "limits", container + ".hpp" )
regexMatch = r'(define\s+BOOST_MPL_LIMIT_' + container.upper() + r'_SIZE\s+)[0-9]+'
regexReplace = r'\g<1>' + re.escape( str(maxElements) )
for line in fileinput.input( headerFile, inplace=1, mode="rU" ):
line = re.sub(regexMatch, regexReplace, line.rstrip())
print(line) | [
"def",
"adjust_container_limits_for_variadic_sequences",
"(",
"headerDir",
",",
"containers",
",",
"maxElements",
")",
":",
"for",
"container",
"in",
"containers",
":",
"headerFile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"headerDir",
",",
"\"limits\"",
",",
"container",
"+",
"\".hpp\"",
")",
"regexMatch",
"=",
"r'(define\\s+BOOST_MPL_LIMIT_'",
"+",
"container",
".",
"upper",
"(",
")",
"+",
"r'_SIZE\\s+)[0-9]+'",
"regexReplace",
"=",
"r'\\g<1>'",
"+",
"re",
".",
"escape",
"(",
"str",
"(",
"maxElements",
")",
")",
"for",
"line",
"in",
"fileinput",
".",
"input",
"(",
"headerFile",
",",
"inplace",
"=",
"1",
",",
"mode",
"=",
"\"rU\"",
")",
":",
"line",
"=",
"re",
".",
"sub",
"(",
"regexMatch",
",",
"regexReplace",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"print",
"(",
"line",
")"
] | Adjusts the limits of variadic sequence MPL-containers. | [
"Adjusts",
"the",
"limits",
"of",
"variadic",
"sequence",
"MPL",
"-",
"containers",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py#L70-L78 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py | current_boost_dir | def current_boost_dir():
"""Returns the (relative) path to the Boost source-directory this file is located in (if any)."""
# Path to directory containing this script.
path = os.path.dirname( os.path.realpath(__file__) )
# Making sure it is located in "${boost-dir}/libs/mpl/preprocessed".
for directory in reversed( ["libs", "mpl", "preprocessed"] ):
(head, tail) = os.path.split(path)
if tail == directory:
path = head
else:
return None
return os.path.relpath( path ) | python | def current_boost_dir():
"""Returns the (relative) path to the Boost source-directory this file is located in (if any)."""
# Path to directory containing this script.
path = os.path.dirname( os.path.realpath(__file__) )
# Making sure it is located in "${boost-dir}/libs/mpl/preprocessed".
for directory in reversed( ["libs", "mpl", "preprocessed"] ):
(head, tail) = os.path.split(path)
if tail == directory:
path = head
else:
return None
return os.path.relpath( path ) | [
"def",
"current_boost_dir",
"(",
")",
":",
"# Path to directory containing this script.",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"# Making sure it is located in \"${boost-dir}/libs/mpl/preprocessed\".",
"for",
"directory",
"in",
"reversed",
"(",
"[",
"\"libs\"",
",",
"\"mpl\"",
",",
"\"preprocessed\"",
"]",
")",
":",
"(",
"head",
",",
"tail",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"if",
"tail",
"==",
"directory",
":",
"path",
"=",
"head",
"else",
":",
"return",
"None",
"return",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
")"
] | Returns the (relative) path to the Boost source-directory this file is located in (if any). | [
"Returns",
"the",
"(",
"relative",
")",
"path",
"to",
"the",
"Boost",
"source",
"-",
"directory",
"this",
"file",
"is",
"located",
"in",
"(",
"if",
"any",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py#L81-L92 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py | to_positive_multiple_of_10 | def to_positive_multiple_of_10(string):
"""Converts a string into its encoded positive integer (greater zero) or throws an exception."""
try:
value = int(string)
except ValueError:
msg = '"%r" is not a positive multiple of 10 (greater zero).' % string
raise argparse.ArgumentTypeError(msg)
if value <= 0 or value % 10 != 0:
msg = '"%r" is not a positive multiple of 10 (greater zero).' % string
raise argparse.ArgumentTypeError(msg)
return value | python | def to_positive_multiple_of_10(string):
"""Converts a string into its encoded positive integer (greater zero) or throws an exception."""
try:
value = int(string)
except ValueError:
msg = '"%r" is not a positive multiple of 10 (greater zero).' % string
raise argparse.ArgumentTypeError(msg)
if value <= 0 or value % 10 != 0:
msg = '"%r" is not a positive multiple of 10 (greater zero).' % string
raise argparse.ArgumentTypeError(msg)
return value | [
"def",
"to_positive_multiple_of_10",
"(",
"string",
")",
":",
"try",
":",
"value",
"=",
"int",
"(",
"string",
")",
"except",
"ValueError",
":",
"msg",
"=",
"'\"%r\" is not a positive multiple of 10 (greater zero).'",
"%",
"string",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"msg",
")",
"if",
"value",
"<=",
"0",
"or",
"value",
"%",
"10",
"!=",
"0",
":",
"msg",
"=",
"'\"%r\" is not a positive multiple of 10 (greater zero).'",
"%",
"string",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"msg",
")",
"return",
"value"
] | Converts a string into its encoded positive integer (greater zero) or throws an exception. | [
"Converts",
"a",
"string",
"into",
"its",
"encoded",
"positive",
"integer",
"(",
"greater",
"zero",
")",
"or",
"throws",
"an",
"exception",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py#L96-L106 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py | main | def main():
"""The main function."""
# Find the current Boost source-directory in which this script is located.
sourceDir = current_boost_dir()
if sourceDir == None:
sourceDir = ""
# Prepare and run cmdline-parser.
cmdlineParser = argparse.ArgumentParser(description="A generator-script for pre-processed Boost.MPL headers.")
cmdlineParser.add_argument("-v", "--verbose", dest='verbose', action='store_true',
help="Be a little bit more verbose.")
cmdlineParser.add_argument("-s", "--sequence-type", dest='seqType', choices=['variadic', 'numbered', 'both'],
default='both',
help="Only update pre-processed headers for the selected sequence types, "
"either 'numbered' sequences, 'variadic' sequences or 'both' sequence "
"types. (Default=both)")
cmdlineParser.add_argument("--no-vector", dest='want_vector', action='store_false',
help="Do not update pre-processed headers for Boost.MPL Vector.")
cmdlineParser.add_argument("--no-list", dest='want_list', action='store_false',
help="Do not update pre-processed headers for Boost.MPL List.")
cmdlineParser.add_argument("--no-set", dest='want_set', action='store_false',
help="Do not update pre-processed headers for Boost.MPL Set.")
cmdlineParser.add_argument("--no-map", dest='want_map', action='store_false',
help="Do not update pre-processed headers for Boost.MPL Map.")
cmdlineParser.add_argument("--num-elements", dest='numElements', metavar="<num-elements>",
type=to_positive_multiple_of_10, default=100,
help="The maximal number of elements per container sequence. (Default=100)")
cmdlineParser.add_argument(dest='sourceDir', metavar="<source-dir>", default=current_boost_dir(), nargs='?',
type=to_existing_absolute_path,
help="The source-directory of Boost. (Default=\"" + sourceDir + "\")")
args = cmdlineParser.parse_args()
# Some verbose debug output.
if args.verbose:
print "Arguments extracted from command-line:"
print " verbose = ", args.verbose
print " source directory = ", args.sourceDir
print " num elements = ", args.numElements
print " sequence type = ", args.seqType
print " want: vector = ", args.want_vector
print " want: list = ", args.want_list
print " want: set = ", args.want_set
print " want: map = ", args.want_map
# Verify that we received any source-directory.
if args.sourceDir == None:
print "You should specify a valid path to the Boost source-directory."
sys.exit(0)
# The directories for header- and source files of Boost.MPL.
# NOTE: Assuming 'args.sourceDir' is the source-directory of the entire boost project.
headerDir = os.path.join( args.sourceDir, "boost", "mpl" )
sourceDir = os.path.join( args.sourceDir, "libs", "mpl", "preprocessed" )
# Check that the header/source-directories exist.
if not os.path.exists( headerDir ) or not os.path.exists( sourceDir ):
# Maybe 'args.sourceDir' is not the source-directory of the entire boost project
# but instead of the Boost.MPL git-directory, only?
headerDir = os.path.join( args.sourceDir, "include", "boost", "mpl" )
sourceDir = os.path.join( args.sourceDir, "preprocessed" )
if not os.path.exists( headerDir ) or not os.path.exists( sourceDir ):
cmdlineParser.print_usage()
print "error: Cannot find Boost.MPL header/source files in given Boost source-directory!"
sys.exit(0)
# Some verbose debug output.
if args.verbose:
print "Chosen header-directory: ", headerDir
print "Chosen source-directory: ", sourceDir
# Create list of containers for which files shall be pre-processed.
containers = []
if args.want_vector:
containers.append('vector')
if args.want_list:
containers.append('list')
if args.want_set:
containers.append('set')
if args.want_map:
containers.append('map')
if containers == []:
print "Nothing to do."
print "(Why did you prevent generating pre-processed headers for all Boost.MPL container types?)"
sys.exit(0)
# Possibly fix the header-comments of input-files needed for pre-processing.
if args.verbose:
print "Checking if prior to pre-processing some input-files need fixing."
needFixing = fixmpl.check_input_files(headerDir, sourceDir, containers, args.seqType, args.verbose)
if needFixing:
if args.verbose:
print "Fixing of some input-files prior to pre-processing is needed."
print "Will fix them now!"
fixmpl.fix_input_files(headerDir, sourceDir, containers, args.seqType, args.verbose)
# Some verbose debug output.
if args.verbose:
print "Containers for which to pre-process headers: ", containers
# Create (additional) input files for generating pre-processed headers of numbered sequence MPL containers.
if args.seqType == "both" or args.seqType == "numbered":
create_input_for_numbered_sequences(headerDir, sourceDir, containers, args.numElements)
# Modify settings for generating pre-processed headers of variadic sequence MPL containers.
if args.seqType == "both" or args.seqType == "variadic":
adjust_container_limits_for_variadic_sequences(headerDir, containers, args.numElements)
# Generate MPL-preprocessed files.
os.chdir( sourceDir )
if args.seqType == "both" or args.seqType == "numbered":
if args.want_vector:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered vectors."
os.system( "python " + os.path.join( sourceDir, "preprocess_vector.py" ) + " all " + args.sourceDir )
if args.want_list:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered lists."
os.system( "python " + os.path.join( sourceDir, "preprocess_list.py" ) + " all " + args.sourceDir )
if args.want_set:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered sets."
os.system( "python " + os.path.join( sourceDir, "preprocess_set.py" ) + " all " + args.sourceDir )
if args.want_map:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered maps."
os.system( "python " + os.path.join( sourceDir, "preprocess_map.py" ) + " all " + args.sourceDir )
if args.seqType == "both" or args.seqType == "variadic":
if args.verbose:
print "Pre-process headers for Boost.MPL variadic containers."
os.system( "python " + os.path.join( sourceDir, "preprocess.py" ) + " all " + args.sourceDir ) | python | def main():
"""The main function."""
# Find the current Boost source-directory in which this script is located.
sourceDir = current_boost_dir()
if sourceDir == None:
sourceDir = ""
# Prepare and run cmdline-parser.
cmdlineParser = argparse.ArgumentParser(description="A generator-script for pre-processed Boost.MPL headers.")
cmdlineParser.add_argument("-v", "--verbose", dest='verbose', action='store_true',
help="Be a little bit more verbose.")
cmdlineParser.add_argument("-s", "--sequence-type", dest='seqType', choices=['variadic', 'numbered', 'both'],
default='both',
help="Only update pre-processed headers for the selected sequence types, "
"either 'numbered' sequences, 'variadic' sequences or 'both' sequence "
"types. (Default=both)")
cmdlineParser.add_argument("--no-vector", dest='want_vector', action='store_false',
help="Do not update pre-processed headers for Boost.MPL Vector.")
cmdlineParser.add_argument("--no-list", dest='want_list', action='store_false',
help="Do not update pre-processed headers for Boost.MPL List.")
cmdlineParser.add_argument("--no-set", dest='want_set', action='store_false',
help="Do not update pre-processed headers for Boost.MPL Set.")
cmdlineParser.add_argument("--no-map", dest='want_map', action='store_false',
help="Do not update pre-processed headers for Boost.MPL Map.")
cmdlineParser.add_argument("--num-elements", dest='numElements', metavar="<num-elements>",
type=to_positive_multiple_of_10, default=100,
help="The maximal number of elements per container sequence. (Default=100)")
cmdlineParser.add_argument(dest='sourceDir', metavar="<source-dir>", default=current_boost_dir(), nargs='?',
type=to_existing_absolute_path,
help="The source-directory of Boost. (Default=\"" + sourceDir + "\")")
args = cmdlineParser.parse_args()
# Some verbose debug output.
if args.verbose:
print "Arguments extracted from command-line:"
print " verbose = ", args.verbose
print " source directory = ", args.sourceDir
print " num elements = ", args.numElements
print " sequence type = ", args.seqType
print " want: vector = ", args.want_vector
print " want: list = ", args.want_list
print " want: set = ", args.want_set
print " want: map = ", args.want_map
# Verify that we received any source-directory.
if args.sourceDir == None:
print "You should specify a valid path to the Boost source-directory."
sys.exit(0)
# The directories for header- and source files of Boost.MPL.
# NOTE: Assuming 'args.sourceDir' is the source-directory of the entire boost project.
headerDir = os.path.join( args.sourceDir, "boost", "mpl" )
sourceDir = os.path.join( args.sourceDir, "libs", "mpl", "preprocessed" )
# Check that the header/source-directories exist.
if not os.path.exists( headerDir ) or not os.path.exists( sourceDir ):
# Maybe 'args.sourceDir' is not the source-directory of the entire boost project
# but instead of the Boost.MPL git-directory, only?
headerDir = os.path.join( args.sourceDir, "include", "boost", "mpl" )
sourceDir = os.path.join( args.sourceDir, "preprocessed" )
if not os.path.exists( headerDir ) or not os.path.exists( sourceDir ):
cmdlineParser.print_usage()
print "error: Cannot find Boost.MPL header/source files in given Boost source-directory!"
sys.exit(0)
# Some verbose debug output.
if args.verbose:
print "Chosen header-directory: ", headerDir
print "Chosen source-directory: ", sourceDir
# Create list of containers for which files shall be pre-processed.
containers = []
if args.want_vector:
containers.append('vector')
if args.want_list:
containers.append('list')
if args.want_set:
containers.append('set')
if args.want_map:
containers.append('map')
if containers == []:
print "Nothing to do."
print "(Why did you prevent generating pre-processed headers for all Boost.MPL container types?)"
sys.exit(0)
# Possibly fix the header-comments of input-files needed for pre-processing.
if args.verbose:
print "Checking if prior to pre-processing some input-files need fixing."
needFixing = fixmpl.check_input_files(headerDir, sourceDir, containers, args.seqType, args.verbose)
if needFixing:
if args.verbose:
print "Fixing of some input-files prior to pre-processing is needed."
print "Will fix them now!"
fixmpl.fix_input_files(headerDir, sourceDir, containers, args.seqType, args.verbose)
# Some verbose debug output.
if args.verbose:
print "Containers for which to pre-process headers: ", containers
# Create (additional) input files for generating pre-processed headers of numbered sequence MPL containers.
if args.seqType == "both" or args.seqType == "numbered":
create_input_for_numbered_sequences(headerDir, sourceDir, containers, args.numElements)
# Modify settings for generating pre-processed headers of variadic sequence MPL containers.
if args.seqType == "both" or args.seqType == "variadic":
adjust_container_limits_for_variadic_sequences(headerDir, containers, args.numElements)
# Generate MPL-preprocessed files.
os.chdir( sourceDir )
if args.seqType == "both" or args.seqType == "numbered":
if args.want_vector:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered vectors."
os.system( "python " + os.path.join( sourceDir, "preprocess_vector.py" ) + " all " + args.sourceDir )
if args.want_list:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered lists."
os.system( "python " + os.path.join( sourceDir, "preprocess_list.py" ) + " all " + args.sourceDir )
if args.want_set:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered sets."
os.system( "python " + os.path.join( sourceDir, "preprocess_set.py" ) + " all " + args.sourceDir )
if args.want_map:
if args.verbose:
print "Pre-process headers for Boost.MPL numbered maps."
os.system( "python " + os.path.join( sourceDir, "preprocess_map.py" ) + " all " + args.sourceDir )
if args.seqType == "both" or args.seqType == "variadic":
if args.verbose:
print "Pre-process headers for Boost.MPL variadic containers."
os.system( "python " + os.path.join( sourceDir, "preprocess.py" ) + " all " + args.sourceDir ) | [
"def",
"main",
"(",
")",
":",
"# Find the current Boost source-directory in which this script is located.",
"sourceDir",
"=",
"current_boost_dir",
"(",
")",
"if",
"sourceDir",
"==",
"None",
":",
"sourceDir",
"=",
"\"\"",
"# Prepare and run cmdline-parser.",
"cmdlineParser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"A generator-script for pre-processed Boost.MPL headers.\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"dest",
"=",
"'verbose'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Be a little bit more verbose.\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--sequence-type\"",
",",
"dest",
"=",
"'seqType'",
",",
"choices",
"=",
"[",
"'variadic'",
",",
"'numbered'",
",",
"'both'",
"]",
",",
"default",
"=",
"'both'",
",",
"help",
"=",
"\"Only update pre-processed headers for the selected sequence types, \"",
"\"either 'numbered' sequences, 'variadic' sequences or 'both' sequence \"",
"\"types. (Default=both)\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"\"--no-vector\"",
",",
"dest",
"=",
"'want_vector'",
",",
"action",
"=",
"'store_false'",
",",
"help",
"=",
"\"Do not update pre-processed headers for Boost.MPL Vector.\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"\"--no-list\"",
",",
"dest",
"=",
"'want_list'",
",",
"action",
"=",
"'store_false'",
",",
"help",
"=",
"\"Do not update pre-processed headers for Boost.MPL List.\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"\"--no-set\"",
",",
"dest",
"=",
"'want_set'",
",",
"action",
"=",
"'store_false'",
",",
"help",
"=",
"\"Do not update pre-processed headers for Boost.MPL Set.\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"\"--no-map\"",
",",
"dest",
"=",
"'want_map'",
",",
"action",
"=",
"'store_false'",
",",
"help",
"=",
"\"Do not update pre-processed headers for Boost.MPL Map.\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"\"--num-elements\"",
",",
"dest",
"=",
"'numElements'",
",",
"metavar",
"=",
"\"<num-elements>\"",
",",
"type",
"=",
"to_positive_multiple_of_10",
",",
"default",
"=",
"100",
",",
"help",
"=",
"\"The maximal number of elements per container sequence. (Default=100)\"",
")",
"cmdlineParser",
".",
"add_argument",
"(",
"dest",
"=",
"'sourceDir'",
",",
"metavar",
"=",
"\"<source-dir>\"",
",",
"default",
"=",
"current_boost_dir",
"(",
")",
",",
"nargs",
"=",
"'?'",
",",
"type",
"=",
"to_existing_absolute_path",
",",
"help",
"=",
"\"The source-directory of Boost. (Default=\\\"\"",
"+",
"sourceDir",
"+",
"\"\\\")\"",
")",
"args",
"=",
"cmdlineParser",
".",
"parse_args",
"(",
")",
"# Some verbose debug output.",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Arguments extracted from command-line:\"",
"print",
"\" verbose = \"",
",",
"args",
".",
"verbose",
"print",
"\" source directory = \"",
",",
"args",
".",
"sourceDir",
"print",
"\" num elements = \"",
",",
"args",
".",
"numElements",
"print",
"\" sequence type = \"",
",",
"args",
".",
"seqType",
"print",
"\" want: vector = \"",
",",
"args",
".",
"want_vector",
"print",
"\" want: list = \"",
",",
"args",
".",
"want_list",
"print",
"\" want: set = \"",
",",
"args",
".",
"want_set",
"print",
"\" want: map = \"",
",",
"args",
".",
"want_map",
"# Verify that we received any source-directory.",
"if",
"args",
".",
"sourceDir",
"==",
"None",
":",
"print",
"\"You should specify a valid path to the Boost source-directory.\"",
"sys",
".",
"exit",
"(",
"0",
")",
"# The directories for header- and source files of Boost.MPL.",
"# NOTE: Assuming 'args.sourceDir' is the source-directory of the entire boost project.",
"headerDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"sourceDir",
",",
"\"boost\"",
",",
"\"mpl\"",
")",
"sourceDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"sourceDir",
",",
"\"libs\"",
",",
"\"mpl\"",
",",
"\"preprocessed\"",
")",
"# Check that the header/source-directories exist.",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"headerDir",
")",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sourceDir",
")",
":",
"# Maybe 'args.sourceDir' is not the source-directory of the entire boost project",
"# but instead of the Boost.MPL git-directory, only?",
"headerDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"sourceDir",
",",
"\"include\"",
",",
"\"boost\"",
",",
"\"mpl\"",
")",
"sourceDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"sourceDir",
",",
"\"preprocessed\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"headerDir",
")",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sourceDir",
")",
":",
"cmdlineParser",
".",
"print_usage",
"(",
")",
"print",
"\"error: Cannot find Boost.MPL header/source files in given Boost source-directory!\"",
"sys",
".",
"exit",
"(",
"0",
")",
"# Some verbose debug output.",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Chosen header-directory: \"",
",",
"headerDir",
"print",
"\"Chosen source-directory: \"",
",",
"sourceDir",
"# Create list of containers for which files shall be pre-processed.",
"containers",
"=",
"[",
"]",
"if",
"args",
".",
"want_vector",
":",
"containers",
".",
"append",
"(",
"'vector'",
")",
"if",
"args",
".",
"want_list",
":",
"containers",
".",
"append",
"(",
"'list'",
")",
"if",
"args",
".",
"want_set",
":",
"containers",
".",
"append",
"(",
"'set'",
")",
"if",
"args",
".",
"want_map",
":",
"containers",
".",
"append",
"(",
"'map'",
")",
"if",
"containers",
"==",
"[",
"]",
":",
"print",
"\"Nothing to do.\"",
"print",
"\"(Why did you prevent generating pre-processed headers for all Boost.MPL container types?)\"",
"sys",
".",
"exit",
"(",
"0",
")",
"# Possibly fix the header-comments of input-files needed for pre-processing.",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Checking if prior to pre-processing some input-files need fixing.\"",
"needFixing",
"=",
"fixmpl",
".",
"check_input_files",
"(",
"headerDir",
",",
"sourceDir",
",",
"containers",
",",
"args",
".",
"seqType",
",",
"args",
".",
"verbose",
")",
"if",
"needFixing",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Fixing of some input-files prior to pre-processing is needed.\"",
"print",
"\"Will fix them now!\"",
"fixmpl",
".",
"fix_input_files",
"(",
"headerDir",
",",
"sourceDir",
",",
"containers",
",",
"args",
".",
"seqType",
",",
"args",
".",
"verbose",
")",
"# Some verbose debug output.",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Containers for which to pre-process headers: \"",
",",
"containers",
"# Create (additional) input files for generating pre-processed headers of numbered sequence MPL containers.",
"if",
"args",
".",
"seqType",
"==",
"\"both\"",
"or",
"args",
".",
"seqType",
"==",
"\"numbered\"",
":",
"create_input_for_numbered_sequences",
"(",
"headerDir",
",",
"sourceDir",
",",
"containers",
",",
"args",
".",
"numElements",
")",
"# Modify settings for generating pre-processed headers of variadic sequence MPL containers.",
"if",
"args",
".",
"seqType",
"==",
"\"both\"",
"or",
"args",
".",
"seqType",
"==",
"\"variadic\"",
":",
"adjust_container_limits_for_variadic_sequences",
"(",
"headerDir",
",",
"containers",
",",
"args",
".",
"numElements",
")",
"# Generate MPL-preprocessed files.",
"os",
".",
"chdir",
"(",
"sourceDir",
")",
"if",
"args",
".",
"seqType",
"==",
"\"both\"",
"or",
"args",
".",
"seqType",
"==",
"\"numbered\"",
":",
"if",
"args",
".",
"want_vector",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Pre-process headers for Boost.MPL numbered vectors.\"",
"os",
".",
"system",
"(",
"\"python \"",
"+",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"\"preprocess_vector.py\"",
")",
"+",
"\" all \"",
"+",
"args",
".",
"sourceDir",
")",
"if",
"args",
".",
"want_list",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Pre-process headers for Boost.MPL numbered lists.\"",
"os",
".",
"system",
"(",
"\"python \"",
"+",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"\"preprocess_list.py\"",
")",
"+",
"\" all \"",
"+",
"args",
".",
"sourceDir",
")",
"if",
"args",
".",
"want_set",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Pre-process headers for Boost.MPL numbered sets.\"",
"os",
".",
"system",
"(",
"\"python \"",
"+",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"\"preprocess_set.py\"",
")",
"+",
"\" all \"",
"+",
"args",
".",
"sourceDir",
")",
"if",
"args",
".",
"want_map",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Pre-process headers for Boost.MPL numbered maps.\"",
"os",
".",
"system",
"(",
"\"python \"",
"+",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"\"preprocess_map.py\"",
")",
"+",
"\" all \"",
"+",
"args",
".",
"sourceDir",
")",
"if",
"args",
".",
"seqType",
"==",
"\"both\"",
"or",
"args",
".",
"seqType",
"==",
"\"variadic\"",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"\"Pre-process headers for Boost.MPL variadic containers.\"",
"os",
".",
"system",
"(",
"\"python \"",
"+",
"os",
".",
"path",
".",
"join",
"(",
"sourceDir",
",",
"\"preprocess.py\"",
")",
"+",
"\" all \"",
"+",
"args",
".",
"sourceDir",
")"
] | The main function. | [
"The",
"main",
"function",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py#L118-L246 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py | NeuralNetworkBuilder.add_inner_product | def add_inner_product(self, name, W, b, input_channels, output_channels, has_bias,
input_name, output_name, **kwargs):
"""
Add an inner product layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array or bytes()
Weight matrix of shape (output_channels, input_channels)
If W is of type bytes(), i.e. quantized, other quantization related arguments must be provided as well (see below).
b: numpy.array
Bias vector of shape (output_channels, ).
input_channels: int
Number of input channels.
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
Quantization arguments expected in kwargs, when W is of type bytes():
quantization_type : str
When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
nbits: int
Should be between 1 and 8 (inclusive). Number of bits per weight value. Only applicable when
weights are quantized.
quant_scale: numpy.array(dtype=numpy.float32)
scale vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_bias: numpy.array(dtype=numpy.float32)
bias vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_lut: numpy.array(dtype=numpy.float32)
the LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
See Also
--------
add_embedding, add_convolution
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.innerProduct
# Fill in the parameters
spec_layer_params.inputChannels = input_channels
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
if len(kwargs) == 0:
weights.floatValue.extend(map(float, W.flatten()))
else:
_verify_quantization_arguments(weight=W, output_channels=output_channels, **kwargs)
_fill_quantized_weights(weights_message=weights, W=W, **kwargs)
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten())) | python | def add_inner_product(self, name, W, b, input_channels, output_channels, has_bias,
input_name, output_name, **kwargs):
"""
Add an inner product layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array or bytes()
Weight matrix of shape (output_channels, input_channels)
If W is of type bytes(), i.e. quantized, other quantization related arguments must be provided as well (see below).
b: numpy.array
Bias vector of shape (output_channels, ).
input_channels: int
Number of input channels.
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
Quantization arguments expected in kwargs, when W is of type bytes():
quantization_type : str
When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
nbits: int
Should be between 1 and 8 (inclusive). Number of bits per weight value. Only applicable when
weights are quantized.
quant_scale: numpy.array(dtype=numpy.float32)
scale vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_bias: numpy.array(dtype=numpy.float32)
bias vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_lut: numpy.array(dtype=numpy.float32)
the LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
See Also
--------
add_embedding, add_convolution
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.innerProduct
# Fill in the parameters
spec_layer_params.inputChannels = input_channels
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
if len(kwargs) == 0:
weights.floatValue.extend(map(float, W.flatten()))
else:
_verify_quantization_arguments(weight=W, output_channels=output_channels, **kwargs)
_fill_quantized_weights(weights_message=weights, W=W, **kwargs)
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten())) | [
"def",
"add_inner_product",
"(",
"self",
",",
"name",
",",
"W",
",",
"b",
",",
"input_channels",
",",
"output_channels",
",",
"has_bias",
",",
"input_name",
",",
"output_name",
",",
"*",
"*",
"kwargs",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"innerProduct",
"# Fill in the parameters",
"spec_layer_params",
".",
"inputChannels",
"=",
"input_channels",
"spec_layer_params",
".",
"outputChannels",
"=",
"output_channels",
"spec_layer_params",
".",
"hasBias",
"=",
"has_bias",
"weights",
"=",
"spec_layer_params",
".",
"weights",
"if",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"weights",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W",
".",
"flatten",
"(",
")",
")",
")",
"else",
":",
"_verify_quantization_arguments",
"(",
"weight",
"=",
"W",
",",
"output_channels",
"=",
"output_channels",
",",
"*",
"*",
"kwargs",
")",
"_fill_quantized_weights",
"(",
"weights_message",
"=",
"weights",
",",
"W",
"=",
"W",
",",
"*",
"*",
"kwargs",
")",
"if",
"has_bias",
":",
"bias",
"=",
"spec_layer_params",
".",
"bias",
"bias",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b",
".",
"flatten",
"(",
")",
")",
")"
] | Add an inner product layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array or bytes()
Weight matrix of shape (output_channels, input_channels)
If W is of type bytes(), i.e. quantized, other quantization related arguments must be provided as well (see below).
b: numpy.array
Bias vector of shape (output_channels, ).
input_channels: int
Number of input channels.
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
Quantization arguments expected in kwargs, when W is of type bytes():
quantization_type : str
When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
nbits: int
Should be between 1 and 8 (inclusive). Number of bits per weight value. Only applicable when
weights are quantized.
quant_scale: numpy.array(dtype=numpy.float32)
scale vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_bias: numpy.array(dtype=numpy.float32)
bias vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_lut: numpy.array(dtype=numpy.float32)
the LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
See Also
--------
add_embedding, add_convolution | [
"Add",
"an",
"inner",
"product",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py#L394-L471 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py | NeuralNetworkBuilder.add_convolution | def add_convolution(self, name, kernel_channels, output_channels, height,
width, stride_height, stride_width, border_mode, groups, W, b, has_bias,
is_deconv = False, output_shape = None,
input_name = 'data', output_name = 'out',
dilation_factors = [1,1],
padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0,
same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY',
**kwargs):
"""
Add a convolution layer to the network.
Please see the ConvolutionLayerParams in Core ML neural network
protobuf message for more information about input and output blob dimensions.
Parameters
----------
name: str
The name of this layer.
kernel_channels: int
Number of channels for the convolution kernels.
output_channels: int
Number of filter kernels. This is equal to the number of channels in the output blob.
height: int
Height of each kernel.
width: int
Width of each kernel.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the height direction.
border_mode: str
Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
Kindly refer to NeuralNetwork.proto for details.
groups: int
Number of kernel groups. Input is divided into groups along the channel axis. Each kernel group share the same weights.
W: numpy.array or bytes()
Weight of the convolution kernels.
- If is_deconv is False, W should have shape (height, width, kernel_channels, output_channels), where kernel_channel = input_channels / groups
- If is_deconv is True, W should have shape (height, width, kernel_channels, output_channels / groups), where kernel_channel = input_channels
If W is of type bytes(), i.e. quantized, other quantization related arguments must be provided as well (see below).
b: numpy.array
Biases of the convolution kernels. b should have shape (outputChannels, ).
has_bias: boolean
Whether bias is ignored.
- If True, bias is not ignored.
- If False, bias is ignored.
is_deconv: boolean
Whether the convolution layer is performing a convolution or a transposed convolution (deconvolution).
- If True, the convolution layer is performing transposed convolution.
- If False, the convolution layer is performing regular convolution.
output_shape: tuple | None
Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True.
When is_deconv == False, this parameter is ignored.
If it is None, the output shape is calculated automatically using the border_mode.
Kindly refer to NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
dilation_factors: [int]
Dilation factors across height and width directions. Must be a list of two positive integers.
Defaults to [1,1]
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if border_more is "valid".
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when border_mode is 'same'.
Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
Kindly refer to NeuralNetwork.proto for details.
Depthwise convolution is a special case of convolution, where we have:
kernel_channels = 1 (== input_channels / groups)
output_channels = channel_multiplier * input_channels
groups = input_channels
W : [Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]
Quantization arguments expected in kwargs, when W is of type bytes():
quantization_type : str
When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
nbits: int
Should be between 1 and 8 (inclusive). Number of bits per weight value. Only applicable when
weights are quantized.
quant_scale: numpy.array(dtype=numpy.float32)
scale vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_bias: numpy.array(dtype=numpy.float32)
bias vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_lut: numpy.array(dtype=numpy.float32)
the LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
See Also
--------
add_pooling, add_activation, add_batchnorm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer.convolution.MergeFromString(b'') # hack to set empty message
# Set the layer params
spec_layer_params = spec_layer.convolution
spec_layer_params.isDeconvolution = is_deconv
if is_deconv and output_shape:
spec_layer_params.outputShape.append(output_shape[0])
spec_layer_params.outputShape.append(output_shape[1])
spec_layer_params.outputChannels = output_channels
spec_layer_params.kernelChannels = kernel_channels
spec_layer_params.kernelSize.append(height)
spec_layer_params.kernelSize.append(width)
spec_layer_params.stride.append(stride_height)
spec_layer_params.stride.append(stride_width)
if border_mode == 'valid':
height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = padding_top
height_border.endEdgeSize = padding_bottom
width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = padding_left
width_border.endEdgeSize = padding_right
elif border_mode == 'same':
if not (same_padding_asymmetry_mode == 'BOTTOM_RIGHT_HEAVY' or same_padding_asymmetry_mode == 'TOP_LEFT_HEAVY'):
raise ValueError("Invalid value %d of same_padding_asymmetry_mode parameter" % same_padding_asymmetry_mode)
spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode)
else:
raise NotImplementedError(
'Border mode %s is not implemented.' % border_mode)
spec_layer_params.nGroups = groups
spec_layer_params.hasBias = has_bias
if len(kwargs) > 0:
_verify_quantization_arguments(weight = W, output_channels=output_channels, **kwargs)
nbits = kwargs.get('nbits', 8)
num_weights = (output_channels * kernel_channels * height * width) / groups
if nbits < 8:
byte_arr = np.frombuffer(W, dtype=np.uint8)
W = unpack_to_bytes(byte_arr, num_weights, nbits)
else:
W = np.frombuffer(W, dtype=np.uint8)
if is_deconv:
W = np.reshape(W, (height, width, kernel_channels, output_channels / groups))
else:
W = np.reshape(W, (height, width, kernel_channels, output_channels))
# Weight alignment: MLModel Spec requires following weight arrangement:
# is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups
# is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels
if not is_deconv:
Wt = W.transpose((3,2,0,1))
Wt = Wt.flatten()
else:
Wt = W.transpose((2,3,0,1)).flatten()
# Assign weights
weights = spec_layer_params.weights
if len(kwargs) == 0: # no quantization
weights.floatValue.extend(map(float, Wt.flatten()))
else: # there is quantization
W_bytes = bytes()
if nbits == 8:
W_bytes += Wt.flatten().tobytes()
else:
W_bytes += _convert_array_to_nbit_quantized_bytes(Wt.flatten(), nbits).tobytes()
_fill_quantized_weights(weights_message = weights, W = W_bytes, **kwargs)
# Assign biases
if has_bias:
bias = spec_layer_params.bias
for f in range(output_channels):
bias.floatValue.append(float(b[f]))
# add dilation factors
spec_layer_params.dilationFactor.append(dilation_factors[0])
spec_layer_params.dilationFactor.append(dilation_factors[1]) | python | def add_convolution(self, name, kernel_channels, output_channels, height,
width, stride_height, stride_width, border_mode, groups, W, b, has_bias,
is_deconv = False, output_shape = None,
input_name = 'data', output_name = 'out',
dilation_factors = [1,1],
padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0,
same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY',
**kwargs):
"""
Add a convolution layer to the network.
Please see the ConvolutionLayerParams in Core ML neural network
protobuf message for more information about input and output blob dimensions.
Parameters
----------
name: str
The name of this layer.
kernel_channels: int
Number of channels for the convolution kernels.
output_channels: int
Number of filter kernels. This is equal to the number of channels in the output blob.
height: int
Height of each kernel.
width: int
Width of each kernel.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the height direction.
border_mode: str
Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
Kindly refer to NeuralNetwork.proto for details.
groups: int
Number of kernel groups. Input is divided into groups along the channel axis. Each kernel group share the same weights.
W: numpy.array or bytes()
Weight of the convolution kernels.
- If is_deconv is False, W should have shape (height, width, kernel_channels, output_channels), where kernel_channel = input_channels / groups
- If is_deconv is True, W should have shape (height, width, kernel_channels, output_channels / groups), where kernel_channel = input_channels
If W is of type bytes(), i.e. quantized, other quantization related arguments must be provided as well (see below).
b: numpy.array
Biases of the convolution kernels. b should have shape (outputChannels, ).
has_bias: boolean
Whether bias is ignored.
- If True, bias is not ignored.
- If False, bias is ignored.
is_deconv: boolean
Whether the convolution layer is performing a convolution or a transposed convolution (deconvolution).
- If True, the convolution layer is performing transposed convolution.
- If False, the convolution layer is performing regular convolution.
output_shape: tuple | None
Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True.
When is_deconv == False, this parameter is ignored.
If it is None, the output shape is calculated automatically using the border_mode.
Kindly refer to NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
dilation_factors: [int]
Dilation factors across height and width directions. Must be a list of two positive integers.
Defaults to [1,1]
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if border_more is "valid".
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when border_mode is 'same'.
Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
Kindly refer to NeuralNetwork.proto for details.
Depthwise convolution is a special case of convolution, where we have:
kernel_channels = 1 (== input_channels / groups)
output_channels = channel_multiplier * input_channels
groups = input_channels
W : [Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]
Quantization arguments expected in kwargs, when W is of type bytes():
quantization_type : str
When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
nbits: int
Should be between 1 and 8 (inclusive). Number of bits per weight value. Only applicable when
weights are quantized.
quant_scale: numpy.array(dtype=numpy.float32)
scale vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_bias: numpy.array(dtype=numpy.float32)
bias vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_lut: numpy.array(dtype=numpy.float32)
the LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
See Also
--------
add_pooling, add_activation, add_batchnorm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer.convolution.MergeFromString(b'') # hack to set empty message
# Set the layer params
spec_layer_params = spec_layer.convolution
spec_layer_params.isDeconvolution = is_deconv
if is_deconv and output_shape:
spec_layer_params.outputShape.append(output_shape[0])
spec_layer_params.outputShape.append(output_shape[1])
spec_layer_params.outputChannels = output_channels
spec_layer_params.kernelChannels = kernel_channels
spec_layer_params.kernelSize.append(height)
spec_layer_params.kernelSize.append(width)
spec_layer_params.stride.append(stride_height)
spec_layer_params.stride.append(stride_width)
if border_mode == 'valid':
height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = padding_top
height_border.endEdgeSize = padding_bottom
width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = padding_left
width_border.endEdgeSize = padding_right
elif border_mode == 'same':
if not (same_padding_asymmetry_mode == 'BOTTOM_RIGHT_HEAVY' or same_padding_asymmetry_mode == 'TOP_LEFT_HEAVY'):
raise ValueError("Invalid value %d of same_padding_asymmetry_mode parameter" % same_padding_asymmetry_mode)
spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode)
else:
raise NotImplementedError(
'Border mode %s is not implemented.' % border_mode)
spec_layer_params.nGroups = groups
spec_layer_params.hasBias = has_bias
if len(kwargs) > 0:
_verify_quantization_arguments(weight = W, output_channels=output_channels, **kwargs)
nbits = kwargs.get('nbits', 8)
num_weights = (output_channels * kernel_channels * height * width) / groups
if nbits < 8:
byte_arr = np.frombuffer(W, dtype=np.uint8)
W = unpack_to_bytes(byte_arr, num_weights, nbits)
else:
W = np.frombuffer(W, dtype=np.uint8)
if is_deconv:
W = np.reshape(W, (height, width, kernel_channels, output_channels / groups))
else:
W = np.reshape(W, (height, width, kernel_channels, output_channels))
# Weight alignment: MLModel Spec requires following weight arrangement:
# is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups
# is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels
if not is_deconv:
Wt = W.transpose((3,2,0,1))
Wt = Wt.flatten()
else:
Wt = W.transpose((2,3,0,1)).flatten()
# Assign weights
weights = spec_layer_params.weights
if len(kwargs) == 0: # no quantization
weights.floatValue.extend(map(float, Wt.flatten()))
else: # there is quantization
W_bytes = bytes()
if nbits == 8:
W_bytes += Wt.flatten().tobytes()
else:
W_bytes += _convert_array_to_nbit_quantized_bytes(Wt.flatten(), nbits).tobytes()
_fill_quantized_weights(weights_message = weights, W = W_bytes, **kwargs)
# Assign biases
if has_bias:
bias = spec_layer_params.bias
for f in range(output_channels):
bias.floatValue.append(float(b[f]))
# add dilation factors
spec_layer_params.dilationFactor.append(dilation_factors[0])
spec_layer_params.dilationFactor.append(dilation_factors[1]) | [
"def",
"add_convolution",
"(",
"self",
",",
"name",
",",
"kernel_channels",
",",
"output_channels",
",",
"height",
",",
"width",
",",
"stride_height",
",",
"stride_width",
",",
"border_mode",
",",
"groups",
",",
"W",
",",
"b",
",",
"has_bias",
",",
"is_deconv",
"=",
"False",
",",
"output_shape",
"=",
"None",
",",
"input_name",
"=",
"'data'",
",",
"output_name",
"=",
"'out'",
",",
"dilation_factors",
"=",
"[",
"1",
",",
"1",
"]",
",",
"padding_top",
"=",
"0",
",",
"padding_bottom",
"=",
"0",
",",
"padding_left",
"=",
"0",
",",
"padding_right",
"=",
"0",
",",
"same_padding_asymmetry_mode",
"=",
"'BOTTOM_RIGHT_HEAVY'",
",",
"*",
"*",
"kwargs",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer",
".",
"convolution",
".",
"MergeFromString",
"(",
"b''",
")",
"# hack to set empty message",
"# Set the layer params",
"spec_layer_params",
"=",
"spec_layer",
".",
"convolution",
"spec_layer_params",
".",
"isDeconvolution",
"=",
"is_deconv",
"if",
"is_deconv",
"and",
"output_shape",
":",
"spec_layer_params",
".",
"outputShape",
".",
"append",
"(",
"output_shape",
"[",
"0",
"]",
")",
"spec_layer_params",
".",
"outputShape",
".",
"append",
"(",
"output_shape",
"[",
"1",
"]",
")",
"spec_layer_params",
".",
"outputChannels",
"=",
"output_channels",
"spec_layer_params",
".",
"kernelChannels",
"=",
"kernel_channels",
"spec_layer_params",
".",
"kernelSize",
".",
"append",
"(",
"height",
")",
"spec_layer_params",
".",
"kernelSize",
".",
"append",
"(",
"width",
")",
"spec_layer_params",
".",
"stride",
".",
"append",
"(",
"stride_height",
")",
"spec_layer_params",
".",
"stride",
".",
"append",
"(",
"stride_width",
")",
"if",
"border_mode",
"==",
"'valid'",
":",
"height_border",
"=",
"spec_layer_params",
".",
"valid",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"height_border",
".",
"startEdgeSize",
"=",
"padding_top",
"height_border",
".",
"endEdgeSize",
"=",
"padding_bottom",
"width_border",
"=",
"spec_layer_params",
".",
"valid",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"width_border",
".",
"startEdgeSize",
"=",
"padding_left",
"width_border",
".",
"endEdgeSize",
"=",
"padding_right",
"elif",
"border_mode",
"==",
"'same'",
":",
"if",
"not",
"(",
"same_padding_asymmetry_mode",
"==",
"'BOTTOM_RIGHT_HEAVY'",
"or",
"same_padding_asymmetry_mode",
"==",
"'TOP_LEFT_HEAVY'",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value %d of same_padding_asymmetry_mode parameter\"",
"%",
"same_padding_asymmetry_mode",
")",
"spec_layer_params",
".",
"same",
".",
"asymmetryMode",
"=",
"_NeuralNetwork_pb2",
".",
"SamePadding",
".",
"SamePaddingMode",
".",
"Value",
"(",
"same_padding_asymmetry_mode",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Border mode %s is not implemented.'",
"%",
"border_mode",
")",
"spec_layer_params",
".",
"nGroups",
"=",
"groups",
"spec_layer_params",
".",
"hasBias",
"=",
"has_bias",
"if",
"len",
"(",
"kwargs",
")",
">",
"0",
":",
"_verify_quantization_arguments",
"(",
"weight",
"=",
"W",
",",
"output_channels",
"=",
"output_channels",
",",
"*",
"*",
"kwargs",
")",
"nbits",
"=",
"kwargs",
".",
"get",
"(",
"'nbits'",
",",
"8",
")",
"num_weights",
"=",
"(",
"output_channels",
"*",
"kernel_channels",
"*",
"height",
"*",
"width",
")",
"/",
"groups",
"if",
"nbits",
"<",
"8",
":",
"byte_arr",
"=",
"np",
".",
"frombuffer",
"(",
"W",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"W",
"=",
"unpack_to_bytes",
"(",
"byte_arr",
",",
"num_weights",
",",
"nbits",
")",
"else",
":",
"W",
"=",
"np",
".",
"frombuffer",
"(",
"W",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"if",
"is_deconv",
":",
"W",
"=",
"np",
".",
"reshape",
"(",
"W",
",",
"(",
"height",
",",
"width",
",",
"kernel_channels",
",",
"output_channels",
"/",
"groups",
")",
")",
"else",
":",
"W",
"=",
"np",
".",
"reshape",
"(",
"W",
",",
"(",
"height",
",",
"width",
",",
"kernel_channels",
",",
"output_channels",
")",
")",
"# Weight alignment: MLModel Spec requires following weight arrangement:",
"# is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups",
"# is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels",
"if",
"not",
"is_deconv",
":",
"Wt",
"=",
"W",
".",
"transpose",
"(",
"(",
"3",
",",
"2",
",",
"0",
",",
"1",
")",
")",
"Wt",
"=",
"Wt",
".",
"flatten",
"(",
")",
"else",
":",
"Wt",
"=",
"W",
".",
"transpose",
"(",
"(",
"2",
",",
"3",
",",
"0",
",",
"1",
")",
")",
".",
"flatten",
"(",
")",
"# Assign weights",
"weights",
"=",
"spec_layer_params",
".",
"weights",
"if",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"# no quantization",
"weights",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"Wt",
".",
"flatten",
"(",
")",
")",
")",
"else",
":",
"# there is quantization",
"W_bytes",
"=",
"bytes",
"(",
")",
"if",
"nbits",
"==",
"8",
":",
"W_bytes",
"+=",
"Wt",
".",
"flatten",
"(",
")",
".",
"tobytes",
"(",
")",
"else",
":",
"W_bytes",
"+=",
"_convert_array_to_nbit_quantized_bytes",
"(",
"Wt",
".",
"flatten",
"(",
")",
",",
"nbits",
")",
".",
"tobytes",
"(",
")",
"_fill_quantized_weights",
"(",
"weights_message",
"=",
"weights",
",",
"W",
"=",
"W_bytes",
",",
"*",
"*",
"kwargs",
")",
"# Assign biases",
"if",
"has_bias",
":",
"bias",
"=",
"spec_layer_params",
".",
"bias",
"for",
"f",
"in",
"range",
"(",
"output_channels",
")",
":",
"bias",
".",
"floatValue",
".",
"append",
"(",
"float",
"(",
"b",
"[",
"f",
"]",
")",
")",
"# add dilation factors",
"spec_layer_params",
".",
"dilationFactor",
".",
"append",
"(",
"dilation_factors",
"[",
"0",
"]",
")",
"spec_layer_params",
".",
"dilationFactor",
".",
"append",
"(",
"dilation_factors",
"[",
"1",
"]",
")"
] | Add a convolution layer to the network.
Please see the ConvolutionLayerParams in Core ML neural network
protobuf message for more information about input and output blob dimensions.
Parameters
----------
name: str
The name of this layer.
kernel_channels: int
Number of channels for the convolution kernels.
output_channels: int
Number of filter kernels. This is equal to the number of channels in the output blob.
height: int
Height of each kernel.
width: int
Width of each kernel.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the height direction.
border_mode: str
Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
Kindly refer to NeuralNetwork.proto for details.
groups: int
Number of kernel groups. Input is divided into groups along the channel axis. Each kernel group share the same weights.
W: numpy.array or bytes()
Weight of the convolution kernels.
- If is_deconv is False, W should have shape (height, width, kernel_channels, output_channels), where kernel_channel = input_channels / groups
- If is_deconv is True, W should have shape (height, width, kernel_channels, output_channels / groups), where kernel_channel = input_channels
If W is of type bytes(), i.e. quantized, other quantization related arguments must be provided as well (see below).
b: numpy.array
Biases of the convolution kernels. b should have shape (outputChannels, ).
has_bias: boolean
Whether bias is ignored.
- If True, bias is not ignored.
- If False, bias is ignored.
is_deconv: boolean
Whether the convolution layer is performing a convolution or a transposed convolution (deconvolution).
- If True, the convolution layer is performing transposed convolution.
- If False, the convolution layer is performing regular convolution.
output_shape: tuple | None
Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True.
When is_deconv == False, this parameter is ignored.
If it is None, the output shape is calculated automatically using the border_mode.
Kindly refer to NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
dilation_factors: [int]
Dilation factors across height and width directions. Must be a list of two positive integers.
Defaults to [1,1]
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if border_more is "valid".
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when border_mode is 'same'.
Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
Kindly refer to NeuralNetwork.proto for details.
Depthwise convolution is a special case of convolution, where we have:
kernel_channels = 1 (== input_channels / groups)
output_channels = channel_multiplier * input_channels
groups = input_channels
W : [Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]
Quantization arguments expected in kwargs, when W is of type bytes():
quantization_type : str
When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
nbits: int
Should be between 1 and 8 (inclusive). Number of bits per weight value. Only applicable when
weights are quantized.
quant_scale: numpy.array(dtype=numpy.float32)
scale vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_bias: numpy.array(dtype=numpy.float32)
bias vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_lut: numpy.array(dtype=numpy.float32)
the LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
See Also
--------
add_pooling, add_activation, add_batchnorm | [
"Add",
"a",
"convolution",
"layer",
"to",
"the",
"network",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py#L967-L1167 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py | NeuralNetworkBuilder.add_resize_bilinear | def add_resize_bilinear(self, name, input_name, output_name, target_height=1, target_width=1,
mode='ALIGN_ENDPOINTS_MODE'):
"""
Add resize bilinear layer to the model. A layer that resizes the input to a given spatial size using bilinear interpolation.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
See Also
--------
add_upsample
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.resizeBilinear
spec_layer_params.targetSize.append(target_height)
spec_layer_params.targetSize.append(target_width)
if mode == 'ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ALIGN_ENDPOINTS_MODE')
elif mode == 'STRICT_ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('STRICT_ALIGN_ENDPOINTS_MODE')
elif mode == 'UPSAMPLE_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('UPSAMPLE_MODE')
elif mode == 'ROI_ALIGN_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ROI_ALIGN_MODE')
else:
raise ValueError("Unspported resize bilinear mode %s" % mode) | python | def add_resize_bilinear(self, name, input_name, output_name, target_height=1, target_width=1,
mode='ALIGN_ENDPOINTS_MODE'):
"""
Add resize bilinear layer to the model. A layer that resizes the input to a given spatial size using bilinear interpolation.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
See Also
--------
add_upsample
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.resizeBilinear
spec_layer_params.targetSize.append(target_height)
spec_layer_params.targetSize.append(target_width)
if mode == 'ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ALIGN_ENDPOINTS_MODE')
elif mode == 'STRICT_ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('STRICT_ALIGN_ENDPOINTS_MODE')
elif mode == 'UPSAMPLE_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('UPSAMPLE_MODE')
elif mode == 'ROI_ALIGN_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ROI_ALIGN_MODE')
else:
raise ValueError("Unspported resize bilinear mode %s" % mode) | [
"def",
"add_resize_bilinear",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"target_height",
"=",
"1",
",",
"target_width",
"=",
"1",
",",
"mode",
"=",
"'ALIGN_ENDPOINTS_MODE'",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new inner-product layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"resizeBilinear",
"spec_layer_params",
".",
"targetSize",
".",
"append",
"(",
"target_height",
")",
"spec_layer_params",
".",
"targetSize",
".",
"append",
"(",
"target_width",
")",
"if",
"mode",
"==",
"'ALIGN_ENDPOINTS_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'ALIGN_ENDPOINTS_MODE'",
")",
"elif",
"mode",
"==",
"'STRICT_ALIGN_ENDPOINTS_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'STRICT_ALIGN_ENDPOINTS_MODE'",
")",
"elif",
"mode",
"==",
"'UPSAMPLE_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'UPSAMPLE_MODE'",
")",
"elif",
"mode",
"==",
"'ROI_ALIGN_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'ROI_ALIGN_MODE'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unspported resize bilinear mode %s\"",
"%",
"mode",
")"
] | Add resize bilinear layer to the model. A layer that resizes the input to a given spatial size using bilinear interpolation.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
See Also
--------
add_upsample | [
"Add",
"resize",
"bilinear",
"layer",
"to",
"the",
"model",
".",
"A",
"layer",
"that",
"resizes",
"the",
"input",
"to",
"a",
"given",
"spatial",
"size",
"using",
"bilinear",
"interpolation",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py#L2612-L2657 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py | NeuralNetworkBuilder.add_crop_resize | def add_crop_resize(self, name, input_names, output_name, target_height=1, target_width=1,
mode='STRICT_ALIGN_ENDPOINTS_MODE',
normalized_roi=False,
box_indices_mode='CORNERS_HEIGHT_FIRST',
spatial_scale=1.0):
"""
Add crop resize layer to the model. A layer that extracts cropped spatial patches or RoIs (regions of interest)
from the input and resizes them to a pre-specified size using bilinear interpolation.
Note that RoI Align layer can be implemented with this layer followed by a pooling layer.
Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
Must be a list of two names: image feature map and crop indices/RoI input.
First input corresponds to a blob with shape ``[1, Batch, C, H_in, W_in]``. This represents a batch of input image feature data with C channels.
The second input shape must be ``[N, 1, 4, 1, 1]`` or ``[N, 1, 5, 1, 1]``. This represents the bounding box coordinates for N patches/RoIs.
N: number of patches/RoIs to be extracted
If RoI shape = [N, 1, 4, 1, 1]
The channel axis corresponds to the four coordinates specifying the bounding box.
All the N RoIs are extracted from all the batches of the input.
If RoI shape = [N, 1, 5, 1, 1]
The first element of the channel axis specifies the input batch id from which to extract the RoI and
must be in the interval ``[0, Batch - 1]``. That is, n-th RoI is extracted from the RoI[n,0,0,0]-th input batch id.
The last four elements of the channel axis specify the bounding box coordinates.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
normalized_roi: bool
If true the bounding box coordinates must be in the interval [0, 1].
They are scaled by (input_height - 1), (input_width - 1), i.e. based on the input spatial dimensions.
If false the bounding box coordinates must be in the interval
[0, input_height - 1] and [0, input_width - 1], respectively for height and width dimensions.
box_indices_mode: str
Following values are supported: 'CORNERS_HEIGHT_FIRST', 'CORNERS_WIDTH_FIRST', 'CENTER_SIZE_HEIGHT_FIRST', 'CENTER_SIZE_WIDTH_FIRST'
Representation used to interpret the bounding box coordinates (RoI) input. Kindly refer to NeuralNetwork.proto for details.
'CORNERS_HEIGHT_FIRST': [h_start, w_start, h_end, w_end]
'CORNERS_WIDTH_FIRST': [w_start, h_start, w_end, h_end]
'CENTER_SIZE_HEIGHT_FIRST': [h_center, w_center, box_height, box_width]
'CENTER_SIZE_WIDTH_FIRST': [w_center, h_center, box_width, box_height]
spatial_scale: float
Additional spatial scale that multiplies the bounding box coordinates.
Generally used while implementing the RoI Align layer,
which uses unnormalized RoI coordinates along with a spatial scale less than or equal to 1.
See Also
--------
add_resize_bilinear, add_crop
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
if len(input_names) != 2:
raise ValueError("crop resize layer must have exactly two inputs")
for input_name in input_names:
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.cropResize
spec_layer_params.targetSize.append(target_height)
spec_layer_params.targetSize.append(target_width)
spec_layer_params.normalizedCoordinates = normalized_roi
spec_layer_params.spatialScale = spatial_scale
if mode == 'ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ALIGN_ENDPOINTS_MODE')
elif mode == 'STRICT_ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('STRICT_ALIGN_ENDPOINTS_MODE')
elif mode == 'UPSAMPLE_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('UPSAMPLE_MODE')
elif mode == 'ROI_ALIGN_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ROI_ALIGN_MODE')
else:
raise ValueError("Unuspported crop resize mode %s" % mode)
if box_indices_mode == 'CORNERS_HEIGHT_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CORNERS_HEIGHT_FIRST')
elif box_indices_mode == 'CORNERS_WIDTH_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CORNERS_WIDTH_FIRST')
elif box_indices_mode == 'CENTER_SIZE_HEIGHT_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CENTER_SIZE_HEIGHT_FIRST')
elif box_indices_mode == 'CENTER_SIZE_WIDTH_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CENTER_SIZE_WIDTH_FIRST')
else:
raise ValueError("Unsupported crop resize box indices mode %s" % box_indices_mode) | python | def add_crop_resize(self, name, input_names, output_name, target_height=1, target_width=1,
mode='STRICT_ALIGN_ENDPOINTS_MODE',
normalized_roi=False,
box_indices_mode='CORNERS_HEIGHT_FIRST',
spatial_scale=1.0):
"""
Add crop resize layer to the model. A layer that extracts cropped spatial patches or RoIs (regions of interest)
from the input and resizes them to a pre-specified size using bilinear interpolation.
Note that RoI Align layer can be implemented with this layer followed by a pooling layer.
Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
Must be a list of two names: image feature map and crop indices/RoI input.
First input corresponds to a blob with shape ``[1, Batch, C, H_in, W_in]``. This represents a batch of input image feature data with C channels.
The second input shape must be ``[N, 1, 4, 1, 1]`` or ``[N, 1, 5, 1, 1]``. This represents the bounding box coordinates for N patches/RoIs.
N: number of patches/RoIs to be extracted
If RoI shape = [N, 1, 4, 1, 1]
The channel axis corresponds to the four coordinates specifying the bounding box.
All the N RoIs are extracted from all the batches of the input.
If RoI shape = [N, 1, 5, 1, 1]
The first element of the channel axis specifies the input batch id from which to extract the RoI and
must be in the interval ``[0, Batch - 1]``. That is, n-th RoI is extracted from the RoI[n,0,0,0]-th input batch id.
The last four elements of the channel axis specify the bounding box coordinates.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
normalized_roi: bool
If true the bounding box coordinates must be in the interval [0, 1].
They are scaled by (input_height - 1), (input_width - 1), i.e. based on the input spatial dimensions.
If false the bounding box coordinates must be in the interval
[0, input_height - 1] and [0, input_width - 1], respectively for height and width dimensions.
box_indices_mode: str
Following values are supported: 'CORNERS_HEIGHT_FIRST', 'CORNERS_WIDTH_FIRST', 'CENTER_SIZE_HEIGHT_FIRST', 'CENTER_SIZE_WIDTH_FIRST'
Representation used to interpret the bounding box coordinates (RoI) input. Kindly refer to NeuralNetwork.proto for details.
'CORNERS_HEIGHT_FIRST': [h_start, w_start, h_end, w_end]
'CORNERS_WIDTH_FIRST': [w_start, h_start, w_end, h_end]
'CENTER_SIZE_HEIGHT_FIRST': [h_center, w_center, box_height, box_width]
'CENTER_SIZE_WIDTH_FIRST': [w_center, h_center, box_width, box_height]
spatial_scale: float
Additional spatial scale that multiplies the bounding box coordinates.
Generally used while implementing the RoI Align layer,
which uses unnormalized RoI coordinates along with a spatial scale less than or equal to 1.
See Also
--------
add_resize_bilinear, add_crop
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
if len(input_names) != 2:
raise ValueError("crop resize layer must have exactly two inputs")
for input_name in input_names:
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.cropResize
spec_layer_params.targetSize.append(target_height)
spec_layer_params.targetSize.append(target_width)
spec_layer_params.normalizedCoordinates = normalized_roi
spec_layer_params.spatialScale = spatial_scale
if mode == 'ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ALIGN_ENDPOINTS_MODE')
elif mode == 'STRICT_ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('STRICT_ALIGN_ENDPOINTS_MODE')
elif mode == 'UPSAMPLE_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('UPSAMPLE_MODE')
elif mode == 'ROI_ALIGN_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ROI_ALIGN_MODE')
else:
raise ValueError("Unuspported crop resize mode %s" % mode)
if box_indices_mode == 'CORNERS_HEIGHT_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CORNERS_HEIGHT_FIRST')
elif box_indices_mode == 'CORNERS_WIDTH_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CORNERS_WIDTH_FIRST')
elif box_indices_mode == 'CENTER_SIZE_HEIGHT_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CENTER_SIZE_HEIGHT_FIRST')
elif box_indices_mode == 'CENTER_SIZE_WIDTH_FIRST':
spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value('CENTER_SIZE_WIDTH_FIRST')
else:
raise ValueError("Unsupported crop resize box indices mode %s" % box_indices_mode) | [
"def",
"add_crop_resize",
"(",
"self",
",",
"name",
",",
"input_names",
",",
"output_name",
",",
"target_height",
"=",
"1",
",",
"target_width",
"=",
"1",
",",
"mode",
"=",
"'STRICT_ALIGN_ENDPOINTS_MODE'",
",",
"normalized_roi",
"=",
"False",
",",
"box_indices_mode",
"=",
"'CORNERS_HEIGHT_FIRST'",
",",
"spatial_scale",
"=",
"1.0",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new inner-product layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"if",
"len",
"(",
"input_names",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"crop resize layer must have exactly two inputs\"",
")",
"for",
"input_name",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"cropResize",
"spec_layer_params",
".",
"targetSize",
".",
"append",
"(",
"target_height",
")",
"spec_layer_params",
".",
"targetSize",
".",
"append",
"(",
"target_width",
")",
"spec_layer_params",
".",
"normalizedCoordinates",
"=",
"normalized_roi",
"spec_layer_params",
".",
"spatialScale",
"=",
"spatial_scale",
"if",
"mode",
"==",
"'ALIGN_ENDPOINTS_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'ALIGN_ENDPOINTS_MODE'",
")",
"elif",
"mode",
"==",
"'STRICT_ALIGN_ENDPOINTS_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'STRICT_ALIGN_ENDPOINTS_MODE'",
")",
"elif",
"mode",
"==",
"'UPSAMPLE_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'UPSAMPLE_MODE'",
")",
"elif",
"mode",
"==",
"'ROI_ALIGN_MODE'",
":",
"spec_layer_params",
".",
"mode",
".",
"samplingMethod",
"=",
"_NeuralNetwork_pb2",
".",
"SamplingMode",
".",
"Method",
".",
"Value",
"(",
"'ROI_ALIGN_MODE'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unuspported crop resize mode %s\"",
"%",
"mode",
")",
"if",
"box_indices_mode",
"==",
"'CORNERS_HEIGHT_FIRST'",
":",
"spec_layer_params",
".",
"boxIndicesMode",
".",
"boxMode",
"=",
"_NeuralNetwork_pb2",
".",
"BoxCoordinatesMode",
".",
"Coordinates",
".",
"Value",
"(",
"'CORNERS_HEIGHT_FIRST'",
")",
"elif",
"box_indices_mode",
"==",
"'CORNERS_WIDTH_FIRST'",
":",
"spec_layer_params",
".",
"boxIndicesMode",
".",
"boxMode",
"=",
"_NeuralNetwork_pb2",
".",
"BoxCoordinatesMode",
".",
"Coordinates",
".",
"Value",
"(",
"'CORNERS_WIDTH_FIRST'",
")",
"elif",
"box_indices_mode",
"==",
"'CENTER_SIZE_HEIGHT_FIRST'",
":",
"spec_layer_params",
".",
"boxIndicesMode",
".",
"boxMode",
"=",
"_NeuralNetwork_pb2",
".",
"BoxCoordinatesMode",
".",
"Coordinates",
".",
"Value",
"(",
"'CENTER_SIZE_HEIGHT_FIRST'",
")",
"elif",
"box_indices_mode",
"==",
"'CENTER_SIZE_WIDTH_FIRST'",
":",
"spec_layer_params",
".",
"boxIndicesMode",
".",
"boxMode",
"=",
"_NeuralNetwork_pb2",
".",
"BoxCoordinatesMode",
".",
"Coordinates",
".",
"Value",
"(",
"'CENTER_SIZE_WIDTH_FIRST'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported crop resize box indices mode %s\"",
"%",
"box_indices_mode",
")"
] | Add crop resize layer to the model. A layer that extracts cropped spatial patches or RoIs (regions of interest)
from the input and resizes them to a pre-specified size using bilinear interpolation.
Note that RoI Align layer can be implemented with this layer followed by a pooling layer.
Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
Must be a list of two names: image feature map and crop indices/RoI input.
First input corresponds to a blob with shape ``[1, Batch, C, H_in, W_in]``. This represents a batch of input image feature data with C channels.
The second input shape must be ``[N, 1, 4, 1, 1]`` or ``[N, 1, 5, 1, 1]``. This represents the bounding box coordinates for N patches/RoIs.
N: number of patches/RoIs to be extracted
If RoI shape = [N, 1, 4, 1, 1]
The channel axis corresponds to the four coordinates specifying the bounding box.
All the N RoIs are extracted from all the batches of the input.
If RoI shape = [N, 1, 5, 1, 1]
The first element of the channel axis specifies the input batch id from which to extract the RoI and
must be in the interval ``[0, Batch - 1]``. That is, n-th RoI is extracted from the RoI[n,0,0,0]-th input batch id.
The last four elements of the channel axis specify the bounding box coordinates.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
normalized_roi: bool
If true the bounding box coordinates must be in the interval [0, 1].
They are scaled by (input_height - 1), (input_width - 1), i.e. based on the input spatial dimensions.
If false the bounding box coordinates must be in the interval
[0, input_height - 1] and [0, input_width - 1], respectively for height and width dimensions.
box_indices_mode: str
Following values are supported: 'CORNERS_HEIGHT_FIRST', 'CORNERS_WIDTH_FIRST', 'CENTER_SIZE_HEIGHT_FIRST', 'CENTER_SIZE_WIDTH_FIRST'
Representation used to interpret the bounding box coordinates (RoI) input. Kindly refer to NeuralNetwork.proto for details.
'CORNERS_HEIGHT_FIRST': [h_start, w_start, h_end, w_end]
'CORNERS_WIDTH_FIRST': [w_start, h_start, w_end, h_end]
'CENTER_SIZE_HEIGHT_FIRST': [h_center, w_center, box_height, box_width]
'CENTER_SIZE_WIDTH_FIRST': [w_center, h_center, box_width, box_height]
spatial_scale: float
Additional spatial scale that multiplies the bounding box coordinates.
Generally used while implementing the RoI Align layer,
which uses unnormalized RoI coordinates along with a spatial scale less than or equal to 1.
See Also
--------
add_resize_bilinear, add_crop | [
"Add",
"crop",
"resize",
"layer",
"to",
"the",
"model",
".",
"A",
"layer",
"that",
"extracts",
"cropped",
"spatial",
"patches",
"or",
"RoIs",
"(",
"regions",
"of",
"interest",
")",
"from",
"the",
"input",
"and",
"resizes",
"them",
"to",
"a",
"pre",
"-",
"specified",
"size",
"using",
"bilinear",
"interpolation",
".",
"Note",
"that",
"RoI",
"Align",
"layer",
"can",
"be",
"implemented",
"with",
"this",
"layer",
"followed",
"by",
"a",
"pooling",
"layer",
".",
"Kindly",
"refer",
"to",
"NeuralNetwork",
".",
"proto",
"for",
"details",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py#L2659-L2753 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _toolkit_serialize_summary_struct | def _toolkit_serialize_summary_struct(model, sections, section_titles):
"""
Serialize model summary into a dict with ordered lists of sections and section titles
Parameters
----------
model : Model object
sections : Ordered list of lists (sections) of tuples (field,value)
[
[(field1, value1), (field2, value2)],
[(field3, value3), (field4, value4)],
]
section_titles : Ordered list of section titles
Returns
-------
output_dict : A dict with two entries:
'sections' : ordered list with tuples of the form ('label',value)
'section_titles' : ordered list of section labels
"""
output_dict = dict()
output_dict['sections'] = [ [ ( field[0], __extract_model_summary_value(model, field[1]) ) \
for field in section ]
for section in sections ]
output_dict['section_titles'] = section_titles
return output_dict | python | def _toolkit_serialize_summary_struct(model, sections, section_titles):
"""
Serialize model summary into a dict with ordered lists of sections and section titles
Parameters
----------
model : Model object
sections : Ordered list of lists (sections) of tuples (field,value)
[
[(field1, value1), (field2, value2)],
[(field3, value3), (field4, value4)],
]
section_titles : Ordered list of section titles
Returns
-------
output_dict : A dict with two entries:
'sections' : ordered list with tuples of the form ('label',value)
'section_titles' : ordered list of section labels
"""
output_dict = dict()
output_dict['sections'] = [ [ ( field[0], __extract_model_summary_value(model, field[1]) ) \
for field in section ]
for section in sections ]
output_dict['section_titles'] = section_titles
return output_dict | [
"def",
"_toolkit_serialize_summary_struct",
"(",
"model",
",",
"sections",
",",
"section_titles",
")",
":",
"output_dict",
"=",
"dict",
"(",
")",
"output_dict",
"[",
"'sections'",
"]",
"=",
"[",
"[",
"(",
"field",
"[",
"0",
"]",
",",
"__extract_model_summary_value",
"(",
"model",
",",
"field",
"[",
"1",
"]",
")",
")",
"for",
"field",
"in",
"section",
"]",
"for",
"section",
"in",
"sections",
"]",
"output_dict",
"[",
"'section_titles'",
"]",
"=",
"section_titles",
"return",
"output_dict"
] | Serialize model summary into a dict with ordered lists of sections and section titles
Parameters
----------
model : Model object
sections : Ordered list of lists (sections) of tuples (field,value)
[
[(field1, value1), (field2, value2)],
[(field3, value3), (field4, value4)],
]
section_titles : Ordered list of section titles
Returns
-------
output_dict : A dict with two entries:
'sections' : ordered list with tuples of the form ('label',value)
'section_titles' : ordered list of section labels | [
"Serialize",
"model",
"summary",
"into",
"a",
"dict",
"with",
"ordered",
"lists",
"of",
"sections",
"and",
"section",
"titles"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L34-L61 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _add_docstring | def _add_docstring(format_dict):
"""
Format a doc-string on the fly.
@arg format_dict: A dictionary to format the doc-strings
Example:
@add_docstring({'context': __doc_string_context})
def predict(x):
'''
{context}
>> model.predict(data)
'''
return x
"""
def add_docstring_context(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__doc__ = func.__doc__.format(**format_dict)
return wrapper
return add_docstring_context | python | def _add_docstring(format_dict):
"""
Format a doc-string on the fly.
@arg format_dict: A dictionary to format the doc-strings
Example:
@add_docstring({'context': __doc_string_context})
def predict(x):
'''
{context}
>> model.predict(data)
'''
return x
"""
def add_docstring_context(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__doc__ = func.__doc__.format(**format_dict)
return wrapper
return add_docstring_context | [
"def",
"_add_docstring",
"(",
"format_dict",
")",
":",
"def",
"add_docstring_context",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"wrapper",
".",
"__doc__",
"=",
"func",
".",
"__doc__",
".",
"format",
"(",
"*",
"*",
"format_dict",
")",
"return",
"wrapper",
"return",
"add_docstring_context"
] | Format a doc-string on the fly.
@arg format_dict: A dictionary to format the doc-strings
Example:
@add_docstring({'context': __doc_string_context})
def predict(x):
'''
{context}
>> model.predict(data)
'''
return x | [
"Format",
"a",
"doc",
"-",
"string",
"on",
"the",
"fly",
".",
"@arg",
"format_dict",
":",
"A",
"dictionary",
"to",
"format",
"the",
"doc",
"-",
"strings",
"Example",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L64-L83 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _find_only_column_of_type | def _find_only_column_of_type(sframe, target_type, type_name, col_name):
"""
Finds the only column in `SFrame` with a type specified by `target_type`.
If there are zero or more than one such columns, an exception will be
raised. The name and type of the target column should be provided as
strings for the purpose of error feedback.
"""
image_column_name = None
if type(target_type) != list:
target_type = [target_type]
for name, ctype in zip(sframe.column_names(), sframe.column_types()):
if ctype in target_type:
if image_column_name is not None:
raise ToolkitError('No "{col_name}" column specified and more than one {type_name} column in "dataset". Can not infer correct {col_name} column.'.format(col_name=col_name, type_name=type_name))
image_column_name = name
if image_column_name is None:
raise ToolkitError('No %s column in "dataset".' % type_name)
return image_column_name | python | def _find_only_column_of_type(sframe, target_type, type_name, col_name):
"""
Finds the only column in `SFrame` with a type specified by `target_type`.
If there are zero or more than one such columns, an exception will be
raised. The name and type of the target column should be provided as
strings for the purpose of error feedback.
"""
image_column_name = None
if type(target_type) != list:
target_type = [target_type]
for name, ctype in zip(sframe.column_names(), sframe.column_types()):
if ctype in target_type:
if image_column_name is not None:
raise ToolkitError('No "{col_name}" column specified and more than one {type_name} column in "dataset". Can not infer correct {col_name} column.'.format(col_name=col_name, type_name=type_name))
image_column_name = name
if image_column_name is None:
raise ToolkitError('No %s column in "dataset".' % type_name)
return image_column_name | [
"def",
"_find_only_column_of_type",
"(",
"sframe",
",",
"target_type",
",",
"type_name",
",",
"col_name",
")",
":",
"image_column_name",
"=",
"None",
"if",
"type",
"(",
"target_type",
")",
"!=",
"list",
":",
"target_type",
"=",
"[",
"target_type",
"]",
"for",
"name",
",",
"ctype",
"in",
"zip",
"(",
"sframe",
".",
"column_names",
"(",
")",
",",
"sframe",
".",
"column_types",
"(",
")",
")",
":",
"if",
"ctype",
"in",
"target_type",
":",
"if",
"image_column_name",
"is",
"not",
"None",
":",
"raise",
"ToolkitError",
"(",
"'No \"{col_name}\" column specified and more than one {type_name} column in \"dataset\". Can not infer correct {col_name} column.'",
".",
"format",
"(",
"col_name",
"=",
"col_name",
",",
"type_name",
"=",
"type_name",
")",
")",
"image_column_name",
"=",
"name",
"if",
"image_column_name",
"is",
"None",
":",
"raise",
"ToolkitError",
"(",
"'No %s column in \"dataset\".'",
"%",
"type_name",
")",
"return",
"image_column_name"
] | Finds the only column in `SFrame` with a type specified by `target_type`.
If there are zero or more than one such columns, an exception will be
raised. The name and type of the target column should be provided as
strings for the purpose of error feedback. | [
"Finds",
"the",
"only",
"column",
"in",
"SFrame",
"with",
"a",
"type",
"specified",
"by",
"target_type",
".",
"If",
"there",
"are",
"zero",
"or",
"more",
"than",
"one",
"such",
"columns",
"an",
"exception",
"will",
"be",
"raised",
".",
"The",
"name",
"and",
"type",
"of",
"the",
"target",
"column",
"should",
"be",
"provided",
"as",
"strings",
"for",
"the",
"purpose",
"of",
"error",
"feedback",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L86-L103 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _find_only_image_column | def _find_only_image_column(sframe):
"""
Finds the only column in `sframe` with a type of turicreate.Image.
If there are zero or more than one image columns, an exception will
be raised.
"""
from turicreate import Image
return _find_only_column_of_type(sframe, target_type=Image,
type_name='image', col_name='feature') | python | def _find_only_image_column(sframe):
"""
Finds the only column in `sframe` with a type of turicreate.Image.
If there are zero or more than one image columns, an exception will
be raised.
"""
from turicreate import Image
return _find_only_column_of_type(sframe, target_type=Image,
type_name='image', col_name='feature') | [
"def",
"_find_only_image_column",
"(",
"sframe",
")",
":",
"from",
"turicreate",
"import",
"Image",
"return",
"_find_only_column_of_type",
"(",
"sframe",
",",
"target_type",
"=",
"Image",
",",
"type_name",
"=",
"'image'",
",",
"col_name",
"=",
"'feature'",
")"
] | Finds the only column in `sframe` with a type of turicreate.Image.
If there are zero or more than one image columns, an exception will
be raised. | [
"Finds",
"the",
"only",
"column",
"in",
"sframe",
"with",
"a",
"type",
"of",
"turicreate",
".",
"Image",
".",
"If",
"there",
"are",
"zero",
"or",
"more",
"than",
"one",
"image",
"columns",
"an",
"exception",
"will",
"be",
"raised",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L105-L113 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _find_only_drawing_column | def _find_only_drawing_column(sframe):
"""
Finds the only column that can be interpreted as a drawing feature column.
A drawing column can be a stroke-based drawing column (with dtype list)
or a bitmap-based drawing column (with dtype turicreate.Image)
If there are zero or more than one drawing columns, an exception will be
raised.
"""
from turicreate import Image
bitmap_success, stroke_success = False, False
bitmap_error, stroke_error = None, None
feature = None
try:
feature = _find_only_column_of_type(sframe,
target_type=Image, type_name='drawing', col_name='feature')
bitmap_success = True
except ToolkitError as err_from_bitmap_search:
bitmap_error = err_from_bitmap_search
try:
feature = _find_only_column_of_type(sframe,
target_type=list, type_name='drawing', col_name='feature')
stroke_success = True
except ToolkitError as err_from_stroke_search:
stroke_error = err_from_stroke_search
more_than_one_image_columns = ("more than one" in str(bitmap_error)
if not bitmap_success else False)
more_than_one_stroke_columns = ("more than one" in str(stroke_error)
if not stroke_success else False)
corrective_action_for_user = ("\nThe feature column must contain either "
+ "bitmap-based drawings or stroke-based drawings but not both.\n"
+ "Bitmap-based drawing input must be a grayscale "
+ "tc.Image of any size.\n"
+ "Stroke-based drawing input must be in the following format:\n"
+ "Every drawing must be represented by a list of strokes, where each "
+ "stroke must be a list of points in the order in which they were "
+ "drawn on the canvas. "
+ "Every point must be a dictionary with two keys, 'x' and 'y', and "
+ "their respective values must be numerical, "
+ "i.e. either integer or float.")
error_message = (lambda num1, type1, input1, num2, type2, input2:
(("No 'feature' column specified. Found {num1} column with type "
+ "{type1} (for {input1}-based drawing input) and "
+ "{num2} column with type {type2} (for {input2}-based drawing "
+ "input) in 'input_dataset'. "
+ "Can not infer correct 'feature' column.").format(
num1=num1, input1=input1, type1=type1,
num2=num2, input2=input2, type2=type2)
)
)
if (bitmap_success ^ stroke_success
and not more_than_one_image_columns
and not more_than_one_stroke_columns):
# success!
# found exactly one of bitmap-based drawing column and
# stroke-based drawing column, and found none of the other.
return feature
elif bitmap_success and stroke_success:
raise ToolkitError(error_message(
"one", "turicreate.Image", "bitmap", "one", "list", "stroke")
+ corrective_action_for_user)
else:
if more_than_one_image_columns and more_than_one_stroke_columns:
raise ToolkitError(error_message(
"more than one", "turicreate.Image", "bitmap",
"more than one", "list", "stroke")
+ corrective_action_for_user)
elif more_than_one_image_columns and not more_than_one_stroke_columns:
raise ToolkitError(error_message(
"more than one", "turicreate.Image", "bitmap",
"no", "list", "stroke")
+ corrective_action_for_user)
elif not more_than_one_image_columns and more_than_one_stroke_columns:
raise ToolkitError(error_message(
"more than one", "list", "stroke",
"no", "turicreate.Image", "bitmap")
+ corrective_action_for_user)
else:
raise ToolkitError(error_message(
"no", "list", "stroke",
"no", "turicreate.Image", "bitmap")
+ corrective_action_for_user) | python | def _find_only_drawing_column(sframe):
"""
Finds the only column that can be interpreted as a drawing feature column.
A drawing column can be a stroke-based drawing column (with dtype list)
or a bitmap-based drawing column (with dtype turicreate.Image)
If there are zero or more than one drawing columns, an exception will be
raised.
"""
from turicreate import Image
bitmap_success, stroke_success = False, False
bitmap_error, stroke_error = None, None
feature = None
try:
feature = _find_only_column_of_type(sframe,
target_type=Image, type_name='drawing', col_name='feature')
bitmap_success = True
except ToolkitError as err_from_bitmap_search:
bitmap_error = err_from_bitmap_search
try:
feature = _find_only_column_of_type(sframe,
target_type=list, type_name='drawing', col_name='feature')
stroke_success = True
except ToolkitError as err_from_stroke_search:
stroke_error = err_from_stroke_search
more_than_one_image_columns = ("more than one" in str(bitmap_error)
if not bitmap_success else False)
more_than_one_stroke_columns = ("more than one" in str(stroke_error)
if not stroke_success else False)
corrective_action_for_user = ("\nThe feature column must contain either "
+ "bitmap-based drawings or stroke-based drawings but not both.\n"
+ "Bitmap-based drawing input must be a grayscale "
+ "tc.Image of any size.\n"
+ "Stroke-based drawing input must be in the following format:\n"
+ "Every drawing must be represented by a list of strokes, where each "
+ "stroke must be a list of points in the order in which they were "
+ "drawn on the canvas. "
+ "Every point must be a dictionary with two keys, 'x' and 'y', and "
+ "their respective values must be numerical, "
+ "i.e. either integer or float.")
error_message = (lambda num1, type1, input1, num2, type2, input2:
(("No 'feature' column specified. Found {num1} column with type "
+ "{type1} (for {input1}-based drawing input) and "
+ "{num2} column with type {type2} (for {input2}-based drawing "
+ "input) in 'input_dataset'. "
+ "Can not infer correct 'feature' column.").format(
num1=num1, input1=input1, type1=type1,
num2=num2, input2=input2, type2=type2)
)
)
if (bitmap_success ^ stroke_success
and not more_than_one_image_columns
and not more_than_one_stroke_columns):
# success!
# found exactly one of bitmap-based drawing column and
# stroke-based drawing column, and found none of the other.
return feature
elif bitmap_success and stroke_success:
raise ToolkitError(error_message(
"one", "turicreate.Image", "bitmap", "one", "list", "stroke")
+ corrective_action_for_user)
else:
if more_than_one_image_columns and more_than_one_stroke_columns:
raise ToolkitError(error_message(
"more than one", "turicreate.Image", "bitmap",
"more than one", "list", "stroke")
+ corrective_action_for_user)
elif more_than_one_image_columns and not more_than_one_stroke_columns:
raise ToolkitError(error_message(
"more than one", "turicreate.Image", "bitmap",
"no", "list", "stroke")
+ corrective_action_for_user)
elif not more_than_one_image_columns and more_than_one_stroke_columns:
raise ToolkitError(error_message(
"more than one", "list", "stroke",
"no", "turicreate.Image", "bitmap")
+ corrective_action_for_user)
else:
raise ToolkitError(error_message(
"no", "list", "stroke",
"no", "turicreate.Image", "bitmap")
+ corrective_action_for_user) | [
"def",
"_find_only_drawing_column",
"(",
"sframe",
")",
":",
"from",
"turicreate",
"import",
"Image",
"bitmap_success",
",",
"stroke_success",
"=",
"False",
",",
"False",
"bitmap_error",
",",
"stroke_error",
"=",
"None",
",",
"None",
"feature",
"=",
"None",
"try",
":",
"feature",
"=",
"_find_only_column_of_type",
"(",
"sframe",
",",
"target_type",
"=",
"Image",
",",
"type_name",
"=",
"'drawing'",
",",
"col_name",
"=",
"'feature'",
")",
"bitmap_success",
"=",
"True",
"except",
"ToolkitError",
"as",
"err_from_bitmap_search",
":",
"bitmap_error",
"=",
"err_from_bitmap_search",
"try",
":",
"feature",
"=",
"_find_only_column_of_type",
"(",
"sframe",
",",
"target_type",
"=",
"list",
",",
"type_name",
"=",
"'drawing'",
",",
"col_name",
"=",
"'feature'",
")",
"stroke_success",
"=",
"True",
"except",
"ToolkitError",
"as",
"err_from_stroke_search",
":",
"stroke_error",
"=",
"err_from_stroke_search",
"more_than_one_image_columns",
"=",
"(",
"\"more than one\"",
"in",
"str",
"(",
"bitmap_error",
")",
"if",
"not",
"bitmap_success",
"else",
"False",
")",
"more_than_one_stroke_columns",
"=",
"(",
"\"more than one\"",
"in",
"str",
"(",
"stroke_error",
")",
"if",
"not",
"stroke_success",
"else",
"False",
")",
"corrective_action_for_user",
"=",
"(",
"\"\\nThe feature column must contain either \"",
"+",
"\"bitmap-based drawings or stroke-based drawings but not both.\\n\"",
"+",
"\"Bitmap-based drawing input must be a grayscale \"",
"+",
"\"tc.Image of any size.\\n\"",
"+",
"\"Stroke-based drawing input must be in the following format:\\n\"",
"+",
"\"Every drawing must be represented by a list of strokes, where each \"",
"+",
"\"stroke must be a list of points in the order in which they were \"",
"+",
"\"drawn on the canvas. \"",
"+",
"\"Every point must be a dictionary with two keys, 'x' and 'y', and \"",
"+",
"\"their respective values must be numerical, \"",
"+",
"\"i.e. either integer or float.\"",
")",
"error_message",
"=",
"(",
"lambda",
"num1",
",",
"type1",
",",
"input1",
",",
"num2",
",",
"type2",
",",
"input2",
":",
"(",
"(",
"\"No 'feature' column specified. Found {num1} column with type \"",
"+",
"\"{type1} (for {input1}-based drawing input) and \"",
"+",
"\"{num2} column with type {type2} (for {input2}-based drawing \"",
"+",
"\"input) in 'input_dataset'. \"",
"+",
"\"Can not infer correct 'feature' column.\"",
")",
".",
"format",
"(",
"num1",
"=",
"num1",
",",
"input1",
"=",
"input1",
",",
"type1",
"=",
"type1",
",",
"num2",
"=",
"num2",
",",
"input2",
"=",
"input2",
",",
"type2",
"=",
"type2",
")",
")",
")",
"if",
"(",
"bitmap_success",
"^",
"stroke_success",
"and",
"not",
"more_than_one_image_columns",
"and",
"not",
"more_than_one_stroke_columns",
")",
":",
"# success! ",
"# found exactly one of bitmap-based drawing column and",
"# stroke-based drawing column, and found none of the other.",
"return",
"feature",
"elif",
"bitmap_success",
"and",
"stroke_success",
":",
"raise",
"ToolkitError",
"(",
"error_message",
"(",
"\"one\"",
",",
"\"turicreate.Image\"",
",",
"\"bitmap\"",
",",
"\"one\"",
",",
"\"list\"",
",",
"\"stroke\"",
")",
"+",
"corrective_action_for_user",
")",
"else",
":",
"if",
"more_than_one_image_columns",
"and",
"more_than_one_stroke_columns",
":",
"raise",
"ToolkitError",
"(",
"error_message",
"(",
"\"more than one\"",
",",
"\"turicreate.Image\"",
",",
"\"bitmap\"",
",",
"\"more than one\"",
",",
"\"list\"",
",",
"\"stroke\"",
")",
"+",
"corrective_action_for_user",
")",
"elif",
"more_than_one_image_columns",
"and",
"not",
"more_than_one_stroke_columns",
":",
"raise",
"ToolkitError",
"(",
"error_message",
"(",
"\"more than one\"",
",",
"\"turicreate.Image\"",
",",
"\"bitmap\"",
",",
"\"no\"",
",",
"\"list\"",
",",
"\"stroke\"",
")",
"+",
"corrective_action_for_user",
")",
"elif",
"not",
"more_than_one_image_columns",
"and",
"more_than_one_stroke_columns",
":",
"raise",
"ToolkitError",
"(",
"error_message",
"(",
"\"more than one\"",
",",
"\"list\"",
",",
"\"stroke\"",
",",
"\"no\"",
",",
"\"turicreate.Image\"",
",",
"\"bitmap\"",
")",
"+",
"corrective_action_for_user",
")",
"else",
":",
"raise",
"ToolkitError",
"(",
"error_message",
"(",
"\"no\"",
",",
"\"list\"",
",",
"\"stroke\"",
",",
"\"no\"",
",",
"\"turicreate.Image\"",
",",
"\"bitmap\"",
")",
"+",
"corrective_action_for_user",
")"
] | Finds the only column that can be interpreted as a drawing feature column.
A drawing column can be a stroke-based drawing column (with dtype list)
or a bitmap-based drawing column (with dtype turicreate.Image)
If there are zero or more than one drawing columns, an exception will be
raised. | [
"Finds",
"the",
"only",
"column",
"that",
"can",
"be",
"interpreted",
"as",
"a",
"drawing",
"feature",
"column",
".",
"A",
"drawing",
"column",
"can",
"be",
"a",
"stroke",
"-",
"based",
"drawing",
"column",
"(",
"with",
"dtype",
"list",
")",
"or",
"a",
"bitmap",
"-",
"based",
"drawing",
"column",
"(",
"with",
"dtype",
"turicreate",
".",
"Image",
")",
"If",
"there",
"are",
"zero",
"or",
"more",
"than",
"one",
"drawing",
"columns",
"an",
"exception",
"will",
"be",
"raised",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L115-L201 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _SGraphFromJsonTree | def _SGraphFromJsonTree(json_str):
"""
Convert the Json Tree to SGraph
"""
g = json.loads(json_str)
vertices = [_Vertex(x['id'],
dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'id']))
for x in g['vertices']]
edges = [_Edge(x['src'], x['dst'],
dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'src' and k != 'dst']))
for x in g['edges']]
sg = _SGraph().add_vertices(vertices)
if len(edges) > 0:
sg = sg.add_edges(edges)
return sg | python | def _SGraphFromJsonTree(json_str):
"""
Convert the Json Tree to SGraph
"""
g = json.loads(json_str)
vertices = [_Vertex(x['id'],
dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'id']))
for x in g['vertices']]
edges = [_Edge(x['src'], x['dst'],
dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'src' and k != 'dst']))
for x in g['edges']]
sg = _SGraph().add_vertices(vertices)
if len(edges) > 0:
sg = sg.add_edges(edges)
return sg | [
"def",
"_SGraphFromJsonTree",
"(",
"json_str",
")",
":",
"g",
"=",
"json",
".",
"loads",
"(",
"json_str",
")",
"vertices",
"=",
"[",
"_Vertex",
"(",
"x",
"[",
"'id'",
"]",
",",
"dict",
"(",
"[",
"(",
"str",
"(",
"k",
")",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"_six",
".",
"iteritems",
"(",
"x",
")",
"if",
"k",
"!=",
"'id'",
"]",
")",
")",
"for",
"x",
"in",
"g",
"[",
"'vertices'",
"]",
"]",
"edges",
"=",
"[",
"_Edge",
"(",
"x",
"[",
"'src'",
"]",
",",
"x",
"[",
"'dst'",
"]",
",",
"dict",
"(",
"[",
"(",
"str",
"(",
"k",
")",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"_six",
".",
"iteritems",
"(",
"x",
")",
"if",
"k",
"!=",
"'src'",
"and",
"k",
"!=",
"'dst'",
"]",
")",
")",
"for",
"x",
"in",
"g",
"[",
"'edges'",
"]",
"]",
"sg",
"=",
"_SGraph",
"(",
")",
".",
"add_vertices",
"(",
"vertices",
")",
"if",
"len",
"(",
"edges",
")",
">",
"0",
":",
"sg",
"=",
"sg",
".",
"add_edges",
"(",
"edges",
")",
"return",
"sg"
] | Convert the Json Tree to SGraph | [
"Convert",
"the",
"Json",
"Tree",
"to",
"SGraph"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L203-L217 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _summarize_coefficients | def _summarize_coefficients(top_coefs, bottom_coefs):
"""
Return a tuple of sections and section titles.
Sections are pretty print of model coefficients
Parameters
----------
top_coefs : SFrame of top k coefficients
bottom_coefs : SFrame of bottom k coefficients
Returns
-------
(sections, section_titles) : tuple
sections : list
summary sections for top/bottom k coefficients
section_titles : list
summary section titles
"""
def get_row_name(row):
if row['index'] is None:
return row['name']
else:
return "%s[%s]" % (row['name'], row['index'])
if len(top_coefs) == 0:
top_coefs_list = [('No Positive Coefficients', _precomputed_field('') )]
else:
top_coefs_list = [ (get_row_name(row),
_precomputed_field(row['value'])) \
for row in top_coefs ]
if len(bottom_coefs) == 0:
bottom_coefs_list = [('No Negative Coefficients', _precomputed_field(''))]
else:
bottom_coefs_list = [ (get_row_name(row),
_precomputed_field(row['value'])) \
for row in bottom_coefs ]
return ([top_coefs_list, bottom_coefs_list], \
[ 'Highest Positive Coefficients', 'Lowest Negative Coefficients'] ) | python | def _summarize_coefficients(top_coefs, bottom_coefs):
"""
Return a tuple of sections and section titles.
Sections are pretty print of model coefficients
Parameters
----------
top_coefs : SFrame of top k coefficients
bottom_coefs : SFrame of bottom k coefficients
Returns
-------
(sections, section_titles) : tuple
sections : list
summary sections for top/bottom k coefficients
section_titles : list
summary section titles
"""
def get_row_name(row):
if row['index'] is None:
return row['name']
else:
return "%s[%s]" % (row['name'], row['index'])
if len(top_coefs) == 0:
top_coefs_list = [('No Positive Coefficients', _precomputed_field('') )]
else:
top_coefs_list = [ (get_row_name(row),
_precomputed_field(row['value'])) \
for row in top_coefs ]
if len(bottom_coefs) == 0:
bottom_coefs_list = [('No Negative Coefficients', _precomputed_field(''))]
else:
bottom_coefs_list = [ (get_row_name(row),
_precomputed_field(row['value'])) \
for row in bottom_coefs ]
return ([top_coefs_list, bottom_coefs_list], \
[ 'Highest Positive Coefficients', 'Lowest Negative Coefficients'] ) | [
"def",
"_summarize_coefficients",
"(",
"top_coefs",
",",
"bottom_coefs",
")",
":",
"def",
"get_row_name",
"(",
"row",
")",
":",
"if",
"row",
"[",
"'index'",
"]",
"is",
"None",
":",
"return",
"row",
"[",
"'name'",
"]",
"else",
":",
"return",
"\"%s[%s]\"",
"%",
"(",
"row",
"[",
"'name'",
"]",
",",
"row",
"[",
"'index'",
"]",
")",
"if",
"len",
"(",
"top_coefs",
")",
"==",
"0",
":",
"top_coefs_list",
"=",
"[",
"(",
"'No Positive Coefficients'",
",",
"_precomputed_field",
"(",
"''",
")",
")",
"]",
"else",
":",
"top_coefs_list",
"=",
"[",
"(",
"get_row_name",
"(",
"row",
")",
",",
"_precomputed_field",
"(",
"row",
"[",
"'value'",
"]",
")",
")",
"for",
"row",
"in",
"top_coefs",
"]",
"if",
"len",
"(",
"bottom_coefs",
")",
"==",
"0",
":",
"bottom_coefs_list",
"=",
"[",
"(",
"'No Negative Coefficients'",
",",
"_precomputed_field",
"(",
"''",
")",
")",
"]",
"else",
":",
"bottom_coefs_list",
"=",
"[",
"(",
"get_row_name",
"(",
"row",
")",
",",
"_precomputed_field",
"(",
"row",
"[",
"'value'",
"]",
")",
")",
"for",
"row",
"in",
"bottom_coefs",
"]",
"return",
"(",
"[",
"top_coefs_list",
",",
"bottom_coefs_list",
"]",
",",
"[",
"'Highest Positive Coefficients'",
",",
"'Lowest Negative Coefficients'",
"]",
")"
] | Return a tuple of sections and section titles.
Sections are pretty print of model coefficients
Parameters
----------
top_coefs : SFrame of top k coefficients
bottom_coefs : SFrame of bottom k coefficients
Returns
-------
(sections, section_titles) : tuple
sections : list
summary sections for top/bottom k coefficients
section_titles : list
summary section titles | [
"Return",
"a",
"tuple",
"of",
"sections",
"and",
"section",
"titles",
".",
"Sections",
"are",
"pretty",
"print",
"of",
"model",
"coefficients"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L223-L264 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _toolkit_get_topk_bottomk | def _toolkit_get_topk_bottomk(values, k=5):
"""
Returns a tuple of the top k values from the positive and
negative values in a SArray
Parameters
----------
values : SFrame of model coefficients
k: Maximum number of largest positive and k lowest negative numbers to return
Returns
-------
(topk_positive, bottomk_positive) : tuple
topk_positive : list
floats that represent the top 'k' ( or less ) positive
values
bottomk_positive : list
floats that represent the top 'k' ( or less ) negative
values
"""
top_values = values.topk('value', k=k)
top_values = top_values[top_values['value'] > 0]
bottom_values = values.topk('value', k=k, reverse=True)
bottom_values = bottom_values[bottom_values['value'] < 0]
return (top_values, bottom_values) | python | def _toolkit_get_topk_bottomk(values, k=5):
"""
Returns a tuple of the top k values from the positive and
negative values in a SArray
Parameters
----------
values : SFrame of model coefficients
k: Maximum number of largest positive and k lowest negative numbers to return
Returns
-------
(topk_positive, bottomk_positive) : tuple
topk_positive : list
floats that represent the top 'k' ( or less ) positive
values
bottomk_positive : list
floats that represent the top 'k' ( or less ) negative
values
"""
top_values = values.topk('value', k=k)
top_values = top_values[top_values['value'] > 0]
bottom_values = values.topk('value', k=k, reverse=True)
bottom_values = bottom_values[bottom_values['value'] < 0]
return (top_values, bottom_values) | [
"def",
"_toolkit_get_topk_bottomk",
"(",
"values",
",",
"k",
"=",
"5",
")",
":",
"top_values",
"=",
"values",
".",
"topk",
"(",
"'value'",
",",
"k",
"=",
"k",
")",
"top_values",
"=",
"top_values",
"[",
"top_values",
"[",
"'value'",
"]",
">",
"0",
"]",
"bottom_values",
"=",
"values",
".",
"topk",
"(",
"'value'",
",",
"k",
"=",
"k",
",",
"reverse",
"=",
"True",
")",
"bottom_values",
"=",
"bottom_values",
"[",
"bottom_values",
"[",
"'value'",
"]",
"<",
"0",
"]",
"return",
"(",
"top_values",
",",
"bottom_values",
")"
] | Returns a tuple of the top k values from the positive and
negative values in a SArray
Parameters
----------
values : SFrame of model coefficients
k: Maximum number of largest positive and k lowest negative numbers to return
Returns
-------
(topk_positive, bottomk_positive) : tuple
topk_positive : list
floats that represent the top 'k' ( or less ) positive
values
bottomk_positive : list
floats that represent the top 'k' ( or less ) negative
values | [
"Returns",
"a",
"tuple",
"of",
"the",
"top",
"k",
"values",
"from",
"the",
"positive",
"and",
"negative",
"values",
"in",
"a",
"SArray"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L266-L294 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | __extract_model_summary_value | def __extract_model_summary_value(model, value):
"""
Extract a model summary field value
"""
field_value = None
if isinstance(value, _precomputed_field):
field_value = value.field
else:
field_value = model._get(value)
if isinstance(field_value, float):
try:
field_value = round(field_value, 4)
except:
pass
return field_value | python | def __extract_model_summary_value(model, value):
"""
Extract a model summary field value
"""
field_value = None
if isinstance(value, _precomputed_field):
field_value = value.field
else:
field_value = model._get(value)
if isinstance(field_value, float):
try:
field_value = round(field_value, 4)
except:
pass
return field_value | [
"def",
"__extract_model_summary_value",
"(",
"model",
",",
"value",
")",
":",
"field_value",
"=",
"None",
"if",
"isinstance",
"(",
"value",
",",
"_precomputed_field",
")",
":",
"field_value",
"=",
"value",
".",
"field",
"else",
":",
"field_value",
"=",
"model",
".",
"_get",
"(",
"value",
")",
"if",
"isinstance",
"(",
"field_value",
",",
"float",
")",
":",
"try",
":",
"field_value",
"=",
"round",
"(",
"field_value",
",",
"4",
")",
"except",
":",
"pass",
"return",
"field_value"
] | Extract a model summary field value | [
"Extract",
"a",
"model",
"summary",
"field",
"value"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L320-L334 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _make_repr_table_from_sframe | def _make_repr_table_from_sframe(X):
"""
Serializes an SFrame to a list of strings, that, when printed, creates a well-formatted table.
"""
assert isinstance(X, _SFrame)
column_names = X.column_names()
out_data = [ [None]*len(column_names) for i in range(X.num_rows())]
column_sizes = [len(s) for s in column_names]
for i, c in enumerate(column_names):
for j, e in enumerate(X[c]):
out_data[j][i] = str(e)
column_sizes[i] = max(column_sizes[i], len(e))
# now, go through and pad everything.
out_data = ([ [cn.ljust(k, ' ') for cn, k in zip(column_names, column_sizes)],
["-"*k for k in column_sizes] ]
+ [ [e.ljust(k, ' ') for e, k in zip(row, column_sizes)] for row in out_data] )
return [' '.join(row) for row in out_data] | python | def _make_repr_table_from_sframe(X):
"""
Serializes an SFrame to a list of strings, that, when printed, creates a well-formatted table.
"""
assert isinstance(X, _SFrame)
column_names = X.column_names()
out_data = [ [None]*len(column_names) for i in range(X.num_rows())]
column_sizes = [len(s) for s in column_names]
for i, c in enumerate(column_names):
for j, e in enumerate(X[c]):
out_data[j][i] = str(e)
column_sizes[i] = max(column_sizes[i], len(e))
# now, go through and pad everything.
out_data = ([ [cn.ljust(k, ' ') for cn, k in zip(column_names, column_sizes)],
["-"*k for k in column_sizes] ]
+ [ [e.ljust(k, ' ') for e, k in zip(row, column_sizes)] for row in out_data] )
return [' '.join(row) for row in out_data] | [
"def",
"_make_repr_table_from_sframe",
"(",
"X",
")",
":",
"assert",
"isinstance",
"(",
"X",
",",
"_SFrame",
")",
"column_names",
"=",
"X",
".",
"column_names",
"(",
")",
"out_data",
"=",
"[",
"[",
"None",
"]",
"*",
"len",
"(",
"column_names",
")",
"for",
"i",
"in",
"range",
"(",
"X",
".",
"num_rows",
"(",
")",
")",
"]",
"column_sizes",
"=",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"column_names",
"]",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"column_names",
")",
":",
"for",
"j",
",",
"e",
"in",
"enumerate",
"(",
"X",
"[",
"c",
"]",
")",
":",
"out_data",
"[",
"j",
"]",
"[",
"i",
"]",
"=",
"str",
"(",
"e",
")",
"column_sizes",
"[",
"i",
"]",
"=",
"max",
"(",
"column_sizes",
"[",
"i",
"]",
",",
"len",
"(",
"e",
")",
")",
"# now, go through and pad everything.",
"out_data",
"=",
"(",
"[",
"[",
"cn",
".",
"ljust",
"(",
"k",
",",
"' '",
")",
"for",
"cn",
",",
"k",
"in",
"zip",
"(",
"column_names",
",",
"column_sizes",
")",
"]",
",",
"[",
"\"-\"",
"*",
"k",
"for",
"k",
"in",
"column_sizes",
"]",
"]",
"+",
"[",
"[",
"e",
".",
"ljust",
"(",
"k",
",",
"' '",
")",
"for",
"e",
",",
"k",
"in",
"zip",
"(",
"row",
",",
"column_sizes",
")",
"]",
"for",
"row",
"in",
"out_data",
"]",
")",
"return",
"[",
"' '",
".",
"join",
"(",
"row",
")",
"for",
"row",
"in",
"out_data",
"]"
] | Serializes an SFrame to a list of strings, that, when printed, creates a well-formatted table. | [
"Serializes",
"an",
"SFrame",
"to",
"a",
"list",
"of",
"strings",
"that",
"when",
"printed",
"creates",
"a",
"well",
"-",
"formatted",
"table",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L336-L359 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _toolkit_repr_print | def _toolkit_repr_print(model, fields, section_titles, width = None):
"""
Display a toolkit repr according to some simple rules.
Parameters
----------
model : Turi Create model
fields: List of lists of tuples
Each tuple should be (display_name, field_name), where field_name can
be a string or a _precomputed_field object.
section_titles: List of section titles, one per list in the fields arg.
Example
-------
model_fields = [
("L1 penalty", 'l1_penalty'),
("L2 penalty", 'l2_penalty'),
("Examples", 'num_examples'),
("Features", 'num_features'),
("Coefficients", 'num_coefficients')]
solver_fields = [
("Solver", 'solver'),
("Solver iterations", 'training_iterations'),
("Solver status", 'training_solver_status'),
("Training time (sec)", 'training_time')]
training_fields = [
("Log-likelihood", 'training_loss')]
fields = [model_fields, solver_fields, training_fields]:
section_titles = ['Model description',
'Solver description',
'Training information']
_toolkit_repr_print(model, fields, section_titles)
"""
assert len(section_titles) == len(fields), \
"The number of section titles ({0}) ".format(len(section_titles)) +\
"doesn't match the number of groups of fields, {0}.".format(len(fields))
out_fields = [ ("Class", model.__class__.__name__), ""]
# Record the max_width so that if width is not provided, we calculate it.
max_width = len("Class")
for index, (section_title, field_list) in enumerate(zip(section_titles, fields)):
# Add in the section header.
out_fields += [section_title, "-"*len(section_title)]
# Add in all the key-value pairs
for f in field_list:
if isinstance(f, tuple):
f = (str(f[0]), f[1])
out_fields.append( (f[0], __extract_model_summary_value(model, f[1])) )
max_width = max(max_width, len(f[0]))
elif isinstance(f, _SFrame):
out_fields.append("")
out_fields += _make_repr_table_from_sframe(f)
out_fields.append("")
else:
raise TypeError("Type of field %s not recognized." % str(f))
# Add in the empty footer.
out_fields.append("")
if width is None:
width = max_width
# Now, go through and format the key_value pairs nicely.
def format_key_pair(key, value):
if type(key) is list:
key = ','.join(str(k) for k in key)
return key.ljust(width, ' ') + ' : ' + str(value)
out_fields = [s if type(s) is str else format_key_pair(*s) for s in out_fields]
return '\n'.join(out_fields) | python | def _toolkit_repr_print(model, fields, section_titles, width = None):
"""
Display a toolkit repr according to some simple rules.
Parameters
----------
model : Turi Create model
fields: List of lists of tuples
Each tuple should be (display_name, field_name), where field_name can
be a string or a _precomputed_field object.
section_titles: List of section titles, one per list in the fields arg.
Example
-------
model_fields = [
("L1 penalty", 'l1_penalty'),
("L2 penalty", 'l2_penalty'),
("Examples", 'num_examples'),
("Features", 'num_features'),
("Coefficients", 'num_coefficients')]
solver_fields = [
("Solver", 'solver'),
("Solver iterations", 'training_iterations'),
("Solver status", 'training_solver_status'),
("Training time (sec)", 'training_time')]
training_fields = [
("Log-likelihood", 'training_loss')]
fields = [model_fields, solver_fields, training_fields]:
section_titles = ['Model description',
'Solver description',
'Training information']
_toolkit_repr_print(model, fields, section_titles)
"""
assert len(section_titles) == len(fields), \
"The number of section titles ({0}) ".format(len(section_titles)) +\
"doesn't match the number of groups of fields, {0}.".format(len(fields))
out_fields = [ ("Class", model.__class__.__name__), ""]
# Record the max_width so that if width is not provided, we calculate it.
max_width = len("Class")
for index, (section_title, field_list) in enumerate(zip(section_titles, fields)):
# Add in the section header.
out_fields += [section_title, "-"*len(section_title)]
# Add in all the key-value pairs
for f in field_list:
if isinstance(f, tuple):
f = (str(f[0]), f[1])
out_fields.append( (f[0], __extract_model_summary_value(model, f[1])) )
max_width = max(max_width, len(f[0]))
elif isinstance(f, _SFrame):
out_fields.append("")
out_fields += _make_repr_table_from_sframe(f)
out_fields.append("")
else:
raise TypeError("Type of field %s not recognized." % str(f))
# Add in the empty footer.
out_fields.append("")
if width is None:
width = max_width
# Now, go through and format the key_value pairs nicely.
def format_key_pair(key, value):
if type(key) is list:
key = ','.join(str(k) for k in key)
return key.ljust(width, ' ') + ' : ' + str(value)
out_fields = [s if type(s) is str else format_key_pair(*s) for s in out_fields]
return '\n'.join(out_fields) | [
"def",
"_toolkit_repr_print",
"(",
"model",
",",
"fields",
",",
"section_titles",
",",
"width",
"=",
"None",
")",
":",
"assert",
"len",
"(",
"section_titles",
")",
"==",
"len",
"(",
"fields",
")",
",",
"\"The number of section titles ({0}) \"",
".",
"format",
"(",
"len",
"(",
"section_titles",
")",
")",
"+",
"\"doesn't match the number of groups of fields, {0}.\"",
".",
"format",
"(",
"len",
"(",
"fields",
")",
")",
"out_fields",
"=",
"[",
"(",
"\"Class\"",
",",
"model",
".",
"__class__",
".",
"__name__",
")",
",",
"\"\"",
"]",
"# Record the max_width so that if width is not provided, we calculate it.",
"max_width",
"=",
"len",
"(",
"\"Class\"",
")",
"for",
"index",
",",
"(",
"section_title",
",",
"field_list",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"section_titles",
",",
"fields",
")",
")",
":",
"# Add in the section header.",
"out_fields",
"+=",
"[",
"section_title",
",",
"\"-\"",
"*",
"len",
"(",
"section_title",
")",
"]",
"# Add in all the key-value pairs",
"for",
"f",
"in",
"field_list",
":",
"if",
"isinstance",
"(",
"f",
",",
"tuple",
")",
":",
"f",
"=",
"(",
"str",
"(",
"f",
"[",
"0",
"]",
")",
",",
"f",
"[",
"1",
"]",
")",
"out_fields",
".",
"append",
"(",
"(",
"f",
"[",
"0",
"]",
",",
"__extract_model_summary_value",
"(",
"model",
",",
"f",
"[",
"1",
"]",
")",
")",
")",
"max_width",
"=",
"max",
"(",
"max_width",
",",
"len",
"(",
"f",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"f",
",",
"_SFrame",
")",
":",
"out_fields",
".",
"append",
"(",
"\"\"",
")",
"out_fields",
"+=",
"_make_repr_table_from_sframe",
"(",
"f",
")",
"out_fields",
".",
"append",
"(",
"\"\"",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Type of field %s not recognized.\"",
"%",
"str",
"(",
"f",
")",
")",
"# Add in the empty footer.",
"out_fields",
".",
"append",
"(",
"\"\"",
")",
"if",
"width",
"is",
"None",
":",
"width",
"=",
"max_width",
"# Now, go through and format the key_value pairs nicely.",
"def",
"format_key_pair",
"(",
"key",
",",
"value",
")",
":",
"if",
"type",
"(",
"key",
")",
"is",
"list",
":",
"key",
"=",
"','",
".",
"join",
"(",
"str",
"(",
"k",
")",
"for",
"k",
"in",
"key",
")",
"return",
"key",
".",
"ljust",
"(",
"width",
",",
"' '",
")",
"+",
"' : '",
"+",
"str",
"(",
"value",
")",
"out_fields",
"=",
"[",
"s",
"if",
"type",
"(",
"s",
")",
"is",
"str",
"else",
"format_key_pair",
"(",
"*",
"s",
")",
"for",
"s",
"in",
"out_fields",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"out_fields",
")"
] | Display a toolkit repr according to some simple rules.
Parameters
----------
model : Turi Create model
fields: List of lists of tuples
Each tuple should be (display_name, field_name), where field_name can
be a string or a _precomputed_field object.
section_titles: List of section titles, one per list in the fields arg.
Example
-------
model_fields = [
("L1 penalty", 'l1_penalty'),
("L2 penalty", 'l2_penalty'),
("Examples", 'num_examples'),
("Features", 'num_features'),
("Coefficients", 'num_coefficients')]
solver_fields = [
("Solver", 'solver'),
("Solver iterations", 'training_iterations'),
("Solver status", 'training_solver_status'),
("Training time (sec)", 'training_time')]
training_fields = [
("Log-likelihood", 'training_loss')]
fields = [model_fields, solver_fields, training_fields]:
section_titles = ['Model description',
'Solver description',
'Training information']
_toolkit_repr_print(model, fields, section_titles) | [
"Display",
"a",
"toolkit",
"repr",
"according",
"to",
"some",
"simple",
"rules",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L362-L446 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _map_unity_proxy_to_object | def _map_unity_proxy_to_object(value):
"""
Map returning value, if it is unity SFrame, SArray, map it
"""
vtype = type(value)
if vtype in _proxy_map:
return _proxy_map[vtype](value)
elif vtype == list:
return [_map_unity_proxy_to_object(v) for v in value]
elif vtype == dict:
return {k:_map_unity_proxy_to_object(v) for k,v in value.items()}
else:
return value | python | def _map_unity_proxy_to_object(value):
"""
Map returning value, if it is unity SFrame, SArray, map it
"""
vtype = type(value)
if vtype in _proxy_map:
return _proxy_map[vtype](value)
elif vtype == list:
return [_map_unity_proxy_to_object(v) for v in value]
elif vtype == dict:
return {k:_map_unity_proxy_to_object(v) for k,v in value.items()}
else:
return value | [
"def",
"_map_unity_proxy_to_object",
"(",
"value",
")",
":",
"vtype",
"=",
"type",
"(",
"value",
")",
"if",
"vtype",
"in",
"_proxy_map",
":",
"return",
"_proxy_map",
"[",
"vtype",
"]",
"(",
"value",
")",
"elif",
"vtype",
"==",
"list",
":",
"return",
"[",
"_map_unity_proxy_to_object",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]",
"elif",
"vtype",
"==",
"dict",
":",
"return",
"{",
"k",
":",
"_map_unity_proxy_to_object",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
"}",
"else",
":",
"return",
"value"
] | Map returning value, if it is unity SFrame, SArray, map it | [
"Map",
"returning",
"value",
"if",
"it",
"is",
"unity",
"SFrame",
"SArray",
"map",
"it"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L448-L460 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _toolkits_select_columns | def _toolkits_select_columns(dataset, columns):
"""
Same as select columns but redirect runtime error to ToolkitError.
"""
try:
return dataset.select_columns(columns)
except RuntimeError:
missing_features = list(set(columns).difference(set(dataset.column_names())))
raise ToolkitError("Input data does not contain the following columns: " +
"{}".format(missing_features)) | python | def _toolkits_select_columns(dataset, columns):
"""
Same as select columns but redirect runtime error to ToolkitError.
"""
try:
return dataset.select_columns(columns)
except RuntimeError:
missing_features = list(set(columns).difference(set(dataset.column_names())))
raise ToolkitError("Input data does not contain the following columns: " +
"{}".format(missing_features)) | [
"def",
"_toolkits_select_columns",
"(",
"dataset",
",",
"columns",
")",
":",
"try",
":",
"return",
"dataset",
".",
"select_columns",
"(",
"columns",
")",
"except",
"RuntimeError",
":",
"missing_features",
"=",
"list",
"(",
"set",
"(",
"columns",
")",
".",
"difference",
"(",
"set",
"(",
"dataset",
".",
"column_names",
"(",
")",
")",
")",
")",
"raise",
"ToolkitError",
"(",
"\"Input data does not contain the following columns: \"",
"+",
"\"{}\"",
".",
"format",
"(",
"missing_features",
")",
")"
] | Same as select columns but redirect runtime error to ToolkitError. | [
"Same",
"as",
"select",
"columns",
"but",
"redirect",
"runtime",
"error",
"to",
"ToolkitError",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L462-L471 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _raise_error_if_column_exists | def _raise_error_if_column_exists(dataset, column_name = 'dataset',
dataset_variable_name = 'dataset',
column_name_error_message_name = 'column_name'):
"""
Check if a column exists in an SFrame with error message.
"""
err_msg = 'The SFrame {0} must contain the column {1}.'.format(
dataset_variable_name,
column_name_error_message_name)
if column_name not in dataset.column_names():
raise ToolkitError(str(err_msg)) | python | def _raise_error_if_column_exists(dataset, column_name = 'dataset',
dataset_variable_name = 'dataset',
column_name_error_message_name = 'column_name'):
"""
Check if a column exists in an SFrame with error message.
"""
err_msg = 'The SFrame {0} must contain the column {1}.'.format(
dataset_variable_name,
column_name_error_message_name)
if column_name not in dataset.column_names():
raise ToolkitError(str(err_msg)) | [
"def",
"_raise_error_if_column_exists",
"(",
"dataset",
",",
"column_name",
"=",
"'dataset'",
",",
"dataset_variable_name",
"=",
"'dataset'",
",",
"column_name_error_message_name",
"=",
"'column_name'",
")",
":",
"err_msg",
"=",
"'The SFrame {0} must contain the column {1}.'",
".",
"format",
"(",
"dataset_variable_name",
",",
"column_name_error_message_name",
")",
"if",
"column_name",
"not",
"in",
"dataset",
".",
"column_names",
"(",
")",
":",
"raise",
"ToolkitError",
"(",
"str",
"(",
"err_msg",
")",
")"
] | Check if a column exists in an SFrame with error message. | [
"Check",
"if",
"a",
"column",
"exists",
"in",
"an",
"SFrame",
"with",
"error",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L473-L483 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _check_categorical_option_type | def _check_categorical_option_type(option_name, option_value, possible_values):
"""
Check whether or not the requested option is one of the allowed values.
"""
err_msg = '{0} is not a valid option for {1}. '.format(option_value, option_name)
err_msg += ' Expected one of: '.format(possible_values)
err_msg += ', '.join(map(str, possible_values))
if option_value not in possible_values:
raise ToolkitError(err_msg) | python | def _check_categorical_option_type(option_name, option_value, possible_values):
"""
Check whether or not the requested option is one of the allowed values.
"""
err_msg = '{0} is not a valid option for {1}. '.format(option_value, option_name)
err_msg += ' Expected one of: '.format(possible_values)
err_msg += ', '.join(map(str, possible_values))
if option_value not in possible_values:
raise ToolkitError(err_msg) | [
"def",
"_check_categorical_option_type",
"(",
"option_name",
",",
"option_value",
",",
"possible_values",
")",
":",
"err_msg",
"=",
"'{0} is not a valid option for {1}. '",
".",
"format",
"(",
"option_value",
",",
"option_name",
")",
"err_msg",
"+=",
"' Expected one of: '",
".",
"format",
"(",
"possible_values",
")",
"err_msg",
"+=",
"', '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"possible_values",
")",
")",
"if",
"option_value",
"not",
"in",
"possible_values",
":",
"raise",
"ToolkitError",
"(",
"err_msg",
")"
] | Check whether or not the requested option is one of the allowed values. | [
"Check",
"whether",
"or",
"not",
"the",
"requested",
"option",
"is",
"one",
"of",
"the",
"allowed",
"values",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L485-L494 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _raise_error_if_not_sarray | def _raise_error_if_not_sarray(dataset, variable_name="SArray"):
"""
Check if the input is an SArray. Provide a proper error
message otherwise.
"""
err_msg = "Input %s is not an SArray."
if not isinstance(dataset, _SArray):
raise ToolkitError(err_msg % variable_name) | python | def _raise_error_if_not_sarray(dataset, variable_name="SArray"):
"""
Check if the input is an SArray. Provide a proper error
message otherwise.
"""
err_msg = "Input %s is not an SArray."
if not isinstance(dataset, _SArray):
raise ToolkitError(err_msg % variable_name) | [
"def",
"_raise_error_if_not_sarray",
"(",
"dataset",
",",
"variable_name",
"=",
"\"SArray\"",
")",
":",
"err_msg",
"=",
"\"Input %s is not an SArray.\"",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"_SArray",
")",
":",
"raise",
"ToolkitError",
"(",
"err_msg",
"%",
"variable_name",
")"
] | Check if the input is an SArray. Provide a proper error
message otherwise. | [
"Check",
"if",
"the",
"input",
"is",
"an",
"SArray",
".",
"Provide",
"a",
"proper",
"error",
"message",
"otherwise",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L496-L503 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _raise_error_if_not_sframe | def _raise_error_if_not_sframe(dataset, variable_name="SFrame"):
"""
Check if the input is an SFrame. Provide a proper error
message otherwise.
"""
err_msg = "Input %s is not an SFrame. If it is a Pandas DataFrame,"
err_msg += " you may use the to_sframe() function to convert it to an SFrame."
if not isinstance(dataset, _SFrame):
raise ToolkitError(err_msg % variable_name) | python | def _raise_error_if_not_sframe(dataset, variable_name="SFrame"):
"""
Check if the input is an SFrame. Provide a proper error
message otherwise.
"""
err_msg = "Input %s is not an SFrame. If it is a Pandas DataFrame,"
err_msg += " you may use the to_sframe() function to convert it to an SFrame."
if not isinstance(dataset, _SFrame):
raise ToolkitError(err_msg % variable_name) | [
"def",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"variable_name",
"=",
"\"SFrame\"",
")",
":",
"err_msg",
"=",
"\"Input %s is not an SFrame. If it is a Pandas DataFrame,\"",
"err_msg",
"+=",
"\" you may use the to_sframe() function to convert it to an SFrame.\"",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"_SFrame",
")",
":",
"raise",
"ToolkitError",
"(",
"err_msg",
"%",
"variable_name",
")"
] | Check if the input is an SFrame. Provide a proper error
message otherwise. | [
"Check",
"if",
"the",
"input",
"is",
"an",
"SFrame",
".",
"Provide",
"a",
"proper",
"error",
"message",
"otherwise",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L511-L520 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _raise_error_if_sframe_empty | def _raise_error_if_sframe_empty(dataset, variable_name="SFrame"):
"""
Check if the input is empty.
"""
err_msg = "Input %s either has no rows or no columns. A non-empty SFrame "
err_msg += "is required."
if dataset.num_rows() == 0 or dataset.num_columns() == 0:
raise ToolkitError(err_msg % variable_name) | python | def _raise_error_if_sframe_empty(dataset, variable_name="SFrame"):
"""
Check if the input is empty.
"""
err_msg = "Input %s either has no rows or no columns. A non-empty SFrame "
err_msg += "is required."
if dataset.num_rows() == 0 or dataset.num_columns() == 0:
raise ToolkitError(err_msg % variable_name) | [
"def",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"variable_name",
"=",
"\"SFrame\"",
")",
":",
"err_msg",
"=",
"\"Input %s either has no rows or no columns. A non-empty SFrame \"",
"err_msg",
"+=",
"\"is required.\"",
"if",
"dataset",
".",
"num_rows",
"(",
")",
"==",
"0",
"or",
"dataset",
".",
"num_columns",
"(",
")",
"==",
"0",
":",
"raise",
"ToolkitError",
"(",
"err_msg",
"%",
"variable_name",
")"
] | Check if the input is empty. | [
"Check",
"if",
"the",
"input",
"is",
"empty",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L522-L530 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _raise_error_evaluation_metric_is_valid | def _raise_error_evaluation_metric_is_valid(metric, allowed_metrics):
"""
Check if the input is an SFrame. Provide a proper error
message otherwise.
"""
err_msg = "Evaluation metric '%s' not recognized. The supported evaluation"
err_msg += " metrics are (%s)."
if metric not in allowed_metrics:
raise ToolkitError(err_msg % (metric,
', '.join(map(lambda x: "'%s'" % x, allowed_metrics)))) | python | def _raise_error_evaluation_metric_is_valid(metric, allowed_metrics):
"""
Check if the input is an SFrame. Provide a proper error
message otherwise.
"""
err_msg = "Evaluation metric '%s' not recognized. The supported evaluation"
err_msg += " metrics are (%s)."
if metric not in allowed_metrics:
raise ToolkitError(err_msg % (metric,
', '.join(map(lambda x: "'%s'" % x, allowed_metrics)))) | [
"def",
"_raise_error_evaluation_metric_is_valid",
"(",
"metric",
",",
"allowed_metrics",
")",
":",
"err_msg",
"=",
"\"Evaluation metric '%s' not recognized. The supported evaluation\"",
"err_msg",
"+=",
"\" metrics are (%s).\"",
"if",
"metric",
"not",
"in",
"allowed_metrics",
":",
"raise",
"ToolkitError",
"(",
"err_msg",
"%",
"(",
"metric",
",",
"', '",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
":",
"\"'%s'\"",
"%",
"x",
",",
"allowed_metrics",
")",
")",
")",
")"
] | Check if the input is an SFrame. Provide a proper error
message otherwise. | [
"Check",
"if",
"the",
"input",
"is",
"an",
"SFrame",
".",
"Provide",
"a",
"proper",
"error",
"message",
"otherwise",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L541-L552 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _numeric_param_check_range | def _numeric_param_check_range(variable_name, variable_value, range_bottom, range_top):
"""
Checks if numeric parameter is within given range
"""
err_msg = "%s must be between %i and %i"
if variable_value < range_bottom or variable_value > range_top:
raise ToolkitError(err_msg % (variable_name, range_bottom, range_top)) | python | def _numeric_param_check_range(variable_name, variable_value, range_bottom, range_top):
"""
Checks if numeric parameter is within given range
"""
err_msg = "%s must be between %i and %i"
if variable_value < range_bottom or variable_value > range_top:
raise ToolkitError(err_msg % (variable_name, range_bottom, range_top)) | [
"def",
"_numeric_param_check_range",
"(",
"variable_name",
",",
"variable_value",
",",
"range_bottom",
",",
"range_top",
")",
":",
"err_msg",
"=",
"\"%s must be between %i and %i\"",
"if",
"variable_value",
"<",
"range_bottom",
"or",
"variable_value",
">",
"range_top",
":",
"raise",
"ToolkitError",
"(",
"err_msg",
"%",
"(",
"variable_name",
",",
"range_bottom",
",",
"range_top",
")",
")"
] | Checks if numeric parameter is within given range | [
"Checks",
"if",
"numeric",
"parameter",
"is",
"within",
"given",
"range"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L554-L561 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _validate_data | def _validate_data(dataset, target, features=None, validation_set='auto'):
"""
Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception.
"""
_raise_error_if_not_sframe(dataset, "training dataset")
# Determine columns to keep
if features is None:
features = [feat for feat in dataset.column_names() if feat != target]
if not hasattr(features, '__iter__'):
raise TypeError("Input 'features' must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError(
"Invalid feature %s: Feature names must be of type str" % x)
# Check validation_set argument
if isinstance(validation_set, str):
# Only string value allowed is 'auto'
if validation_set != 'auto':
raise TypeError('Unrecognized value for validation_set.')
elif isinstance(validation_set, _SFrame):
# Attempt to append the two datasets together to check schema
validation_set.head().append(dataset.head())
# Reduce validation set to requested columns
validation_set = _toolkits_select_columns(
validation_set, features + [target])
elif not validation_set is None:
raise TypeError("validation_set must be either 'auto', None, or an "
"SFrame matching the training data.")
# Reduce training set to requested columns
dataset = _toolkits_select_columns(dataset, features + [target])
return dataset, validation_set | python | def _validate_data(dataset, target, features=None, validation_set='auto'):
"""
Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception.
"""
_raise_error_if_not_sframe(dataset, "training dataset")
# Determine columns to keep
if features is None:
features = [feat for feat in dataset.column_names() if feat != target]
if not hasattr(features, '__iter__'):
raise TypeError("Input 'features' must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError(
"Invalid feature %s: Feature names must be of type str" % x)
# Check validation_set argument
if isinstance(validation_set, str):
# Only string value allowed is 'auto'
if validation_set != 'auto':
raise TypeError('Unrecognized value for validation_set.')
elif isinstance(validation_set, _SFrame):
# Attempt to append the two datasets together to check schema
validation_set.head().append(dataset.head())
# Reduce validation set to requested columns
validation_set = _toolkits_select_columns(
validation_set, features + [target])
elif not validation_set is None:
raise TypeError("validation_set must be either 'auto', None, or an "
"SFrame matching the training data.")
# Reduce training set to requested columns
dataset = _toolkits_select_columns(dataset, features + [target])
return dataset, validation_set | [
"def",
"_validate_data",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"validation_set",
"=",
"'auto'",
")",
":",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"training dataset\"",
")",
"# Determine columns to keep",
"if",
"features",
"is",
"None",
":",
"features",
"=",
"[",
"feat",
"for",
"feat",
"in",
"dataset",
".",
"column_names",
"(",
")",
"if",
"feat",
"!=",
"target",
"]",
"if",
"not",
"hasattr",
"(",
"features",
",",
"'__iter__'",
")",
":",
"raise",
"TypeError",
"(",
"\"Input 'features' must be a list.\"",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"str",
")",
"for",
"x",
"in",
"features",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid feature %s: Feature names must be of type str\"",
"%",
"x",
")",
"# Check validation_set argument",
"if",
"isinstance",
"(",
"validation_set",
",",
"str",
")",
":",
"# Only string value allowed is 'auto'",
"if",
"validation_set",
"!=",
"'auto'",
":",
"raise",
"TypeError",
"(",
"'Unrecognized value for validation_set.'",
")",
"elif",
"isinstance",
"(",
"validation_set",
",",
"_SFrame",
")",
":",
"# Attempt to append the two datasets together to check schema",
"validation_set",
".",
"head",
"(",
")",
".",
"append",
"(",
"dataset",
".",
"head",
"(",
")",
")",
"# Reduce validation set to requested columns",
"validation_set",
"=",
"_toolkits_select_columns",
"(",
"validation_set",
",",
"features",
"+",
"[",
"target",
"]",
")",
"elif",
"not",
"validation_set",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"validation_set must be either 'auto', None, or an \"",
"\"SFrame matching the training data.\"",
")",
"# Reduce training set to requested columns",
"dataset",
"=",
"_toolkits_select_columns",
"(",
"dataset",
",",
"features",
"+",
"[",
"target",
"]",
")",
"return",
"dataset",
",",
"validation_set"
] | Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception. | [
"Validate",
"and",
"canonicalize",
"training",
"and",
"validation",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L563-L625 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _validate_row_label | def _validate_row_label(dataset, label=None, default_label='__id'):
"""
Validate a row label column. If the row label is not specified, a column is
created with row numbers, named with the string in the `default_label`
parameter.
Parameters
----------
dataset : SFrame
Input dataset.
label : str, optional
Name of the column containing row labels.
default_label : str, optional
The default column name if `label` is not specified. A column with row
numbers is added to the output SFrame in this case.
Returns
-------
dataset : SFrame
The input dataset, but with an additional row label column, *if* there
was no input label.
label : str
The final label column name.
"""
## If no label is provided, set it to be a default and add a row number to
# dataset. Check that this new name does not conflict with an existing
# name.
if not label:
## Try a bunch of variations of the default label to find one that's not
# already a column name.
label_name_base = default_label
label = default_label
i = 1
while label in dataset.column_names():
label = label_name_base + '.{}'.format(i)
i += 1
dataset = dataset.add_row_number(column_name=label)
## Validate the label name and types.
if not isinstance(label, str):
raise TypeError("The row label column name '{}' must be a string.".format(label))
if not label in dataset.column_names():
raise ToolkitError("Row label column '{}' not found in the dataset.".format(label))
if not dataset[label].dtype in (str, int):
raise TypeError("Row labels must be integers or strings.")
## Return the modified dataset and label
return dataset, label | python | def _validate_row_label(dataset, label=None, default_label='__id'):
"""
Validate a row label column. If the row label is not specified, a column is
created with row numbers, named with the string in the `default_label`
parameter.
Parameters
----------
dataset : SFrame
Input dataset.
label : str, optional
Name of the column containing row labels.
default_label : str, optional
The default column name if `label` is not specified. A column with row
numbers is added to the output SFrame in this case.
Returns
-------
dataset : SFrame
The input dataset, but with an additional row label column, *if* there
was no input label.
label : str
The final label column name.
"""
## If no label is provided, set it to be a default and add a row number to
# dataset. Check that this new name does not conflict with an existing
# name.
if not label:
## Try a bunch of variations of the default label to find one that's not
# already a column name.
label_name_base = default_label
label = default_label
i = 1
while label in dataset.column_names():
label = label_name_base + '.{}'.format(i)
i += 1
dataset = dataset.add_row_number(column_name=label)
## Validate the label name and types.
if not isinstance(label, str):
raise TypeError("The row label column name '{}' must be a string.".format(label))
if not label in dataset.column_names():
raise ToolkitError("Row label column '{}' not found in the dataset.".format(label))
if not dataset[label].dtype in (str, int):
raise TypeError("Row labels must be integers or strings.")
## Return the modified dataset and label
return dataset, label | [
"def",
"_validate_row_label",
"(",
"dataset",
",",
"label",
"=",
"None",
",",
"default_label",
"=",
"'__id'",
")",
":",
"## If no label is provided, set it to be a default and add a row number to",
"# dataset. Check that this new name does not conflict with an existing",
"# name.",
"if",
"not",
"label",
":",
"## Try a bunch of variations of the default label to find one that's not",
"# already a column name.",
"label_name_base",
"=",
"default_label",
"label",
"=",
"default_label",
"i",
"=",
"1",
"while",
"label",
"in",
"dataset",
".",
"column_names",
"(",
")",
":",
"label",
"=",
"label_name_base",
"+",
"'.{}'",
".",
"format",
"(",
"i",
")",
"i",
"+=",
"1",
"dataset",
"=",
"dataset",
".",
"add_row_number",
"(",
"column_name",
"=",
"label",
")",
"## Validate the label name and types.",
"if",
"not",
"isinstance",
"(",
"label",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"The row label column name '{}' must be a string.\"",
".",
"format",
"(",
"label",
")",
")",
"if",
"not",
"label",
"in",
"dataset",
".",
"column_names",
"(",
")",
":",
"raise",
"ToolkitError",
"(",
"\"Row label column '{}' not found in the dataset.\"",
".",
"format",
"(",
"label",
")",
")",
"if",
"not",
"dataset",
"[",
"label",
"]",
".",
"dtype",
"in",
"(",
"str",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"Row labels must be integers or strings.\"",
")",
"## Return the modified dataset and label",
"return",
"dataset",
",",
"label"
] | Validate a row label column. If the row label is not specified, a column is
created with row numbers, named with the string in the `default_label`
parameter.
Parameters
----------
dataset : SFrame
Input dataset.
label : str, optional
Name of the column containing row labels.
default_label : str, optional
The default column name if `label` is not specified. A column with row
numbers is added to the output SFrame in this case.
Returns
-------
dataset : SFrame
The input dataset, but with an additional row label column, *if* there
was no input label.
label : str
The final label column name. | [
"Validate",
"a",
"row",
"label",
"column",
".",
"If",
"the",
"row",
"label",
"is",
"not",
"specified",
"a",
"column",
"is",
"created",
"with",
"row",
"numbers",
"named",
"with",
"the",
"string",
"in",
"the",
"default_label",
"parameter",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L627-L682 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _mac_ver | def _mac_ver():
"""
Returns Mac version as a tuple of integers, making it easy to do proper
version comparisons. On non-Macs, it returns an empty tuple.
"""
import platform
import sys
if sys.platform == 'darwin':
ver_str = platform.mac_ver()[0]
return tuple([int(v) for v in ver_str.split('.')])
else:
return () | python | def _mac_ver():
"""
Returns Mac version as a tuple of integers, making it easy to do proper
version comparisons. On non-Macs, it returns an empty tuple.
"""
import platform
import sys
if sys.platform == 'darwin':
ver_str = platform.mac_ver()[0]
return tuple([int(v) for v in ver_str.split('.')])
else:
return () | [
"def",
"_mac_ver",
"(",
")",
":",
"import",
"platform",
"import",
"sys",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"ver_str",
"=",
"platform",
".",
"mac_ver",
"(",
")",
"[",
"0",
"]",
"return",
"tuple",
"(",
"[",
"int",
"(",
"v",
")",
"for",
"v",
"in",
"ver_str",
".",
"split",
"(",
"'.'",
")",
"]",
")",
"else",
":",
"return",
"(",
")"
] | Returns Mac version as a tuple of integers, making it easy to do proper
version comparisons. On non-Macs, it returns an empty tuple. | [
"Returns",
"Mac",
"version",
"as",
"a",
"tuple",
"of",
"integers",
"making",
"it",
"easy",
"to",
"do",
"proper",
"version",
"comparisons",
".",
"On",
"non",
"-",
"Macs",
"it",
"returns",
"an",
"empty",
"tuple",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L698-L709 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _print_neural_compute_device | def _print_neural_compute_device(cuda_gpus, use_mps, cuda_mem_req=None, has_mps_impl=True):
"""
Print a message making it clear to the user what compute resource is used in
neural network training.
"""
num_cuda_gpus = len(cuda_gpus)
if num_cuda_gpus >= 1:
gpu_names = ', '.join(gpu['name'] for gpu in cuda_gpus)
if use_mps:
from ._mps_utils import mps_device_name
print('Using GPU to create model ({})'.format(mps_device_name()))
elif num_cuda_gpus >= 1:
from . import _mxnet_utils
plural = 's' if num_cuda_gpus >= 2 else ''
print('Using GPU{} to create model ({})'.format(plural, gpu_names))
if cuda_mem_req is not None:
_mxnet_utils._warn_if_less_than_cuda_free_memory(cuda_mem_req, max_devices=num_cuda_gpus)
else:
import sys
print('Using CPU to create model')
if sys.platform == 'darwin' and _mac_ver() < (10, 14) and has_mps_impl:
print('NOTE: If available, an AMD GPU can be leveraged on macOS 10.14+ for faster model creation') | python | def _print_neural_compute_device(cuda_gpus, use_mps, cuda_mem_req=None, has_mps_impl=True):
"""
Print a message making it clear to the user what compute resource is used in
neural network training.
"""
num_cuda_gpus = len(cuda_gpus)
if num_cuda_gpus >= 1:
gpu_names = ', '.join(gpu['name'] for gpu in cuda_gpus)
if use_mps:
from ._mps_utils import mps_device_name
print('Using GPU to create model ({})'.format(mps_device_name()))
elif num_cuda_gpus >= 1:
from . import _mxnet_utils
plural = 's' if num_cuda_gpus >= 2 else ''
print('Using GPU{} to create model ({})'.format(plural, gpu_names))
if cuda_mem_req is not None:
_mxnet_utils._warn_if_less_than_cuda_free_memory(cuda_mem_req, max_devices=num_cuda_gpus)
else:
import sys
print('Using CPU to create model')
if sys.platform == 'darwin' and _mac_ver() < (10, 14) and has_mps_impl:
print('NOTE: If available, an AMD GPU can be leveraged on macOS 10.14+ for faster model creation') | [
"def",
"_print_neural_compute_device",
"(",
"cuda_gpus",
",",
"use_mps",
",",
"cuda_mem_req",
"=",
"None",
",",
"has_mps_impl",
"=",
"True",
")",
":",
"num_cuda_gpus",
"=",
"len",
"(",
"cuda_gpus",
")",
"if",
"num_cuda_gpus",
">=",
"1",
":",
"gpu_names",
"=",
"', '",
".",
"join",
"(",
"gpu",
"[",
"'name'",
"]",
"for",
"gpu",
"in",
"cuda_gpus",
")",
"if",
"use_mps",
":",
"from",
".",
"_mps_utils",
"import",
"mps_device_name",
"print",
"(",
"'Using GPU to create model ({})'",
".",
"format",
"(",
"mps_device_name",
"(",
")",
")",
")",
"elif",
"num_cuda_gpus",
">=",
"1",
":",
"from",
".",
"import",
"_mxnet_utils",
"plural",
"=",
"'s'",
"if",
"num_cuda_gpus",
">=",
"2",
"else",
"''",
"print",
"(",
"'Using GPU{} to create model ({})'",
".",
"format",
"(",
"plural",
",",
"gpu_names",
")",
")",
"if",
"cuda_mem_req",
"is",
"not",
"None",
":",
"_mxnet_utils",
".",
"_warn_if_less_than_cuda_free_memory",
"(",
"cuda_mem_req",
",",
"max_devices",
"=",
"num_cuda_gpus",
")",
"else",
":",
"import",
"sys",
"print",
"(",
"'Using CPU to create model'",
")",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
"and",
"_mac_ver",
"(",
")",
"<",
"(",
"10",
",",
"14",
")",
"and",
"has_mps_impl",
":",
"print",
"(",
"'NOTE: If available, an AMD GPU can be leveraged on macOS 10.14+ for faster model creation'",
")"
] | Print a message making it clear to the user what compute resource is used in
neural network training. | [
"Print",
"a",
"message",
"making",
"it",
"clear",
"to",
"the",
"user",
"what",
"compute",
"resource",
"is",
"used",
"in",
"neural",
"network",
"training",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L711-L733 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py | _GetMessageFromFactory | def _GetMessageFromFactory(factory, full_name):
"""Get a proto class from the MessageFactory by name.
Args:
factory: a MessageFactory instance.
full_name: str, the fully qualified name of the proto type.
Returns:
A class, for the type identified by full_name.
Raises:
KeyError, if the proto is not found in the factory's descriptor pool.
"""
proto_descriptor = factory.pool.FindMessageTypeByName(full_name)
proto_cls = factory.GetPrototype(proto_descriptor)
return proto_cls | python | def _GetMessageFromFactory(factory, full_name):
"""Get a proto class from the MessageFactory by name.
Args:
factory: a MessageFactory instance.
full_name: str, the fully qualified name of the proto type.
Returns:
A class, for the type identified by full_name.
Raises:
KeyError, if the proto is not found in the factory's descriptor pool.
"""
proto_descriptor = factory.pool.FindMessageTypeByName(full_name)
proto_cls = factory.GetPrototype(proto_descriptor)
return proto_cls | [
"def",
"_GetMessageFromFactory",
"(",
"factory",
",",
"full_name",
")",
":",
"proto_descriptor",
"=",
"factory",
".",
"pool",
".",
"FindMessageTypeByName",
"(",
"full_name",
")",
"proto_cls",
"=",
"factory",
".",
"GetPrototype",
"(",
"proto_descriptor",
")",
"return",
"proto_cls"
] | Get a proto class from the MessageFactory by name.
Args:
factory: a MessageFactory instance.
full_name: str, the fully qualified name of the proto type.
Returns:
A class, for the type identified by full_name.
Raises:
KeyError, if the proto is not found in the factory's descriptor pool. | [
"Get",
"a",
"proto",
"class",
"from",
"the",
"MessageFactory",
"by",
"name",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py#L44-L57 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py | MakeSimpleProtoClass | def MakeSimpleProtoClass(fields, full_name=None, pool=None):
"""Create a Protobuf class whose fields are basic types.
Note: this doesn't validate field names!
Args:
fields: dict of {name: field_type} mappings for each field in the proto. If
this is an OrderedDict the order will be maintained, otherwise the
fields will be sorted by name.
full_name: optional str, the fully-qualified name of the proto type.
pool: optional DescriptorPool instance.
Returns:
a class, the new protobuf class with a FileDescriptor.
"""
factory = message_factory.MessageFactory(pool=pool)
if full_name is not None:
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
# The factory's DescriptorPool doesn't know about this class yet.
pass
# Get a list of (name, field_type) tuples from the fields dict. If fields was
# an OrderedDict we keep the order, but otherwise we sort the field to ensure
# consistent ordering.
field_items = fields.items()
if not isinstance(fields, OrderedDict):
field_items = sorted(field_items)
# Use a consistent file name that is unlikely to conflict with any imported
# proto files.
fields_hash = hashlib.sha1()
for f_name, f_type in field_items:
fields_hash.update(f_name.encode('utf-8'))
fields_hash.update(str(f_type).encode('utf-8'))
proto_file_name = fields_hash.hexdigest() + '.proto'
# If the proto is anonymous, use the same hash to name it.
if full_name is None:
full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' +
fields_hash.hexdigest())
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
# The factory's DescriptorPool doesn't know about this class yet.
pass
# This is the first time we see this proto: add a new descriptor to the pool.
factory.pool.Add(
_MakeFileDescriptorProto(proto_file_name, full_name, field_items))
return _GetMessageFromFactory(factory, full_name) | python | def MakeSimpleProtoClass(fields, full_name=None, pool=None):
"""Create a Protobuf class whose fields are basic types.
Note: this doesn't validate field names!
Args:
fields: dict of {name: field_type} mappings for each field in the proto. If
this is an OrderedDict the order will be maintained, otherwise the
fields will be sorted by name.
full_name: optional str, the fully-qualified name of the proto type.
pool: optional DescriptorPool instance.
Returns:
a class, the new protobuf class with a FileDescriptor.
"""
factory = message_factory.MessageFactory(pool=pool)
if full_name is not None:
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
# The factory's DescriptorPool doesn't know about this class yet.
pass
# Get a list of (name, field_type) tuples from the fields dict. If fields was
# an OrderedDict we keep the order, but otherwise we sort the field to ensure
# consistent ordering.
field_items = fields.items()
if not isinstance(fields, OrderedDict):
field_items = sorted(field_items)
# Use a consistent file name that is unlikely to conflict with any imported
# proto files.
fields_hash = hashlib.sha1()
for f_name, f_type in field_items:
fields_hash.update(f_name.encode('utf-8'))
fields_hash.update(str(f_type).encode('utf-8'))
proto_file_name = fields_hash.hexdigest() + '.proto'
# If the proto is anonymous, use the same hash to name it.
if full_name is None:
full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' +
fields_hash.hexdigest())
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
# The factory's DescriptorPool doesn't know about this class yet.
pass
# This is the first time we see this proto: add a new descriptor to the pool.
factory.pool.Add(
_MakeFileDescriptorProto(proto_file_name, full_name, field_items))
return _GetMessageFromFactory(factory, full_name) | [
"def",
"MakeSimpleProtoClass",
"(",
"fields",
",",
"full_name",
"=",
"None",
",",
"pool",
"=",
"None",
")",
":",
"factory",
"=",
"message_factory",
".",
"MessageFactory",
"(",
"pool",
"=",
"pool",
")",
"if",
"full_name",
"is",
"not",
"None",
":",
"try",
":",
"proto_cls",
"=",
"_GetMessageFromFactory",
"(",
"factory",
",",
"full_name",
")",
"return",
"proto_cls",
"except",
"KeyError",
":",
"# The factory's DescriptorPool doesn't know about this class yet.",
"pass",
"# Get a list of (name, field_type) tuples from the fields dict. If fields was",
"# an OrderedDict we keep the order, but otherwise we sort the field to ensure",
"# consistent ordering.",
"field_items",
"=",
"fields",
".",
"items",
"(",
")",
"if",
"not",
"isinstance",
"(",
"fields",
",",
"OrderedDict",
")",
":",
"field_items",
"=",
"sorted",
"(",
"field_items",
")",
"# Use a consistent file name that is unlikely to conflict with any imported",
"# proto files.",
"fields_hash",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"for",
"f_name",
",",
"f_type",
"in",
"field_items",
":",
"fields_hash",
".",
"update",
"(",
"f_name",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"fields_hash",
".",
"update",
"(",
"str",
"(",
"f_type",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"proto_file_name",
"=",
"fields_hash",
".",
"hexdigest",
"(",
")",
"+",
"'.proto'",
"# If the proto is anonymous, use the same hash to name it.",
"if",
"full_name",
"is",
"None",
":",
"full_name",
"=",
"(",
"'net.proto2.python.public.proto_builder.AnonymousProto_'",
"+",
"fields_hash",
".",
"hexdigest",
"(",
")",
")",
"try",
":",
"proto_cls",
"=",
"_GetMessageFromFactory",
"(",
"factory",
",",
"full_name",
")",
"return",
"proto_cls",
"except",
"KeyError",
":",
"# The factory's DescriptorPool doesn't know about this class yet.",
"pass",
"# This is the first time we see this proto: add a new descriptor to the pool.",
"factory",
".",
"pool",
".",
"Add",
"(",
"_MakeFileDescriptorProto",
"(",
"proto_file_name",
",",
"full_name",
",",
"field_items",
")",
")",
"return",
"_GetMessageFromFactory",
"(",
"factory",
",",
"full_name",
")"
] | Create a Protobuf class whose fields are basic types.
Note: this doesn't validate field names!
Args:
fields: dict of {name: field_type} mappings for each field in the proto. If
this is an OrderedDict the order will be maintained, otherwise the
fields will be sorted by name.
full_name: optional str, the fully-qualified name of the proto type.
pool: optional DescriptorPool instance.
Returns:
a class, the new protobuf class with a FileDescriptor. | [
"Create",
"a",
"Protobuf",
"class",
"whose",
"fields",
"are",
"basic",
"types",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py#L60-L113 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py | _MakeFileDescriptorProto | def _MakeFileDescriptorProto(proto_file_name, full_name, field_items):
"""Populate FileDescriptorProto for MessageFactory's DescriptorPool."""
package, name = full_name.rsplit('.', 1)
file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name)
file_proto.package = package
desc_proto = file_proto.message_type.add()
desc_proto.name = name
for f_number, (f_name, f_type) in enumerate(field_items, 1):
field_proto = desc_proto.field.add()
field_proto.name = f_name
field_proto.number = f_number
field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
field_proto.type = f_type
return file_proto | python | def _MakeFileDescriptorProto(proto_file_name, full_name, field_items):
"""Populate FileDescriptorProto for MessageFactory's DescriptorPool."""
package, name = full_name.rsplit('.', 1)
file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name)
file_proto.package = package
desc_proto = file_proto.message_type.add()
desc_proto.name = name
for f_number, (f_name, f_type) in enumerate(field_items, 1):
field_proto = desc_proto.field.add()
field_proto.name = f_name
field_proto.number = f_number
field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
field_proto.type = f_type
return file_proto | [
"def",
"_MakeFileDescriptorProto",
"(",
"proto_file_name",
",",
"full_name",
",",
"field_items",
")",
":",
"package",
",",
"name",
"=",
"full_name",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"file_proto",
"=",
"descriptor_pb2",
".",
"FileDescriptorProto",
"(",
")",
"file_proto",
".",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"package",
".",
"replace",
"(",
"'.'",
",",
"'/'",
")",
",",
"proto_file_name",
")",
"file_proto",
".",
"package",
"=",
"package",
"desc_proto",
"=",
"file_proto",
".",
"message_type",
".",
"add",
"(",
")",
"desc_proto",
".",
"name",
"=",
"name",
"for",
"f_number",
",",
"(",
"f_name",
",",
"f_type",
")",
"in",
"enumerate",
"(",
"field_items",
",",
"1",
")",
":",
"field_proto",
"=",
"desc_proto",
".",
"field",
".",
"add",
"(",
")",
"field_proto",
".",
"name",
"=",
"f_name",
"field_proto",
".",
"number",
"=",
"f_number",
"field_proto",
".",
"label",
"=",
"descriptor_pb2",
".",
"FieldDescriptorProto",
".",
"LABEL_OPTIONAL",
"field_proto",
".",
"type",
"=",
"f_type",
"return",
"file_proto"
] | Populate FileDescriptorProto for MessageFactory's DescriptorPool. | [
"Populate",
"FileDescriptorProto",
"for",
"MessageFactory",
"s",
"DescriptorPool",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/proto_builder.py#L116-L130 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_decision_tree_classifier.py | convert | def convert(model, input_name, output_features):
"""Convert a decision tree model to protobuf format.
Parameters
----------
decision_tree : DecisionTreeClassifier
A trained scikit-learn tree model.
input_name: str
Name of the input columns.
output_name: str
Name of the output columns.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _tree.DecisionTreeClassifier)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'tree_') and model.tree_ is not None)
return _MLModel(convert_tree_ensemble(model, input_name, output_features,
mode = 'classifier',
class_labels = model.classes_)) | python | def convert(model, input_name, output_features):
"""Convert a decision tree model to protobuf format.
Parameters
----------
decision_tree : DecisionTreeClassifier
A trained scikit-learn tree model.
input_name: str
Name of the input columns.
output_name: str
Name of the output columns.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _tree.DecisionTreeClassifier)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'tree_') and model.tree_ is not None)
return _MLModel(convert_tree_ensemble(model, input_name, output_features,
mode = 'classifier',
class_labels = model.classes_)) | [
"def",
"convert",
"(",
"model",
",",
"input_name",
",",
"output_features",
")",
":",
"if",
"not",
"(",
"HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"_tree",
".",
"DecisionTreeClassifier",
")",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"lambda",
"m",
":",
"hasattr",
"(",
"m",
",",
"'tree_'",
")",
"and",
"model",
".",
"tree_",
"is",
"not",
"None",
")",
"return",
"_MLModel",
"(",
"convert_tree_ensemble",
"(",
"model",
",",
"input_name",
",",
"output_features",
",",
"mode",
"=",
"'classifier'",
",",
"class_labels",
"=",
"model",
".",
"classes_",
")",
")"
] | Convert a decision tree model to protobuf format.
Parameters
----------
decision_tree : DecisionTreeClassifier
A trained scikit-learn tree model.
input_name: str
Name of the input columns.
output_name: str
Name of the output columns.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"decision",
"tree",
"model",
"to",
"protobuf",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_decision_tree_classifier.py#L18-L45 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_coreml_utils.py | _get_model_metadata | def _get_model_metadata(model_class, metadata, version=None):
"""
Returns user-defined metadata, making sure information all models should
have is also available, as a dictionary
"""
from turicreate import __version__
info = {
'turicreate_version': __version__,
'type': model_class,
}
if version is not None:
info['version'] = str(version)
info.update(metadata)
return info | python | def _get_model_metadata(model_class, metadata, version=None):
"""
Returns user-defined metadata, making sure information all models should
have is also available, as a dictionary
"""
from turicreate import __version__
info = {
'turicreate_version': __version__,
'type': model_class,
}
if version is not None:
info['version'] = str(version)
info.update(metadata)
return info | [
"def",
"_get_model_metadata",
"(",
"model_class",
",",
"metadata",
",",
"version",
"=",
"None",
")",
":",
"from",
"turicreate",
"import",
"__version__",
"info",
"=",
"{",
"'turicreate_version'",
":",
"__version__",
",",
"'type'",
":",
"model_class",
",",
"}",
"if",
"version",
"is",
"not",
"None",
":",
"info",
"[",
"'version'",
"]",
"=",
"str",
"(",
"version",
")",
"info",
".",
"update",
"(",
"metadata",
")",
"return",
"info"
] | Returns user-defined metadata, making sure information all models should
have is also available, as a dictionary | [
"Returns",
"user",
"-",
"defined",
"metadata",
"making",
"sure",
"information",
"all",
"models",
"should",
"have",
"is",
"also",
"available",
"as",
"a",
"dictionary"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_coreml_utils.py#L16-L29 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_coreml_utils.py | _set_model_metadata | def _set_model_metadata(mlmodel, model_class, metadata, version=None):
"""
Sets user-defined metadata, making sure information all models should have
is also available
"""
info = _get_model_metadata(model_class, metadata, version)
mlmodel.user_defined_metadata.update(info) | python | def _set_model_metadata(mlmodel, model_class, metadata, version=None):
"""
Sets user-defined metadata, making sure information all models should have
is also available
"""
info = _get_model_metadata(model_class, metadata, version)
mlmodel.user_defined_metadata.update(info) | [
"def",
"_set_model_metadata",
"(",
"mlmodel",
",",
"model_class",
",",
"metadata",
",",
"version",
"=",
"None",
")",
":",
"info",
"=",
"_get_model_metadata",
"(",
"model_class",
",",
"metadata",
",",
"version",
")",
"mlmodel",
".",
"user_defined_metadata",
".",
"update",
"(",
"info",
")"
] | Sets user-defined metadata, making sure information all models should have
is also available | [
"Sets",
"user",
"-",
"defined",
"metadata",
"making",
"sure",
"information",
"all",
"models",
"should",
"have",
"is",
"also",
"available"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_coreml_utils.py#L32-L38 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py | _ToCamelCase | def _ToCamelCase(name):
"""Converts name to camel-case and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
if result:
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
# Lower-case the first letter.
if result and result[0].isupper():
result[0] = result[0].lower()
return ''.join(result) | python | def _ToCamelCase(name):
"""Converts name to camel-case and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
if result:
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
# Lower-case the first letter.
if result and result[0].isupper():
result[0] = result[0].lower()
return ''.join(result) | [
"def",
"_ToCamelCase",
"(",
"name",
")",
":",
"capitalize_next",
"=",
"False",
"result",
"=",
"[",
"]",
"for",
"c",
"in",
"name",
":",
"if",
"c",
"==",
"'_'",
":",
"if",
"result",
":",
"capitalize_next",
"=",
"True",
"elif",
"capitalize_next",
":",
"result",
".",
"append",
"(",
"c",
".",
"upper",
"(",
")",
")",
"capitalize_next",
"=",
"False",
"else",
":",
"result",
"+=",
"c",
"# Lower-case the first letter.",
"if",
"result",
"and",
"result",
"[",
"0",
"]",
".",
"isupper",
"(",
")",
":",
"result",
"[",
"0",
"]",
"=",
"result",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"return",
"''",
".",
"join",
"(",
"result",
")"
] | Converts name to camel-case and returns it. | [
"Converts",
"name",
"to",
"camel",
"-",
"case",
"and",
"returns",
"it",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py#L873-L891 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py | _ToJsonName | def _ToJsonName(name):
"""Converts name to Json name and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
return ''.join(result) | python | def _ToJsonName(name):
"""Converts name to Json name and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
return ''.join(result) | [
"def",
"_ToJsonName",
"(",
"name",
")",
":",
"capitalize_next",
"=",
"False",
"result",
"=",
"[",
"]",
"for",
"c",
"in",
"name",
":",
"if",
"c",
"==",
"'_'",
":",
"capitalize_next",
"=",
"True",
"elif",
"capitalize_next",
":",
"result",
".",
"append",
"(",
"c",
".",
"upper",
"(",
")",
")",
"capitalize_next",
"=",
"False",
"else",
":",
"result",
"+=",
"c",
"return",
"''",
".",
"join",
"(",
"result",
")"
] | Converts name to Json name and returns it. | [
"Converts",
"name",
"to",
"Json",
"name",
"and",
"returns",
"it",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py#L902-L916 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py | DescriptorBase._SetOptions | def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None | python | def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None | [
"def",
"_SetOptions",
"(",
"self",
",",
"options",
",",
"options_class_name",
")",
":",
"self",
".",
"_options",
"=",
"options",
"self",
".",
"_options_class_name",
"=",
"options_class_name",
"# Does this descriptor have non-default options?",
"self",
".",
"has_options",
"=",
"options",
"is",
"not",
"None"
] | Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2. | [
"Sets",
"the",
"descriptor",
"s",
"options"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py#L106-L116 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py | DescriptorBase.GetOptions | def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2, self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
self._options = options_class()
return self._options | python | def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2, self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
self._options = options_class()
return self._options | [
"def",
"GetOptions",
"(",
"self",
")",
":",
"if",
"self",
".",
"_options",
":",
"return",
"self",
".",
"_options",
"from",
"google",
".",
"protobuf",
"import",
"descriptor_pb2",
"try",
":",
"options_class",
"=",
"getattr",
"(",
"descriptor_pb2",
",",
"self",
".",
"_options_class_name",
")",
"except",
"AttributeError",
":",
"raise",
"RuntimeError",
"(",
"'Unknown options class name %s!'",
"%",
"(",
"self",
".",
"_options_class_name",
")",
")",
"self",
".",
"_options",
"=",
"options_class",
"(",
")",
"return",
"self",
".",
"_options"
] | Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor. | [
"Retrieves",
"descriptor",
"options",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py#L118-L133 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py | _NestedDescriptorBase.CopyToProto | def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
Error: If self couldnt be serialized, due to to few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.') | python | def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
Error: If self couldnt be serialized, due to to few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.') | [
"def",
"CopyToProto",
"(",
"self",
",",
"proto",
")",
":",
"if",
"(",
"self",
".",
"file",
"is",
"not",
"None",
"and",
"self",
".",
"_serialized_start",
"is",
"not",
"None",
"and",
"self",
".",
"_serialized_end",
"is",
"not",
"None",
")",
":",
"proto",
".",
"ParseFromString",
"(",
"self",
".",
"file",
".",
"serialized_pb",
"[",
"self",
".",
"_serialized_start",
":",
"self",
".",
"_serialized_end",
"]",
")",
"else",
":",
"raise",
"Error",
"(",
"'Descriptor does not contain serialization.'",
")"
] | Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
Error: If self couldnt be serialized, due to to few constructor arguments. | [
"Copies",
"this",
"to",
"the",
"matching",
"proto",
"in",
"descriptor_pb2",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py#L174-L189 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py | Descriptor.EnumValueName | def EnumValueName(self, enum, value):
"""Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum.
"""
return self.enum_types_by_name[enum].values_by_number[value].name | python | def EnumValueName(self, enum, value):
"""Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum.
"""
return self.enum_types_by_name[enum].values_by_number[value].name | [
"def",
"EnumValueName",
"(",
"self",
",",
"enum",
",",
"value",
")",
":",
"return",
"self",
".",
"enum_types_by_name",
"[",
"enum",
"]",
".",
"values_by_number",
"[",
"value",
"]",
".",
"name"
] | Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum. | [
"Returns",
"the",
"string",
"name",
"of",
"an",
"enum",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py#L321-L337 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | resolve_reference | def resolve_reference(target_reference, project):
""" Given a target_reference, made in context of 'project',
returns the AbstractTarget instance that is referred to, as well
as properties explicitly specified for this reference.
"""
# Separate target name from properties override
assert isinstance(target_reference, basestring)
assert isinstance(project, ProjectTarget)
split = _re_separate_target_from_properties.match (target_reference)
if not split:
raise BaseException ("Invalid reference: '%s'" % target_reference)
id = split.group (1)
sproperties = []
if split.group (3):
sproperties = property.create_from_strings(feature.split(split.group(3)))
sproperties = feature.expand_composites(sproperties)
# Find the target
target = project.find (id)
return (target, property_set.create(sproperties)) | python | def resolve_reference(target_reference, project):
""" Given a target_reference, made in context of 'project',
returns the AbstractTarget instance that is referred to, as well
as properties explicitly specified for this reference.
"""
# Separate target name from properties override
assert isinstance(target_reference, basestring)
assert isinstance(project, ProjectTarget)
split = _re_separate_target_from_properties.match (target_reference)
if not split:
raise BaseException ("Invalid reference: '%s'" % target_reference)
id = split.group (1)
sproperties = []
if split.group (3):
sproperties = property.create_from_strings(feature.split(split.group(3)))
sproperties = feature.expand_composites(sproperties)
# Find the target
target = project.find (id)
return (target, property_set.create(sproperties)) | [
"def",
"resolve_reference",
"(",
"target_reference",
",",
"project",
")",
":",
"# Separate target name from properties override",
"assert",
"isinstance",
"(",
"target_reference",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"project",
",",
"ProjectTarget",
")",
"split",
"=",
"_re_separate_target_from_properties",
".",
"match",
"(",
"target_reference",
")",
"if",
"not",
"split",
":",
"raise",
"BaseException",
"(",
"\"Invalid reference: '%s'\"",
"%",
"target_reference",
")",
"id",
"=",
"split",
".",
"group",
"(",
"1",
")",
"sproperties",
"=",
"[",
"]",
"if",
"split",
".",
"group",
"(",
"3",
")",
":",
"sproperties",
"=",
"property",
".",
"create_from_strings",
"(",
"feature",
".",
"split",
"(",
"split",
".",
"group",
"(",
"3",
")",
")",
")",
"sproperties",
"=",
"feature",
".",
"expand_composites",
"(",
"sproperties",
")",
"# Find the target",
"target",
"=",
"project",
".",
"find",
"(",
"id",
")",
"return",
"(",
"target",
",",
"property_set",
".",
"create",
"(",
"sproperties",
")",
")"
] | Given a target_reference, made in context of 'project',
returns the AbstractTarget instance that is referred to, as well
as properties explicitly specified for this reference. | [
"Given",
"a",
"target_reference",
"made",
"in",
"context",
"of",
"project",
"returns",
"the",
"AbstractTarget",
"instance",
"that",
"is",
"referred",
"to",
"as",
"well",
"as",
"properties",
"explicitly",
"specified",
"for",
"this",
"reference",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L841-L864 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | generate_from_reference | def generate_from_reference(target_reference, project, property_set_):
""" Attempts to generate the target given by target reference, which
can refer both to a main target or to a file.
Returns a list consisting of
- usage requirements
- generated virtual targets, if any
target_reference: Target reference
project: Project where the reference is made
property_set: Properties of the main target that makes the reference
"""
assert isinstance(target_reference, basestring)
assert isinstance(project, ProjectTarget)
assert isinstance(property_set_, property_set.PropertySet)
target, sproperties = resolve_reference(target_reference, project)
# Take properties which should be propagated and refine them
# with source-specific requirements.
propagated = property_set_.propagated()
rproperties = propagated.refine(sproperties)
return target.generate(rproperties) | python | def generate_from_reference(target_reference, project, property_set_):
""" Attempts to generate the target given by target reference, which
can refer both to a main target or to a file.
Returns a list consisting of
- usage requirements
- generated virtual targets, if any
target_reference: Target reference
project: Project where the reference is made
property_set: Properties of the main target that makes the reference
"""
assert isinstance(target_reference, basestring)
assert isinstance(project, ProjectTarget)
assert isinstance(property_set_, property_set.PropertySet)
target, sproperties = resolve_reference(target_reference, project)
# Take properties which should be propagated and refine them
# with source-specific requirements.
propagated = property_set_.propagated()
rproperties = propagated.refine(sproperties)
return target.generate(rproperties) | [
"def",
"generate_from_reference",
"(",
"target_reference",
",",
"project",
",",
"property_set_",
")",
":",
"assert",
"isinstance",
"(",
"target_reference",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"project",
",",
"ProjectTarget",
")",
"assert",
"isinstance",
"(",
"property_set_",
",",
"property_set",
".",
"PropertySet",
")",
"target",
",",
"sproperties",
"=",
"resolve_reference",
"(",
"target_reference",
",",
"project",
")",
"# Take properties which should be propagated and refine them",
"# with source-specific requirements.",
"propagated",
"=",
"property_set_",
".",
"propagated",
"(",
")",
"rproperties",
"=",
"propagated",
".",
"refine",
"(",
"sproperties",
")",
"return",
"target",
".",
"generate",
"(",
"rproperties",
")"
] | Attempts to generate the target given by target reference, which
can refer both to a main target or to a file.
Returns a list consisting of
- usage requirements
- generated virtual targets, if any
target_reference: Target reference
project: Project where the reference is made
property_set: Properties of the main target that makes the reference | [
"Attempts",
"to",
"generate",
"the",
"target",
"given",
"by",
"target",
"reference",
"which",
"can",
"refer",
"both",
"to",
"a",
"main",
"target",
"or",
"to",
"a",
"file",
".",
"Returns",
"a",
"list",
"consisting",
"of",
"-",
"usage",
"requirements",
"-",
"generated",
"virtual",
"targets",
"if",
"any",
"target_reference",
":",
"Target",
"reference",
"project",
":",
"Project",
"where",
"the",
"reference",
"is",
"made",
"property_set",
":",
"Properties",
"of",
"the",
"main",
"target",
"that",
"makes",
"the",
"reference"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L866-L886 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | TargetRegistry.main_target_alternative | def main_target_alternative (self, target):
""" Registers the specified target as a main target alternatives.
Returns 'target'.
"""
assert isinstance(target, AbstractTarget)
target.project ().add_alternative (target)
return target | python | def main_target_alternative (self, target):
""" Registers the specified target as a main target alternatives.
Returns 'target'.
"""
assert isinstance(target, AbstractTarget)
target.project ().add_alternative (target)
return target | [
"def",
"main_target_alternative",
"(",
"self",
",",
"target",
")",
":",
"assert",
"isinstance",
"(",
"target",
",",
"AbstractTarget",
")",
"target",
".",
"project",
"(",
")",
".",
"add_alternative",
"(",
"target",
")",
"return",
"target"
] | Registers the specified target as a main target alternatives.
Returns 'target'. | [
"Registers",
"the",
"specified",
"target",
"as",
"a",
"main",
"target",
"alternatives",
".",
"Returns",
"target",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L107-L113 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | TargetRegistry.main_target_sources | def main_target_sources (self, sources, main_target_name, no_renaming=0):
"""Return the list of sources to use, if main target rule is invoked
with 'sources'. If there are any objects in 'sources', they are treated
as main target instances, and the name of such targets are adjusted to
be '<name_of_this_target>__<name_of_source_target>'. Such renaming
is disabled is non-empty value is passed for 'no-renaming' parameter."""
assert is_iterable_typed(sources, basestring)
assert isinstance(main_target_name, basestring)
assert isinstance(no_renaming, (int, bool))
result = []
for t in sources:
t = b2.util.jam_to_value_maybe(t)
if isinstance (t, AbstractTarget):
name = t.name ()
if not no_renaming:
name = main_target_name + '__' + name
t.rename (name)
# Inline targets are not built by default.
p = t.project()
p.mark_targets_as_explicit([name])
result.append(name)
else:
result.append (t)
return result | python | def main_target_sources (self, sources, main_target_name, no_renaming=0):
"""Return the list of sources to use, if main target rule is invoked
with 'sources'. If there are any objects in 'sources', they are treated
as main target instances, and the name of such targets are adjusted to
be '<name_of_this_target>__<name_of_source_target>'. Such renaming
is disabled is non-empty value is passed for 'no-renaming' parameter."""
assert is_iterable_typed(sources, basestring)
assert isinstance(main_target_name, basestring)
assert isinstance(no_renaming, (int, bool))
result = []
for t in sources:
t = b2.util.jam_to_value_maybe(t)
if isinstance (t, AbstractTarget):
name = t.name ()
if not no_renaming:
name = main_target_name + '__' + name
t.rename (name)
# Inline targets are not built by default.
p = t.project()
p.mark_targets_as_explicit([name])
result.append(name)
else:
result.append (t)
return result | [
"def",
"main_target_sources",
"(",
"self",
",",
"sources",
",",
"main_target_name",
",",
"no_renaming",
"=",
"0",
")",
":",
"assert",
"is_iterable_typed",
"(",
"sources",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"main_target_name",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"no_renaming",
",",
"(",
"int",
",",
"bool",
")",
")",
"result",
"=",
"[",
"]",
"for",
"t",
"in",
"sources",
":",
"t",
"=",
"b2",
".",
"util",
".",
"jam_to_value_maybe",
"(",
"t",
")",
"if",
"isinstance",
"(",
"t",
",",
"AbstractTarget",
")",
":",
"name",
"=",
"t",
".",
"name",
"(",
")",
"if",
"not",
"no_renaming",
":",
"name",
"=",
"main_target_name",
"+",
"'__'",
"+",
"name",
"t",
".",
"rename",
"(",
"name",
")",
"# Inline targets are not built by default.",
"p",
"=",
"t",
".",
"project",
"(",
")",
"p",
".",
"mark_targets_as_explicit",
"(",
"[",
"name",
"]",
")",
"result",
".",
"append",
"(",
"name",
")",
"else",
":",
"result",
".",
"append",
"(",
"t",
")",
"return",
"result"
] | Return the list of sources to use, if main target rule is invoked
with 'sources'. If there are any objects in 'sources', they are treated
as main target instances, and the name of such targets are adjusted to
be '<name_of_this_target>__<name_of_source_target>'. Such renaming
is disabled is non-empty value is passed for 'no-renaming' parameter. | [
"Return",
"the",
"list",
"of",
"sources",
"to",
"use",
"if",
"main",
"target",
"rule",
"is",
"invoked",
"with",
"sources",
".",
"If",
"there",
"are",
"any",
"objects",
"in",
"sources",
"they",
"are",
"treated",
"as",
"main",
"target",
"instances",
"and",
"the",
"name",
"of",
"such",
"targets",
"are",
"adjusted",
"to",
"be",
"<name_of_this_target",
">",
"__<name_of_source_target",
">",
".",
"Such",
"renaming",
"is",
"disabled",
"is",
"non",
"-",
"empty",
"value",
"is",
"passed",
"for",
"no",
"-",
"renaming",
"parameter",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L115-L145 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | TargetRegistry.main_target_requirements | def main_target_requirements(self, specification, project):
"""Returns the requirement to use when declaring a main target,
which are obtained by
- translating all specified property paths, and
- refining project requirements with the one specified for the target
'specification' are the properties xplicitly specified for a
main target
'project' is the project where the main taret is to be declared."""
assert is_iterable_typed(specification, basestring)
assert isinstance(project, ProjectTarget)
# create a copy since the list is being modified
specification = list(specification)
specification.extend(toolset.requirements())
requirements = property_set.refine_from_user_input(
project.get("requirements"), specification,
project.project_module(), project.get("location"))
return requirements | python | def main_target_requirements(self, specification, project):
"""Returns the requirement to use when declaring a main target,
which are obtained by
- translating all specified property paths, and
- refining project requirements with the one specified for the target
'specification' are the properties xplicitly specified for a
main target
'project' is the project where the main taret is to be declared."""
assert is_iterable_typed(specification, basestring)
assert isinstance(project, ProjectTarget)
# create a copy since the list is being modified
specification = list(specification)
specification.extend(toolset.requirements())
requirements = property_set.refine_from_user_input(
project.get("requirements"), specification,
project.project_module(), project.get("location"))
return requirements | [
"def",
"main_target_requirements",
"(",
"self",
",",
"specification",
",",
"project",
")",
":",
"assert",
"is_iterable_typed",
"(",
"specification",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"project",
",",
"ProjectTarget",
")",
"# create a copy since the list is being modified",
"specification",
"=",
"list",
"(",
"specification",
")",
"specification",
".",
"extend",
"(",
"toolset",
".",
"requirements",
"(",
")",
")",
"requirements",
"=",
"property_set",
".",
"refine_from_user_input",
"(",
"project",
".",
"get",
"(",
"\"requirements\"",
")",
",",
"specification",
",",
"project",
".",
"project_module",
"(",
")",
",",
"project",
".",
"get",
"(",
"\"location\"",
")",
")",
"return",
"requirements"
] | Returns the requirement to use when declaring a main target,
which are obtained by
- translating all specified property paths, and
- refining project requirements with the one specified for the target
'specification' are the properties xplicitly specified for a
main target
'project' is the project where the main taret is to be declared. | [
"Returns",
"the",
"requirement",
"to",
"use",
"when",
"declaring",
"a",
"main",
"target",
"which",
"are",
"obtained",
"by",
"-",
"translating",
"all",
"specified",
"property",
"paths",
"and",
"-",
"refining",
"project",
"requirements",
"with",
"the",
"one",
"specified",
"for",
"the",
"target"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L148-L167 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | TargetRegistry.main_target_usage_requirements | def main_target_usage_requirements (self, specification, project):
""" Returns the use requirement to use when declaraing a main target,
which are obtained by
- translating all specified property paths, and
- adding project's usage requirements
specification: Use-properties explicitly specified for a main target
project: Project where the main target is to be declared
"""
assert is_iterable_typed(specification, basestring)
assert isinstance(project, ProjectTarget)
project_usage_requirements = project.get ('usage-requirements')
# We don't use 'refine-from-user-input' because I'm not sure if:
# - removing of parent's usage requirements makes sense
# - refining of usage requirements is not needed, since usage requirements
# are always free.
usage_requirements = property_set.create_from_user_input(
specification, project.project_module(), project.get("location"))
return project_usage_requirements.add (usage_requirements) | python | def main_target_usage_requirements (self, specification, project):
""" Returns the use requirement to use when declaraing a main target,
which are obtained by
- translating all specified property paths, and
- adding project's usage requirements
specification: Use-properties explicitly specified for a main target
project: Project where the main target is to be declared
"""
assert is_iterable_typed(specification, basestring)
assert isinstance(project, ProjectTarget)
project_usage_requirements = project.get ('usage-requirements')
# We don't use 'refine-from-user-input' because I'm not sure if:
# - removing of parent's usage requirements makes sense
# - refining of usage requirements is not needed, since usage requirements
# are always free.
usage_requirements = property_set.create_from_user_input(
specification, project.project_module(), project.get("location"))
return project_usage_requirements.add (usage_requirements) | [
"def",
"main_target_usage_requirements",
"(",
"self",
",",
"specification",
",",
"project",
")",
":",
"assert",
"is_iterable_typed",
"(",
"specification",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"project",
",",
"ProjectTarget",
")",
"project_usage_requirements",
"=",
"project",
".",
"get",
"(",
"'usage-requirements'",
")",
"# We don't use 'refine-from-user-input' because I'm not sure if:",
"# - removing of parent's usage requirements makes sense",
"# - refining of usage requirements is not needed, since usage requirements",
"# are always free.",
"usage_requirements",
"=",
"property_set",
".",
"create_from_user_input",
"(",
"specification",
",",
"project",
".",
"project_module",
"(",
")",
",",
"project",
".",
"get",
"(",
"\"location\"",
")",
")",
"return",
"project_usage_requirements",
".",
"add",
"(",
"usage_requirements",
")"
] | Returns the use requirement to use when declaraing a main target,
which are obtained by
- translating all specified property paths, and
- adding project's usage requirements
specification: Use-properties explicitly specified for a main target
project: Project where the main target is to be declared | [
"Returns",
"the",
"use",
"requirement",
"to",
"use",
"when",
"declaraing",
"a",
"main",
"target",
"which",
"are",
"obtained",
"by",
"-",
"translating",
"all",
"specified",
"property",
"paths",
"and",
"-",
"adding",
"project",
"s",
"usage",
"requirements",
"specification",
":",
"Use",
"-",
"properties",
"explicitly",
"specified",
"for",
"a",
"main",
"target",
"project",
":",
"Project",
"where",
"the",
"main",
"target",
"is",
"to",
"be",
"declared"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L169-L188 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | TargetRegistry.main_target_default_build | def main_target_default_build (self, specification, project):
""" Return the default build value to use when declaring a main target,
which is obtained by using specified value if not empty and parent's
default build attribute otherwise.
specification: Default build explicitly specified for a main target
project: Project where the main target is to be declared
"""
assert is_iterable_typed(specification, basestring)
assert isinstance(project, ProjectTarget)
if specification:
return property_set.create_with_validation(specification)
else:
return project.get ('default-build') | python | def main_target_default_build (self, specification, project):
""" Return the default build value to use when declaring a main target,
which is obtained by using specified value if not empty and parent's
default build attribute otherwise.
specification: Default build explicitly specified for a main target
project: Project where the main target is to be declared
"""
assert is_iterable_typed(specification, basestring)
assert isinstance(project, ProjectTarget)
if specification:
return property_set.create_with_validation(specification)
else:
return project.get ('default-build') | [
"def",
"main_target_default_build",
"(",
"self",
",",
"specification",
",",
"project",
")",
":",
"assert",
"is_iterable_typed",
"(",
"specification",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"project",
",",
"ProjectTarget",
")",
"if",
"specification",
":",
"return",
"property_set",
".",
"create_with_validation",
"(",
"specification",
")",
"else",
":",
"return",
"project",
".",
"get",
"(",
"'default-build'",
")"
] | Return the default build value to use when declaring a main target,
which is obtained by using specified value if not empty and parent's
default build attribute otherwise.
specification: Default build explicitly specified for a main target
project: Project where the main target is to be declared | [
"Return",
"the",
"default",
"build",
"value",
"to",
"use",
"when",
"declaring",
"a",
"main",
"target",
"which",
"is",
"obtained",
"by",
"using",
"specified",
"value",
"if",
"not",
"empty",
"and",
"parent",
"s",
"default",
"build",
"attribute",
"otherwise",
".",
"specification",
":",
"Default",
"build",
"explicitly",
"specified",
"for",
"a",
"main",
"target",
"project",
":",
"Project",
"where",
"the",
"main",
"target",
"is",
"to",
"be",
"declared"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L190-L202 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | TargetRegistry.start_building | def start_building (self, main_target_instance):
""" Helper rules to detect cycles in main target references.
"""
assert isinstance(main_target_instance, MainTarget)
if id(main_target_instance) in self.targets_being_built_:
names = []
for t in self.targets_being_built_.values() + [main_target_instance]:
names.append (t.full_name())
get_manager().errors()("Recursion in main target references\n")
self.targets_being_built_[id(main_target_instance)] = main_target_instance | python | def start_building (self, main_target_instance):
""" Helper rules to detect cycles in main target references.
"""
assert isinstance(main_target_instance, MainTarget)
if id(main_target_instance) in self.targets_being_built_:
names = []
for t in self.targets_being_built_.values() + [main_target_instance]:
names.append (t.full_name())
get_manager().errors()("Recursion in main target references\n")
self.targets_being_built_[id(main_target_instance)] = main_target_instance | [
"def",
"start_building",
"(",
"self",
",",
"main_target_instance",
")",
":",
"assert",
"isinstance",
"(",
"main_target_instance",
",",
"MainTarget",
")",
"if",
"id",
"(",
"main_target_instance",
")",
"in",
"self",
".",
"targets_being_built_",
":",
"names",
"=",
"[",
"]",
"for",
"t",
"in",
"self",
".",
"targets_being_built_",
".",
"values",
"(",
")",
"+",
"[",
"main_target_instance",
"]",
":",
"names",
".",
"append",
"(",
"t",
".",
"full_name",
"(",
")",
")",
"get_manager",
"(",
")",
".",
"errors",
"(",
")",
"(",
"\"Recursion in main target references\\n\"",
")",
"self",
".",
"targets_being_built_",
"[",
"id",
"(",
"main_target_instance",
")",
"]",
"=",
"main_target_instance"
] | Helper rules to detect cycles in main target references. | [
"Helper",
"rules",
"to",
"detect",
"cycles",
"in",
"main",
"target",
"references",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L204-L215 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | TargetRegistry.create_typed_target | def create_typed_target (self, type, project, name, sources, requirements, default_build, usage_requirements):
""" Creates a TypedTarget with the specified properties.
The 'name', 'sources', 'requirements', 'default_build' and
'usage_requirements' are assumed to be in the form specified
by the user in Jamfile corresponding to 'project'.
"""
assert isinstance(type, basestring)
assert isinstance(project, ProjectTarget)
assert is_iterable_typed(sources, basestring)
assert is_iterable_typed(requirements, basestring)
assert is_iterable_typed(default_build, basestring)
return self.main_target_alternative (TypedTarget (name, project, type,
self.main_target_sources (sources, name),
self.main_target_requirements (requirements, project),
self.main_target_default_build (default_build, project),
self.main_target_usage_requirements (usage_requirements, project))) | python | def create_typed_target (self, type, project, name, sources, requirements, default_build, usage_requirements):
""" Creates a TypedTarget with the specified properties.
The 'name', 'sources', 'requirements', 'default_build' and
'usage_requirements' are assumed to be in the form specified
by the user in Jamfile corresponding to 'project'.
"""
assert isinstance(type, basestring)
assert isinstance(project, ProjectTarget)
assert is_iterable_typed(sources, basestring)
assert is_iterable_typed(requirements, basestring)
assert is_iterable_typed(default_build, basestring)
return self.main_target_alternative (TypedTarget (name, project, type,
self.main_target_sources (sources, name),
self.main_target_requirements (requirements, project),
self.main_target_default_build (default_build, project),
self.main_target_usage_requirements (usage_requirements, project))) | [
"def",
"create_typed_target",
"(",
"self",
",",
"type",
",",
"project",
",",
"name",
",",
"sources",
",",
"requirements",
",",
"default_build",
",",
"usage_requirements",
")",
":",
"assert",
"isinstance",
"(",
"type",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"project",
",",
"ProjectTarget",
")",
"assert",
"is_iterable_typed",
"(",
"sources",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"requirements",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"default_build",
",",
"basestring",
")",
"return",
"self",
".",
"main_target_alternative",
"(",
"TypedTarget",
"(",
"name",
",",
"project",
",",
"type",
",",
"self",
".",
"main_target_sources",
"(",
"sources",
",",
"name",
")",
",",
"self",
".",
"main_target_requirements",
"(",
"requirements",
",",
"project",
")",
",",
"self",
".",
"main_target_default_build",
"(",
"default_build",
",",
"project",
")",
",",
"self",
".",
"main_target_usage_requirements",
"(",
"usage_requirements",
",",
"project",
")",
")",
")"
] | Creates a TypedTarget with the specified properties.
The 'name', 'sources', 'requirements', 'default_build' and
'usage_requirements' are assumed to be in the form specified
by the user in Jamfile corresponding to 'project'. | [
"Creates",
"a",
"TypedTarget",
"with",
"the",
"specified",
"properties",
".",
"The",
"name",
"sources",
"requirements",
"default_build",
"and",
"usage_requirements",
"are",
"assumed",
"to",
"be",
"in",
"the",
"form",
"specified",
"by",
"the",
"user",
"in",
"Jamfile",
"corresponding",
"to",
"project",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L222-L237 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.generate | def generate (self, ps):
""" Generates all possible targets contained in this project.
"""
assert isinstance(ps, property_set.PropertySet)
self.manager_.targets().log(
"Building project '%s' with '%s'" % (self.name (), str(ps)))
self.manager_.targets().increase_indent ()
result = GenerateResult ()
for t in self.targets_to_build ():
g = t.generate (ps)
result.extend (g)
self.manager_.targets().decrease_indent ()
return result | python | def generate (self, ps):
""" Generates all possible targets contained in this project.
"""
assert isinstance(ps, property_set.PropertySet)
self.manager_.targets().log(
"Building project '%s' with '%s'" % (self.name (), str(ps)))
self.manager_.targets().increase_indent ()
result = GenerateResult ()
for t in self.targets_to_build ():
g = t.generate (ps)
result.extend (g)
self.manager_.targets().decrease_indent ()
return result | [
"def",
"generate",
"(",
"self",
",",
"ps",
")",
":",
"assert",
"isinstance",
"(",
"ps",
",",
"property_set",
".",
"PropertySet",
")",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Building project '%s' with '%s'\"",
"%",
"(",
"self",
".",
"name",
"(",
")",
",",
"str",
"(",
"ps",
")",
")",
")",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"increase_indent",
"(",
")",
"result",
"=",
"GenerateResult",
"(",
")",
"for",
"t",
"in",
"self",
".",
"targets_to_build",
"(",
")",
":",
"g",
"=",
"t",
".",
"generate",
"(",
"ps",
")",
"result",
".",
"extend",
"(",
"g",
")",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"decrease_indent",
"(",
")",
"return",
"result"
] | Generates all possible targets contained in this project. | [
"Generates",
"all",
"possible",
"targets",
"contained",
"in",
"this",
"project",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L433-L448 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.targets_to_build | def targets_to_build (self):
""" Computes and returns a list of AbstractTarget instances which
must be built when this project is built.
"""
result = []
if not self.built_main_targets_:
self.build_main_targets ()
# Collect all main targets here, except for "explicit" ones.
for n, t in self.main_target_.iteritems ():
if not t.name () in self.explicit_targets_:
result.append (t)
# Collect all projects referenced via "projects-to-build" attribute.
self_location = self.get ('location')
for pn in self.get ('projects-to-build'):
result.append (self.find(pn + "/"))
return result | python | def targets_to_build (self):
""" Computes and returns a list of AbstractTarget instances which
must be built when this project is built.
"""
result = []
if not self.built_main_targets_:
self.build_main_targets ()
# Collect all main targets here, except for "explicit" ones.
for n, t in self.main_target_.iteritems ():
if not t.name () in self.explicit_targets_:
result.append (t)
# Collect all projects referenced via "projects-to-build" attribute.
self_location = self.get ('location')
for pn in self.get ('projects-to-build'):
result.append (self.find(pn + "/"))
return result | [
"def",
"targets_to_build",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"built_main_targets_",
":",
"self",
".",
"build_main_targets",
"(",
")",
"# Collect all main targets here, except for \"explicit\" ones.",
"for",
"n",
",",
"t",
"in",
"self",
".",
"main_target_",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"t",
".",
"name",
"(",
")",
"in",
"self",
".",
"explicit_targets_",
":",
"result",
".",
"append",
"(",
"t",
")",
"# Collect all projects referenced via \"projects-to-build\" attribute.",
"self_location",
"=",
"self",
".",
"get",
"(",
"'location'",
")",
"for",
"pn",
"in",
"self",
".",
"get",
"(",
"'projects-to-build'",
")",
":",
"result",
".",
"append",
"(",
"self",
".",
"find",
"(",
"pn",
"+",
"\"/\"",
")",
")",
"return",
"result"
] | Computes and returns a list of AbstractTarget instances which
must be built when this project is built. | [
"Computes",
"and",
"returns",
"a",
"list",
"of",
"AbstractTarget",
"instances",
"which",
"must",
"be",
"built",
"when",
"this",
"project",
"is",
"built",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L450-L469 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.mark_targets_as_explicit | def mark_targets_as_explicit (self, target_names):
"""Add 'target' to the list of targets in this project
that should be build only by explicit request."""
# Record the name of the target, not instance, since this
# rule is called before main target instaces are created.
assert is_iterable_typed(target_names, basestring)
self.explicit_targets_.update(target_names) | python | def mark_targets_as_explicit (self, target_names):
"""Add 'target' to the list of targets in this project
that should be build only by explicit request."""
# Record the name of the target, not instance, since this
# rule is called before main target instaces are created.
assert is_iterable_typed(target_names, basestring)
self.explicit_targets_.update(target_names) | [
"def",
"mark_targets_as_explicit",
"(",
"self",
",",
"target_names",
")",
":",
"# Record the name of the target, not instance, since this",
"# rule is called before main target instaces are created.",
"assert",
"is_iterable_typed",
"(",
"target_names",
",",
"basestring",
")",
"self",
".",
"explicit_targets_",
".",
"update",
"(",
"target_names",
")"
] | Add 'target' to the list of targets in this project
that should be build only by explicit request. | [
"Add",
"target",
"to",
"the",
"list",
"of",
"targets",
"in",
"this",
"project",
"that",
"should",
"be",
"build",
"only",
"by",
"explicit",
"request",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L471-L478 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.add_alternative | def add_alternative (self, target_instance):
""" Add new target alternative.
"""
assert isinstance(target_instance, AbstractTarget)
if self.built_main_targets_:
raise IllegalOperation ("add-alternative called when main targets are already created for project '%s'" % self.full_name ())
self.alternatives_.append (target_instance) | python | def add_alternative (self, target_instance):
""" Add new target alternative.
"""
assert isinstance(target_instance, AbstractTarget)
if self.built_main_targets_:
raise IllegalOperation ("add-alternative called when main targets are already created for project '%s'" % self.full_name ())
self.alternatives_.append (target_instance) | [
"def",
"add_alternative",
"(",
"self",
",",
"target_instance",
")",
":",
"assert",
"isinstance",
"(",
"target_instance",
",",
"AbstractTarget",
")",
"if",
"self",
".",
"built_main_targets_",
":",
"raise",
"IllegalOperation",
"(",
"\"add-alternative called when main targets are already created for project '%s'\"",
"%",
"self",
".",
"full_name",
"(",
")",
")",
"self",
".",
"alternatives_",
".",
"append",
"(",
"target_instance",
")"
] | Add new target alternative. | [
"Add",
"new",
"target",
"alternative",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L484-L491 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.has_main_target | def has_main_target (self, name):
"""Tells if a main target with the specified name exists."""
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets()
return name in self.main_target_ | python | def has_main_target (self, name):
"""Tells if a main target with the specified name exists."""
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets()
return name in self.main_target_ | [
"def",
"has_main_target",
"(",
"self",
",",
"name",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"if",
"not",
"self",
".",
"built_main_targets_",
":",
"self",
".",
"build_main_targets",
"(",
")",
"return",
"name",
"in",
"self",
".",
"main_target_"
] | Tells if a main target with the specified name exists. | [
"Tells",
"if",
"a",
"main",
"target",
"with",
"the",
"specified",
"name",
"exists",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L500-L506 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.create_main_target | def create_main_target (self, name):
""" Returns a 'MainTarget' class instance corresponding to the 'name'.
"""
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets ()
return self.main_targets_.get (name, None) | python | def create_main_target (self, name):
""" Returns a 'MainTarget' class instance corresponding to the 'name'.
"""
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets ()
return self.main_targets_.get (name, None) | [
"def",
"create_main_target",
"(",
"self",
",",
"name",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"if",
"not",
"self",
".",
"built_main_targets_",
":",
"self",
".",
"build_main_targets",
"(",
")",
"return",
"self",
".",
"main_targets_",
".",
"get",
"(",
"name",
",",
"None",
")"
] | Returns a 'MainTarget' class instance corresponding to the 'name'. | [
"Returns",
"a",
"MainTarget",
"class",
"instance",
"corresponding",
"to",
"the",
"name",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L508-L515 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.find_really | def find_really(self, id):
""" Find and return the target with the specified id, treated
relative to self.
"""
assert isinstance(id, basestring)
result = None
current_location = self.get ('location')
__re_split_project_target = re.compile (r'(.*)//(.*)')
split = __re_split_project_target.match (id)
project_part = None
target_part = None
if split:
project_part = split.group(1)
target_part = split.group(2)
if not target_part:
get_manager().errors()(
'Project ID, "{}", is not a valid target reference. There should '
'be either a target name after the "//" or the "//" should be removed '
'from the target reference.'
.format(id)
)
project_registry = self.project_.manager ().projects ()
extra_error_message = ''
if project_part:
# There's explicit project part in id. Looks up the
# project and pass the request to it.
pm = project_registry.find (project_part, current_location)
if pm:
project_target = project_registry.target (pm)
result = project_target.find (target_part, no_error=1)
else:
extra_error_message = "error: could not find project '$(project_part)'"
else:
# Interpret target-name as name of main target
# Need to do this before checking for file. Consider this:
#
# exe test : test.cpp ;
# install s : test : <location>. ;
#
# After first build we'll have target 'test' in Jamfile and file
# 'test' on the disk. We need target to override the file.
result = None
if self.has_main_target(id):
result = self.main_target(id)
if not result:
result = FileReference (self.manager_, id, self.project_)
if not result.exists ():
# File actually does not exist.
# Reset 'target' so that an error is issued.
result = None
if not result:
# Interpret id as project-id
project_module = project_registry.find (id, current_location)
if project_module:
result = project_registry.target (project_module)
return result | python | def find_really(self, id):
""" Find and return the target with the specified id, treated
relative to self.
"""
assert isinstance(id, basestring)
result = None
current_location = self.get ('location')
__re_split_project_target = re.compile (r'(.*)//(.*)')
split = __re_split_project_target.match (id)
project_part = None
target_part = None
if split:
project_part = split.group(1)
target_part = split.group(2)
if not target_part:
get_manager().errors()(
'Project ID, "{}", is not a valid target reference. There should '
'be either a target name after the "//" or the "//" should be removed '
'from the target reference.'
.format(id)
)
project_registry = self.project_.manager ().projects ()
extra_error_message = ''
if project_part:
# There's explicit project part in id. Looks up the
# project and pass the request to it.
pm = project_registry.find (project_part, current_location)
if pm:
project_target = project_registry.target (pm)
result = project_target.find (target_part, no_error=1)
else:
extra_error_message = "error: could not find project '$(project_part)'"
else:
# Interpret target-name as name of main target
# Need to do this before checking for file. Consider this:
#
# exe test : test.cpp ;
# install s : test : <location>. ;
#
# After first build we'll have target 'test' in Jamfile and file
# 'test' on the disk. We need target to override the file.
result = None
if self.has_main_target(id):
result = self.main_target(id)
if not result:
result = FileReference (self.manager_, id, self.project_)
if not result.exists ():
# File actually does not exist.
# Reset 'target' so that an error is issued.
result = None
if not result:
# Interpret id as project-id
project_module = project_registry.find (id, current_location)
if project_module:
result = project_registry.target (project_module)
return result | [
"def",
"find_really",
"(",
"self",
",",
"id",
")",
":",
"assert",
"isinstance",
"(",
"id",
",",
"basestring",
")",
"result",
"=",
"None",
"current_location",
"=",
"self",
".",
"get",
"(",
"'location'",
")",
"__re_split_project_target",
"=",
"re",
".",
"compile",
"(",
"r'(.*)//(.*)'",
")",
"split",
"=",
"__re_split_project_target",
".",
"match",
"(",
"id",
")",
"project_part",
"=",
"None",
"target_part",
"=",
"None",
"if",
"split",
":",
"project_part",
"=",
"split",
".",
"group",
"(",
"1",
")",
"target_part",
"=",
"split",
".",
"group",
"(",
"2",
")",
"if",
"not",
"target_part",
":",
"get_manager",
"(",
")",
".",
"errors",
"(",
")",
"(",
"'Project ID, \"{}\", is not a valid target reference. There should '",
"'be either a target name after the \"//\" or the \"//\" should be removed '",
"'from the target reference.'",
".",
"format",
"(",
"id",
")",
")",
"project_registry",
"=",
"self",
".",
"project_",
".",
"manager",
"(",
")",
".",
"projects",
"(",
")",
"extra_error_message",
"=",
"''",
"if",
"project_part",
":",
"# There's explicit project part in id. Looks up the",
"# project and pass the request to it.",
"pm",
"=",
"project_registry",
".",
"find",
"(",
"project_part",
",",
"current_location",
")",
"if",
"pm",
":",
"project_target",
"=",
"project_registry",
".",
"target",
"(",
"pm",
")",
"result",
"=",
"project_target",
".",
"find",
"(",
"target_part",
",",
"no_error",
"=",
"1",
")",
"else",
":",
"extra_error_message",
"=",
"\"error: could not find project '$(project_part)'\"",
"else",
":",
"# Interpret target-name as name of main target",
"# Need to do this before checking for file. Consider this:",
"#",
"# exe test : test.cpp ;",
"# install s : test : <location>. ;",
"#",
"# After first build we'll have target 'test' in Jamfile and file",
"# 'test' on the disk. We need target to override the file.",
"result",
"=",
"None",
"if",
"self",
".",
"has_main_target",
"(",
"id",
")",
":",
"result",
"=",
"self",
".",
"main_target",
"(",
"id",
")",
"if",
"not",
"result",
":",
"result",
"=",
"FileReference",
"(",
"self",
".",
"manager_",
",",
"id",
",",
"self",
".",
"project_",
")",
"if",
"not",
"result",
".",
"exists",
"(",
")",
":",
"# File actually does not exist.",
"# Reset 'target' so that an error is issued.",
"result",
"=",
"None",
"if",
"not",
"result",
":",
"# Interpret id as project-id",
"project_module",
"=",
"project_registry",
".",
"find",
"(",
"id",
",",
"current_location",
")",
"if",
"project_module",
":",
"result",
"=",
"project_registry",
".",
"target",
"(",
"project_module",
")",
"return",
"result"
] | Find and return the target with the specified id, treated
relative to self. | [
"Find",
"and",
"return",
"the",
"target",
"with",
"the",
"specified",
"id",
"treated",
"relative",
"to",
"self",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L518-L588 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | ProjectTarget.add_constant | def add_constant(self, name, value, path=0):
"""Adds a new constant for this project.
The constant will be available for use in Jamfile
module for this project. If 'path' is true,
the constant will be interpreted relatively
to the location of project.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(value, basestring)
assert isinstance(path, int) # will also match bools
if path:
l = self.location_
if not l:
# Project corresponding to config files do not have
# 'location' attribute, but do have source location.
# It might be more reasonable to make every project have
# a location and use some other approach to prevent buildable
# targets in config files, but that's for later.
l = self.get('source-location')
value = os.path.join(l, value[0])
# Now make the value absolute path. Constants should be in
# platform-native form.
value = [os.path.normpath(os.path.join(os.getcwd(), value))]
self.constants_[name] = value
bjam.call("set-variable", self.project_module(), name, value) | python | def add_constant(self, name, value, path=0):
"""Adds a new constant for this project.
The constant will be available for use in Jamfile
module for this project. If 'path' is true,
the constant will be interpreted relatively
to the location of project.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(value, basestring)
assert isinstance(path, int) # will also match bools
if path:
l = self.location_
if not l:
# Project corresponding to config files do not have
# 'location' attribute, but do have source location.
# It might be more reasonable to make every project have
# a location and use some other approach to prevent buildable
# targets in config files, but that's for later.
l = self.get('source-location')
value = os.path.join(l, value[0])
# Now make the value absolute path. Constants should be in
# platform-native form.
value = [os.path.normpath(os.path.join(os.getcwd(), value))]
self.constants_[name] = value
bjam.call("set-variable", self.project_module(), name, value) | [
"def",
"add_constant",
"(",
"self",
",",
"name",
",",
"value",
",",
"path",
"=",
"0",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"value",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"path",
",",
"int",
")",
"# will also match bools",
"if",
"path",
":",
"l",
"=",
"self",
".",
"location_",
"if",
"not",
"l",
":",
"# Project corresponding to config files do not have",
"# 'location' attribute, but do have source location.",
"# It might be more reasonable to make every project have",
"# a location and use some other approach to prevent buildable",
"# targets in config files, but that's for later.",
"l",
"=",
"self",
".",
"get",
"(",
"'source-location'",
")",
"value",
"=",
"os",
".",
"path",
".",
"join",
"(",
"l",
",",
"value",
"[",
"0",
"]",
")",
"# Now make the value absolute path. Constants should be in",
"# platform-native form.",
"value",
"=",
"[",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"value",
")",
")",
"]",
"self",
".",
"constants_",
"[",
"name",
"]",
"=",
"value",
"bjam",
".",
"call",
"(",
"\"set-variable\"",
",",
"self",
".",
"project_module",
"(",
")",
",",
"name",
",",
"value",
")"
] | Adds a new constant for this project.
The constant will be available for use in Jamfile
module for this project. If 'path' is true,
the constant will be interpreted relatively
to the location of project. | [
"Adds",
"a",
"new",
"constant",
"for",
"this",
"project",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L619-L646 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | MainTarget.add_alternative | def add_alternative (self, target):
""" Add a new alternative for this target.
"""
assert isinstance(target, BasicTarget)
d = target.default_build ()
if self.alternatives_ and self.default_build_ != d:
get_manager().errors()("default build must be identical in all alternatives\n"
"main target is '%s'\n"
"with '%s'\n"
"differing from previous default build: '%s'" % (self.full_name (), d.raw (), self.default_build_.raw ()))
else:
self.default_build_ = d
self.alternatives_.append (target) | python | def add_alternative (self, target):
""" Add a new alternative for this target.
"""
assert isinstance(target, BasicTarget)
d = target.default_build ()
if self.alternatives_ and self.default_build_ != d:
get_manager().errors()("default build must be identical in all alternatives\n"
"main target is '%s'\n"
"with '%s'\n"
"differing from previous default build: '%s'" % (self.full_name (), d.raw (), self.default_build_.raw ()))
else:
self.default_build_ = d
self.alternatives_.append (target) | [
"def",
"add_alternative",
"(",
"self",
",",
"target",
")",
":",
"assert",
"isinstance",
"(",
"target",
",",
"BasicTarget",
")",
"d",
"=",
"target",
".",
"default_build",
"(",
")",
"if",
"self",
".",
"alternatives_",
"and",
"self",
".",
"default_build_",
"!=",
"d",
":",
"get_manager",
"(",
")",
".",
"errors",
"(",
")",
"(",
"\"default build must be identical in all alternatives\\n\"",
"\"main target is '%s'\\n\"",
"\"with '%s'\\n\"",
"\"differing from previous default build: '%s'\"",
"%",
"(",
"self",
".",
"full_name",
"(",
")",
",",
"d",
".",
"raw",
"(",
")",
",",
"self",
".",
"default_build_",
".",
"raw",
"(",
")",
")",
")",
"else",
":",
"self",
".",
"default_build_",
"=",
"d",
"self",
".",
"alternatives_",
".",
"append",
"(",
"target",
")"
] | Add a new alternative for this target. | [
"Add",
"a",
"new",
"alternative",
"for",
"this",
"target",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L676-L691 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | MainTarget.__select_alternatives | def __select_alternatives (self, property_set_, debug):
""" Returns the best viable alternative for this property_set
See the documentation for selection rules.
# TODO: shouldn't this be 'alternative' (singular)?
"""
# When selecting alternatives we have to consider defaults,
# for example:
# lib l : l.cpp : <variant>debug ;
# lib l : l_opt.cpp : <variant>release ;
# won't work unless we add default value <variant>debug.
assert isinstance(property_set_, property_set.PropertySet)
assert isinstance(debug, int) # also matches bools
property_set_ = property_set_.add_defaults ()
# The algorithm: we keep the current best viable alternative.
# When we've got new best viable alternative, we compare it
# with the current one.
best = None
best_properties = None
if len (self.alternatives_) == 0:
return None
if len (self.alternatives_) == 1:
return self.alternatives_ [0]
if debug:
print "Property set for selection:", property_set_
for v in self.alternatives_:
properties = v.match (property_set_, debug)
if properties is not None:
if not best:
best = v
best_properties = properties
else:
if b2.util.set.equal (properties, best_properties):
return None
elif b2.util.set.contains (properties, best_properties):
# Do nothing, this alternative is worse
pass
elif b2.util.set.contains (best_properties, properties):
best = v
best_properties = properties
else:
return None
return best | python | def __select_alternatives (self, property_set_, debug):
""" Returns the best viable alternative for this property_set
See the documentation for selection rules.
# TODO: shouldn't this be 'alternative' (singular)?
"""
# When selecting alternatives we have to consider defaults,
# for example:
# lib l : l.cpp : <variant>debug ;
# lib l : l_opt.cpp : <variant>release ;
# won't work unless we add default value <variant>debug.
assert isinstance(property_set_, property_set.PropertySet)
assert isinstance(debug, int) # also matches bools
property_set_ = property_set_.add_defaults ()
# The algorithm: we keep the current best viable alternative.
# When we've got new best viable alternative, we compare it
# with the current one.
best = None
best_properties = None
if len (self.alternatives_) == 0:
return None
if len (self.alternatives_) == 1:
return self.alternatives_ [0]
if debug:
print "Property set for selection:", property_set_
for v in self.alternatives_:
properties = v.match (property_set_, debug)
if properties is not None:
if not best:
best = v
best_properties = properties
else:
if b2.util.set.equal (properties, best_properties):
return None
elif b2.util.set.contains (properties, best_properties):
# Do nothing, this alternative is worse
pass
elif b2.util.set.contains (best_properties, properties):
best = v
best_properties = properties
else:
return None
return best | [
"def",
"__select_alternatives",
"(",
"self",
",",
"property_set_",
",",
"debug",
")",
":",
"# When selecting alternatives we have to consider defaults,",
"# for example:",
"# lib l : l.cpp : <variant>debug ;",
"# lib l : l_opt.cpp : <variant>release ;",
"# won't work unless we add default value <variant>debug.",
"assert",
"isinstance",
"(",
"property_set_",
",",
"property_set",
".",
"PropertySet",
")",
"assert",
"isinstance",
"(",
"debug",
",",
"int",
")",
"# also matches bools",
"property_set_",
"=",
"property_set_",
".",
"add_defaults",
"(",
")",
"# The algorithm: we keep the current best viable alternative.",
"# When we've got new best viable alternative, we compare it",
"# with the current one.",
"best",
"=",
"None",
"best_properties",
"=",
"None",
"if",
"len",
"(",
"self",
".",
"alternatives_",
")",
"==",
"0",
":",
"return",
"None",
"if",
"len",
"(",
"self",
".",
"alternatives_",
")",
"==",
"1",
":",
"return",
"self",
".",
"alternatives_",
"[",
"0",
"]",
"if",
"debug",
":",
"print",
"\"Property set for selection:\"",
",",
"property_set_",
"for",
"v",
"in",
"self",
".",
"alternatives_",
":",
"properties",
"=",
"v",
".",
"match",
"(",
"property_set_",
",",
"debug",
")",
"if",
"properties",
"is",
"not",
"None",
":",
"if",
"not",
"best",
":",
"best",
"=",
"v",
"best_properties",
"=",
"properties",
"else",
":",
"if",
"b2",
".",
"util",
".",
"set",
".",
"equal",
"(",
"properties",
",",
"best_properties",
")",
":",
"return",
"None",
"elif",
"b2",
".",
"util",
".",
"set",
".",
"contains",
"(",
"properties",
",",
"best_properties",
")",
":",
"# Do nothing, this alternative is worse",
"pass",
"elif",
"b2",
".",
"util",
".",
"set",
".",
"contains",
"(",
"best_properties",
",",
"properties",
")",
":",
"best",
"=",
"v",
"best_properties",
"=",
"properties",
"else",
":",
"return",
"None",
"return",
"best"
] | Returns the best viable alternative for this property_set
See the documentation for selection rules.
# TODO: shouldn't this be 'alternative' (singular)? | [
"Returns",
"the",
"best",
"viable",
"alternative",
"for",
"this",
"property_set",
"See",
"the",
"documentation",
"for",
"selection",
"rules",
".",
"#",
"TODO",
":",
"shouldn",
"t",
"this",
"be",
"alternative",
"(",
"singular",
")",
"?"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L693-L746 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | MainTarget.generate | def generate (self, ps):
""" Select an alternative for this main target, by finding all alternatives
which requirements are satisfied by 'properties' and picking the one with
longest requirements set.
Returns the result of calling 'generate' on that alternative.
"""
assert isinstance(ps, property_set.PropertySet)
self.manager_.targets ().start_building (self)
# We want composite properties in build request act as if
# all the properties it expands too are explicitly specified.
ps = ps.expand ()
all_property_sets = self.apply_default_build (ps)
result = GenerateResult ()
for p in all_property_sets:
result.extend (self.__generate_really (p))
self.manager_.targets ().end_building (self)
return result | python | def generate (self, ps):
""" Select an alternative for this main target, by finding all alternatives
which requirements are satisfied by 'properties' and picking the one with
longest requirements set.
Returns the result of calling 'generate' on that alternative.
"""
assert isinstance(ps, property_set.PropertySet)
self.manager_.targets ().start_building (self)
# We want composite properties in build request act as if
# all the properties it expands too are explicitly specified.
ps = ps.expand ()
all_property_sets = self.apply_default_build (ps)
result = GenerateResult ()
for p in all_property_sets:
result.extend (self.__generate_really (p))
self.manager_.targets ().end_building (self)
return result | [
"def",
"generate",
"(",
"self",
",",
"ps",
")",
":",
"assert",
"isinstance",
"(",
"ps",
",",
"property_set",
".",
"PropertySet",
")",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"start_building",
"(",
"self",
")",
"# We want composite properties in build request act as if",
"# all the properties it expands too are explicitly specified.",
"ps",
"=",
"ps",
".",
"expand",
"(",
")",
"all_property_sets",
"=",
"self",
".",
"apply_default_build",
"(",
"ps",
")",
"result",
"=",
"GenerateResult",
"(",
")",
"for",
"p",
"in",
"all_property_sets",
":",
"result",
".",
"extend",
"(",
"self",
".",
"__generate_really",
"(",
"p",
")",
")",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"end_building",
"(",
"self",
")",
"return",
"result"
] | Select an alternative for this main target, by finding all alternatives
which requirements are satisfied by 'properties' and picking the one with
longest requirements set.
Returns the result of calling 'generate' on that alternative. | [
"Select",
"an",
"alternative",
"for",
"this",
"main",
"target",
"by",
"finding",
"all",
"alternatives",
"which",
"requirements",
"are",
"satisfied",
"by",
"properties",
"and",
"picking",
"the",
"one",
"with",
"longest",
"requirements",
"set",
".",
"Returns",
"the",
"result",
"of",
"calling",
"generate",
"on",
"that",
"alternative",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L752-L774 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | MainTarget.__generate_really | def __generate_really (self, prop_set):
""" Generates the main target with the given property set
and returns a list which first element is property_set object
containing usage_requirements of generated target and with
generated virtual target in other elements. It's possible
that no targets are generated.
"""
assert isinstance(prop_set, property_set.PropertySet)
best_alternative = self.__select_alternatives (prop_set, debug=0)
self.best_alternative = best_alternative
if not best_alternative:
# FIXME: revive.
# self.__select_alternatives(prop_set, debug=1)
self.manager_.errors()(
"No best alternative for '%s'.\n"
% (self.full_name(),))
result = best_alternative.generate (prop_set)
# Now return virtual targets for the only alternative
return result | python | def __generate_really (self, prop_set):
""" Generates the main target with the given property set
and returns a list which first element is property_set object
containing usage_requirements of generated target and with
generated virtual target in other elements. It's possible
that no targets are generated.
"""
assert isinstance(prop_set, property_set.PropertySet)
best_alternative = self.__select_alternatives (prop_set, debug=0)
self.best_alternative = best_alternative
if not best_alternative:
# FIXME: revive.
# self.__select_alternatives(prop_set, debug=1)
self.manager_.errors()(
"No best alternative for '%s'.\n"
% (self.full_name(),))
result = best_alternative.generate (prop_set)
# Now return virtual targets for the only alternative
return result | [
"def",
"__generate_really",
"(",
"self",
",",
"prop_set",
")",
":",
"assert",
"isinstance",
"(",
"prop_set",
",",
"property_set",
".",
"PropertySet",
")",
"best_alternative",
"=",
"self",
".",
"__select_alternatives",
"(",
"prop_set",
",",
"debug",
"=",
"0",
")",
"self",
".",
"best_alternative",
"=",
"best_alternative",
"if",
"not",
"best_alternative",
":",
"# FIXME: revive.",
"# self.__select_alternatives(prop_set, debug=1)",
"self",
".",
"manager_",
".",
"errors",
"(",
")",
"(",
"\"No best alternative for '%s'.\\n\"",
"%",
"(",
"self",
".",
"full_name",
"(",
")",
",",
")",
")",
"result",
"=",
"best_alternative",
".",
"generate",
"(",
"prop_set",
")",
"# Now return virtual targets for the only alternative",
"return",
"result"
] | Generates the main target with the given property set
and returns a list which first element is property_set object
containing usage_requirements of generated target and with
generated virtual target in other elements. It's possible
that no targets are generated. | [
"Generates",
"the",
"main",
"target",
"with",
"the",
"given",
"property",
"set",
"and",
"returns",
"a",
"list",
"which",
"first",
"element",
"is",
"property_set",
"object",
"containing",
"usage_requirements",
"of",
"generated",
"target",
"and",
"with",
"generated",
"virtual",
"target",
"in",
"other",
"elements",
".",
"It",
"s",
"possible",
"that",
"no",
"targets",
"are",
"generated",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L776-L797 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | BasicTarget.sources | def sources (self):
""" Returns the list of AbstractTargets which are used as sources.
The extra properties specified for sources are not represented.
The only used of this rule at the moment is the '--dump-tests'
feature of the test system.
"""
if self.source_targets_ == None:
self.source_targets_ = []
for s in self.sources_:
self.source_targets_.append(resolve_reference(s, self.project_)[0])
return self.source_targets_ | python | def sources (self):
""" Returns the list of AbstractTargets which are used as sources.
The extra properties specified for sources are not represented.
The only used of this rule at the moment is the '--dump-tests'
feature of the test system.
"""
if self.source_targets_ == None:
self.source_targets_ = []
for s in self.sources_:
self.source_targets_.append(resolve_reference(s, self.project_)[0])
return self.source_targets_ | [
"def",
"sources",
"(",
"self",
")",
":",
"if",
"self",
".",
"source_targets_",
"==",
"None",
":",
"self",
".",
"source_targets_",
"=",
"[",
"]",
"for",
"s",
"in",
"self",
".",
"sources_",
":",
"self",
".",
"source_targets_",
".",
"append",
"(",
"resolve_reference",
"(",
"s",
",",
"self",
".",
"project_",
")",
"[",
"0",
"]",
")",
"return",
"self",
".",
"source_targets_"
] | Returns the list of AbstractTargets which are used as sources.
The extra properties specified for sources are not represented.
The only used of this rule at the moment is the '--dump-tests'
feature of the test system. | [
"Returns",
"the",
"list",
"of",
"AbstractTargets",
"which",
"are",
"used",
"as",
"sources",
".",
"The",
"extra",
"properties",
"specified",
"for",
"sources",
"are",
"not",
"represented",
".",
"The",
"only",
"used",
"of",
"this",
"rule",
"at",
"the",
"moment",
"is",
"the",
"--",
"dump",
"-",
"tests",
"feature",
"of",
"the",
"test",
"system",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L939-L950 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | BasicTarget.common_properties | def common_properties (self, build_request, requirements):
""" Given build request and requirements, return properties
common to dependency build request and target build
properties.
"""
# For optimization, we add free unconditional requirements directly,
# without using complex algorithsm.
# This gives the complex algorithm better chance of caching results.
# The exact effect of this "optimization" is no longer clear
assert isinstance(build_request, property_set.PropertySet)
assert isinstance(requirements, property_set.PropertySet)
free_unconditional = []
other = []
for p in requirements.all():
if p.feature.free and not p.condition and p.feature.name != 'conditional':
free_unconditional.append(p)
else:
other.append(p)
other = property_set.create(other)
key = (build_request, other)
if key not in self.request_cache:
self.request_cache[key] = self.__common_properties2 (build_request, other)
return self.request_cache[key].add_raw(free_unconditional) | python | def common_properties (self, build_request, requirements):
""" Given build request and requirements, return properties
common to dependency build request and target build
properties.
"""
# For optimization, we add free unconditional requirements directly,
# without using complex algorithsm.
# This gives the complex algorithm better chance of caching results.
# The exact effect of this "optimization" is no longer clear
assert isinstance(build_request, property_set.PropertySet)
assert isinstance(requirements, property_set.PropertySet)
free_unconditional = []
other = []
for p in requirements.all():
if p.feature.free and not p.condition and p.feature.name != 'conditional':
free_unconditional.append(p)
else:
other.append(p)
other = property_set.create(other)
key = (build_request, other)
if key not in self.request_cache:
self.request_cache[key] = self.__common_properties2 (build_request, other)
return self.request_cache[key].add_raw(free_unconditional) | [
"def",
"common_properties",
"(",
"self",
",",
"build_request",
",",
"requirements",
")",
":",
"# For optimization, we add free unconditional requirements directly,",
"# without using complex algorithsm.",
"# This gives the complex algorithm better chance of caching results.",
"# The exact effect of this \"optimization\" is no longer clear",
"assert",
"isinstance",
"(",
"build_request",
",",
"property_set",
".",
"PropertySet",
")",
"assert",
"isinstance",
"(",
"requirements",
",",
"property_set",
".",
"PropertySet",
")",
"free_unconditional",
"=",
"[",
"]",
"other",
"=",
"[",
"]",
"for",
"p",
"in",
"requirements",
".",
"all",
"(",
")",
":",
"if",
"p",
".",
"feature",
".",
"free",
"and",
"not",
"p",
".",
"condition",
"and",
"p",
".",
"feature",
".",
"name",
"!=",
"'conditional'",
":",
"free_unconditional",
".",
"append",
"(",
"p",
")",
"else",
":",
"other",
".",
"append",
"(",
"p",
")",
"other",
"=",
"property_set",
".",
"create",
"(",
"other",
")",
"key",
"=",
"(",
"build_request",
",",
"other",
")",
"if",
"key",
"not",
"in",
"self",
".",
"request_cache",
":",
"self",
".",
"request_cache",
"[",
"key",
"]",
"=",
"self",
".",
"__common_properties2",
"(",
"build_request",
",",
"other",
")",
"return",
"self",
".",
"request_cache",
"[",
"key",
"]",
".",
"add_raw",
"(",
"free_unconditional",
")"
] | Given build request and requirements, return properties
common to dependency build request and target build
properties. | [
"Given",
"build",
"request",
"and",
"requirements",
"return",
"properties",
"common",
"to",
"dependency",
"build",
"request",
"and",
"target",
"build",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L958-L982 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | BasicTarget.match | def match (self, property_set_, debug):
""" Returns the alternative condition for this alternative, if
the condition is satisfied by 'property_set'.
"""
# The condition is composed of all base non-conditional properties.
# It's not clear if we should expand 'self.requirements_' or not.
# For one thing, it would be nice to be able to put
# <toolset>msvc-6.0
# in requirements.
# On the other hand, if we have <variant>release in condition it
# does not make sense to require <optimization>full to be in
# build request just to select this variant.
assert isinstance(property_set_, property_set.PropertySet)
bcondition = self.requirements_.base ()
ccondition = self.requirements_.conditional ()
condition = b2.util.set.difference (bcondition, ccondition)
if debug:
print " next alternative: required properties:", [str(p) for p in condition]
if b2.util.set.contains (condition, property_set_.all()):
if debug:
print " matched"
return condition
else:
return None | python | def match (self, property_set_, debug):
""" Returns the alternative condition for this alternative, if
the condition is satisfied by 'property_set'.
"""
# The condition is composed of all base non-conditional properties.
# It's not clear if we should expand 'self.requirements_' or not.
# For one thing, it would be nice to be able to put
# <toolset>msvc-6.0
# in requirements.
# On the other hand, if we have <variant>release in condition it
# does not make sense to require <optimization>full to be in
# build request just to select this variant.
assert isinstance(property_set_, property_set.PropertySet)
bcondition = self.requirements_.base ()
ccondition = self.requirements_.conditional ()
condition = b2.util.set.difference (bcondition, ccondition)
if debug:
print " next alternative: required properties:", [str(p) for p in condition]
if b2.util.set.contains (condition, property_set_.all()):
if debug:
print " matched"
return condition
else:
return None | [
"def",
"match",
"(",
"self",
",",
"property_set_",
",",
"debug",
")",
":",
"# The condition is composed of all base non-conditional properties.",
"# It's not clear if we should expand 'self.requirements_' or not.",
"# For one thing, it would be nice to be able to put",
"# <toolset>msvc-6.0",
"# in requirements.",
"# On the other hand, if we have <variant>release in condition it",
"# does not make sense to require <optimization>full to be in",
"# build request just to select this variant.",
"assert",
"isinstance",
"(",
"property_set_",
",",
"property_set",
".",
"PropertySet",
")",
"bcondition",
"=",
"self",
".",
"requirements_",
".",
"base",
"(",
")",
"ccondition",
"=",
"self",
".",
"requirements_",
".",
"conditional",
"(",
")",
"condition",
"=",
"b2",
".",
"util",
".",
"set",
".",
"difference",
"(",
"bcondition",
",",
"ccondition",
")",
"if",
"debug",
":",
"print",
"\" next alternative: required properties:\"",
",",
"[",
"str",
"(",
"p",
")",
"for",
"p",
"in",
"condition",
"]",
"if",
"b2",
".",
"util",
".",
"set",
".",
"contains",
"(",
"condition",
",",
"property_set_",
".",
"all",
"(",
")",
")",
":",
"if",
"debug",
":",
"print",
"\" matched\"",
"return",
"condition",
"else",
":",
"return",
"None"
] | Returns the alternative condition for this alternative, if
the condition is satisfied by 'property_set'. | [
"Returns",
"the",
"alternative",
"condition",
"for",
"this",
"alternative",
"if",
"the",
"condition",
"is",
"satisfied",
"by",
"property_set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L1103-L1131 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | BasicTarget.generate_dependency_properties | def generate_dependency_properties(self, properties, ps):
""" Takes a target reference, which might be either target id
or a dependency property, and generates that target using
'property_set' as build request.
Returns a tuple (result, usage_requirements).
"""
assert is_iterable_typed(properties, property.Property)
assert isinstance(ps, property_set.PropertySet)
result_properties = []
usage_requirements = []
for p in properties:
result = generate_from_reference(p.value, self.project_, ps)
for t in result.targets():
result_properties.append(property.Property(p.feature, t))
usage_requirements += result.usage_requirements().all()
return (result_properties, usage_requirements) | python | def generate_dependency_properties(self, properties, ps):
""" Takes a target reference, which might be either target id
or a dependency property, and generates that target using
'property_set' as build request.
Returns a tuple (result, usage_requirements).
"""
assert is_iterable_typed(properties, property.Property)
assert isinstance(ps, property_set.PropertySet)
result_properties = []
usage_requirements = []
for p in properties:
result = generate_from_reference(p.value, self.project_, ps)
for t in result.targets():
result_properties.append(property.Property(p.feature, t))
usage_requirements += result.usage_requirements().all()
return (result_properties, usage_requirements) | [
"def",
"generate_dependency_properties",
"(",
"self",
",",
"properties",
",",
"ps",
")",
":",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"property",
".",
"Property",
")",
"assert",
"isinstance",
"(",
"ps",
",",
"property_set",
".",
"PropertySet",
")",
"result_properties",
"=",
"[",
"]",
"usage_requirements",
"=",
"[",
"]",
"for",
"p",
"in",
"properties",
":",
"result",
"=",
"generate_from_reference",
"(",
"p",
".",
"value",
",",
"self",
".",
"project_",
",",
"ps",
")",
"for",
"t",
"in",
"result",
".",
"targets",
"(",
")",
":",
"result_properties",
".",
"append",
"(",
"property",
".",
"Property",
"(",
"p",
".",
"feature",
",",
"t",
")",
")",
"usage_requirements",
"+=",
"result",
".",
"usage_requirements",
"(",
")",
".",
"all",
"(",
")",
"return",
"(",
"result_properties",
",",
"usage_requirements",
")"
] | Takes a target reference, which might be either target id
or a dependency property, and generates that target using
'property_set' as build request.
Returns a tuple (result, usage_requirements). | [
"Takes",
"a",
"target",
"reference",
"which",
"might",
"be",
"either",
"target",
"id",
"or",
"a",
"dependency",
"property",
"and",
"generates",
"that",
"target",
"using",
"property_set",
"as",
"build",
"request",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L1147-L1167 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | BasicTarget.generate | def generate (self, ps):
""" Determines final build properties, generates sources,
and calls 'construct'. This method should not be
overridden.
"""
assert isinstance(ps, property_set.PropertySet)
self.manager_.errors().push_user_context(
"Generating target " + self.full_name(), self.user_context_)
if self.manager().targets().logging():
self.manager().targets().log(
"Building target '%s'" % self.name_)
self.manager().targets().increase_indent ()
self.manager().targets().log(
"Build request: '%s'" % str (ps.raw ()))
cf = self.manager().command_line_free_features()
self.manager().targets().log(
"Command line free features: '%s'" % str (cf.raw ()))
self.manager().targets().log(
"Target requirements: %s'" % str (self.requirements().raw ()))
self.manager().targets().push_target(self)
if ps not in self.generated_:
# Apply free features form the command line. If user
# said
# define=FOO
# he most likely want this define to be set for all compiles.
ps = ps.refine(self.manager().command_line_free_features())
rproperties = self.common_properties (ps, self.requirements_)
self.manager().targets().log(
"Common properties are '%s'" % str (rproperties))
if rproperties.get("<build>") != ["no"]:
result = GenerateResult ()
properties = rproperties.non_dependency ()
(p, u) = self.generate_dependency_properties (rproperties.dependency (), rproperties)
properties += p
assert all(isinstance(p, property.Property) for p in properties)
usage_requirements = u
(source_targets, u) = self.generate_dependency_targets (self.sources_, rproperties)
usage_requirements += u
self.manager_.targets().log(
"Usage requirements for '%s' are '%s'" % (self.name_, usage_requirements))
# FIXME:
rproperties = property_set.create(properties + usage_requirements)
usage_requirements = property_set.create (usage_requirements)
self.manager_.targets().log(
"Build properties: '%s'" % str(rproperties))
source_targets += rproperties.get('<source>')
# We might get duplicate sources, for example if
# we link to two library which have the same <library> in
# usage requirements.
# Use stable sort, since for some targets the order is
# important. E.g. RUN_PY target need python source to come
# first.
source_targets = unique(source_targets, stable=True)
# FIXME: figure why this call messes up source_targets in-place
result = self.construct (self.name_, source_targets[:], rproperties)
if result:
assert len(result) == 2
gur = result [0]
result = result [1]
if self.always_:
for t in result:
t.always()
s = self.create_subvariant (
result,
self.manager().virtual_targets().recent_targets(), ps,
source_targets, rproperties, usage_requirements)
self.manager().virtual_targets().clear_recent_targets()
ur = self.compute_usage_requirements (s)
ur = ur.add (gur)
s.set_usage_requirements (ur)
self.manager_.targets().log (
"Usage requirements from '%s' are '%s'" %
(self.name(), str(rproperties)))
self.generated_[ps] = GenerateResult (ur, result)
else:
self.generated_[ps] = GenerateResult (property_set.empty(), [])
else:
# If we just see <build>no, we cannot produce any reasonable
# diagnostics. The code that adds this property is expected
# to explain why a target is not built, for example using
# the configure.log-component-configuration function.
# If this target fails to build, add <build>no to properties
# to cause any parent target to fail to build. Except that it
# - does not work now, since we check for <build>no only in
# common properties, but not in properties that came from
# dependencies
# - it's not clear if that's a good idea anyway. The alias
# target, for example, should not fail to build if a dependency
# fails.
self.generated_[ps] = GenerateResult(
property_set.create(["<build>no"]), [])
else:
self.manager().targets().log ("Already built")
self.manager().targets().pop_target()
self.manager().targets().decrease_indent()
return self.generated_[ps] | python | def generate (self, ps):
""" Determines final build properties, generates sources,
and calls 'construct'. This method should not be
overridden.
"""
assert isinstance(ps, property_set.PropertySet)
self.manager_.errors().push_user_context(
"Generating target " + self.full_name(), self.user_context_)
if self.manager().targets().logging():
self.manager().targets().log(
"Building target '%s'" % self.name_)
self.manager().targets().increase_indent ()
self.manager().targets().log(
"Build request: '%s'" % str (ps.raw ()))
cf = self.manager().command_line_free_features()
self.manager().targets().log(
"Command line free features: '%s'" % str (cf.raw ()))
self.manager().targets().log(
"Target requirements: %s'" % str (self.requirements().raw ()))
self.manager().targets().push_target(self)
if ps not in self.generated_:
# Apply free features form the command line. If user
# said
# define=FOO
# he most likely want this define to be set for all compiles.
ps = ps.refine(self.manager().command_line_free_features())
rproperties = self.common_properties (ps, self.requirements_)
self.manager().targets().log(
"Common properties are '%s'" % str (rproperties))
if rproperties.get("<build>") != ["no"]:
result = GenerateResult ()
properties = rproperties.non_dependency ()
(p, u) = self.generate_dependency_properties (rproperties.dependency (), rproperties)
properties += p
assert all(isinstance(p, property.Property) for p in properties)
usage_requirements = u
(source_targets, u) = self.generate_dependency_targets (self.sources_, rproperties)
usage_requirements += u
self.manager_.targets().log(
"Usage requirements for '%s' are '%s'" % (self.name_, usage_requirements))
# FIXME:
rproperties = property_set.create(properties + usage_requirements)
usage_requirements = property_set.create (usage_requirements)
self.manager_.targets().log(
"Build properties: '%s'" % str(rproperties))
source_targets += rproperties.get('<source>')
# We might get duplicate sources, for example if
# we link to two library which have the same <library> in
# usage requirements.
# Use stable sort, since for some targets the order is
# important. E.g. RUN_PY target need python source to come
# first.
source_targets = unique(source_targets, stable=True)
# FIXME: figure why this call messes up source_targets in-place
result = self.construct (self.name_, source_targets[:], rproperties)
if result:
assert len(result) == 2
gur = result [0]
result = result [1]
if self.always_:
for t in result:
t.always()
s = self.create_subvariant (
result,
self.manager().virtual_targets().recent_targets(), ps,
source_targets, rproperties, usage_requirements)
self.manager().virtual_targets().clear_recent_targets()
ur = self.compute_usage_requirements (s)
ur = ur.add (gur)
s.set_usage_requirements (ur)
self.manager_.targets().log (
"Usage requirements from '%s' are '%s'" %
(self.name(), str(rproperties)))
self.generated_[ps] = GenerateResult (ur, result)
else:
self.generated_[ps] = GenerateResult (property_set.empty(), [])
else:
# If we just see <build>no, we cannot produce any reasonable
# diagnostics. The code that adds this property is expected
# to explain why a target is not built, for example using
# the configure.log-component-configuration function.
# If this target fails to build, add <build>no to properties
# to cause any parent target to fail to build. Except that it
# - does not work now, since we check for <build>no only in
# common properties, but not in properties that came from
# dependencies
# - it's not clear if that's a good idea anyway. The alias
# target, for example, should not fail to build if a dependency
# fails.
self.generated_[ps] = GenerateResult(
property_set.create(["<build>no"]), [])
else:
self.manager().targets().log ("Already built")
self.manager().targets().pop_target()
self.manager().targets().decrease_indent()
return self.generated_[ps] | [
"def",
"generate",
"(",
"self",
",",
"ps",
")",
":",
"assert",
"isinstance",
"(",
"ps",
",",
"property_set",
".",
"PropertySet",
")",
"self",
".",
"manager_",
".",
"errors",
"(",
")",
".",
"push_user_context",
"(",
"\"Generating target \"",
"+",
"self",
".",
"full_name",
"(",
")",
",",
"self",
".",
"user_context_",
")",
"if",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"logging",
"(",
")",
":",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Building target '%s'\"",
"%",
"self",
".",
"name_",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"increase_indent",
"(",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Build request: '%s'\"",
"%",
"str",
"(",
"ps",
".",
"raw",
"(",
")",
")",
")",
"cf",
"=",
"self",
".",
"manager",
"(",
")",
".",
"command_line_free_features",
"(",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Command line free features: '%s'\"",
"%",
"str",
"(",
"cf",
".",
"raw",
"(",
")",
")",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Target requirements: %s'\"",
"%",
"str",
"(",
"self",
".",
"requirements",
"(",
")",
".",
"raw",
"(",
")",
")",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"push_target",
"(",
"self",
")",
"if",
"ps",
"not",
"in",
"self",
".",
"generated_",
":",
"# Apply free features form the command line. If user",
"# said",
"# define=FOO",
"# he most likely want this define to be set for all compiles.",
"ps",
"=",
"ps",
".",
"refine",
"(",
"self",
".",
"manager",
"(",
")",
".",
"command_line_free_features",
"(",
")",
")",
"rproperties",
"=",
"self",
".",
"common_properties",
"(",
"ps",
",",
"self",
".",
"requirements_",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Common properties are '%s'\"",
"%",
"str",
"(",
"rproperties",
")",
")",
"if",
"rproperties",
".",
"get",
"(",
"\"<build>\"",
")",
"!=",
"[",
"\"no\"",
"]",
":",
"result",
"=",
"GenerateResult",
"(",
")",
"properties",
"=",
"rproperties",
".",
"non_dependency",
"(",
")",
"(",
"p",
",",
"u",
")",
"=",
"self",
".",
"generate_dependency_properties",
"(",
"rproperties",
".",
"dependency",
"(",
")",
",",
"rproperties",
")",
"properties",
"+=",
"p",
"assert",
"all",
"(",
"isinstance",
"(",
"p",
",",
"property",
".",
"Property",
")",
"for",
"p",
"in",
"properties",
")",
"usage_requirements",
"=",
"u",
"(",
"source_targets",
",",
"u",
")",
"=",
"self",
".",
"generate_dependency_targets",
"(",
"self",
".",
"sources_",
",",
"rproperties",
")",
"usage_requirements",
"+=",
"u",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Usage requirements for '%s' are '%s'\"",
"%",
"(",
"self",
".",
"name_",
",",
"usage_requirements",
")",
")",
"# FIXME:",
"rproperties",
"=",
"property_set",
".",
"create",
"(",
"properties",
"+",
"usage_requirements",
")",
"usage_requirements",
"=",
"property_set",
".",
"create",
"(",
"usage_requirements",
")",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Build properties: '%s'\"",
"%",
"str",
"(",
"rproperties",
")",
")",
"source_targets",
"+=",
"rproperties",
".",
"get",
"(",
"'<source>'",
")",
"# We might get duplicate sources, for example if",
"# we link to two library which have the same <library> in",
"# usage requirements.",
"# Use stable sort, since for some targets the order is",
"# important. E.g. RUN_PY target need python source to come",
"# first.",
"source_targets",
"=",
"unique",
"(",
"source_targets",
",",
"stable",
"=",
"True",
")",
"# FIXME: figure why this call messes up source_targets in-place",
"result",
"=",
"self",
".",
"construct",
"(",
"self",
".",
"name_",
",",
"source_targets",
"[",
":",
"]",
",",
"rproperties",
")",
"if",
"result",
":",
"assert",
"len",
"(",
"result",
")",
"==",
"2",
"gur",
"=",
"result",
"[",
"0",
"]",
"result",
"=",
"result",
"[",
"1",
"]",
"if",
"self",
".",
"always_",
":",
"for",
"t",
"in",
"result",
":",
"t",
".",
"always",
"(",
")",
"s",
"=",
"self",
".",
"create_subvariant",
"(",
"result",
",",
"self",
".",
"manager",
"(",
")",
".",
"virtual_targets",
"(",
")",
".",
"recent_targets",
"(",
")",
",",
"ps",
",",
"source_targets",
",",
"rproperties",
",",
"usage_requirements",
")",
"self",
".",
"manager",
"(",
")",
".",
"virtual_targets",
"(",
")",
".",
"clear_recent_targets",
"(",
")",
"ur",
"=",
"self",
".",
"compute_usage_requirements",
"(",
"s",
")",
"ur",
"=",
"ur",
".",
"add",
"(",
"gur",
")",
"s",
".",
"set_usage_requirements",
"(",
"ur",
")",
"self",
".",
"manager_",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Usage requirements from '%s' are '%s'\"",
"%",
"(",
"self",
".",
"name",
"(",
")",
",",
"str",
"(",
"rproperties",
")",
")",
")",
"self",
".",
"generated_",
"[",
"ps",
"]",
"=",
"GenerateResult",
"(",
"ur",
",",
"result",
")",
"else",
":",
"self",
".",
"generated_",
"[",
"ps",
"]",
"=",
"GenerateResult",
"(",
"property_set",
".",
"empty",
"(",
")",
",",
"[",
"]",
")",
"else",
":",
"# If we just see <build>no, we cannot produce any reasonable",
"# diagnostics. The code that adds this property is expected",
"# to explain why a target is not built, for example using",
"# the configure.log-component-configuration function.",
"# If this target fails to build, add <build>no to properties",
"# to cause any parent target to fail to build. Except that it",
"# - does not work now, since we check for <build>no only in",
"# common properties, but not in properties that came from",
"# dependencies",
"# - it's not clear if that's a good idea anyway. The alias",
"# target, for example, should not fail to build if a dependency",
"# fails.",
"self",
".",
"generated_",
"[",
"ps",
"]",
"=",
"GenerateResult",
"(",
"property_set",
".",
"create",
"(",
"[",
"\"<build>no\"",
"]",
")",
",",
"[",
"]",
")",
"else",
":",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"log",
"(",
"\"Already built\"",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"pop_target",
"(",
")",
"self",
".",
"manager",
"(",
")",
".",
"targets",
"(",
")",
".",
"decrease_indent",
"(",
")",
"return",
"self",
".",
"generated_",
"[",
"ps",
"]"
] | Determines final build properties, generates sources,
and calls 'construct'. This method should not be
overridden. | [
"Determines",
"final",
"build",
"properties",
"generates",
"sources",
"and",
"calls",
"construct",
".",
"This",
"method",
"should",
"not",
"be",
"overridden",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L1173-L1294 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | BasicTarget.compute_usage_requirements | def compute_usage_requirements (self, subvariant):
""" Given the set of generated targets, and refined build
properties, determines and sets appripriate usage requirements
on those targets.
"""
assert isinstance(subvariant, virtual_target.Subvariant)
rproperties = subvariant.build_properties ()
xusage_requirements =self.evaluate_requirements(
self.usage_requirements_, rproperties, "added")
# We generate all dependency properties and add them,
# as well as their usage requirements, to result.
(r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency (), rproperties)
extra = r1 + r2
result = property_set.create (xusage_requirements.non_dependency () + extra)
# Propagate usage requirements we've got from sources, except
# for the <pch-header> and <pch-file> features.
#
# That feature specifies which pch file to use, and should apply
# only to direct dependents. Consider:
#
# pch pch1 : ...
# lib lib1 : ..... pch1 ;
# pch pch2 :
# lib lib2 : pch2 lib1 ;
#
# Here, lib2 should not get <pch-header> property from pch1.
#
# Essentially, when those two features are in usage requirements,
# they are propagated only to direct dependents. We might need
# a more general mechanism, but for now, only those two
# features are special.
properties = []
for p in subvariant.sources_usage_requirements().all():
if p.feature.name not in ('pch-header', 'pch-file'):
properties.append(p)
if 'shared' in rproperties.get('link'):
new_properties = []
for p in properties:
if p.feature.name != 'library':
new_properties.append(p)
properties = new_properties
result = result.add_raw(properties)
return result | python | def compute_usage_requirements (self, subvariant):
""" Given the set of generated targets, and refined build
properties, determines and sets appripriate usage requirements
on those targets.
"""
assert isinstance(subvariant, virtual_target.Subvariant)
rproperties = subvariant.build_properties ()
xusage_requirements =self.evaluate_requirements(
self.usage_requirements_, rproperties, "added")
# We generate all dependency properties and add them,
# as well as their usage requirements, to result.
(r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency (), rproperties)
extra = r1 + r2
result = property_set.create (xusage_requirements.non_dependency () + extra)
# Propagate usage requirements we've got from sources, except
# for the <pch-header> and <pch-file> features.
#
# That feature specifies which pch file to use, and should apply
# only to direct dependents. Consider:
#
# pch pch1 : ...
# lib lib1 : ..... pch1 ;
# pch pch2 :
# lib lib2 : pch2 lib1 ;
#
# Here, lib2 should not get <pch-header> property from pch1.
#
# Essentially, when those two features are in usage requirements,
# they are propagated only to direct dependents. We might need
# a more general mechanism, but for now, only those two
# features are special.
properties = []
for p in subvariant.sources_usage_requirements().all():
if p.feature.name not in ('pch-header', 'pch-file'):
properties.append(p)
if 'shared' in rproperties.get('link'):
new_properties = []
for p in properties:
if p.feature.name != 'library':
new_properties.append(p)
properties = new_properties
result = result.add_raw(properties)
return result | [
"def",
"compute_usage_requirements",
"(",
"self",
",",
"subvariant",
")",
":",
"assert",
"isinstance",
"(",
"subvariant",
",",
"virtual_target",
".",
"Subvariant",
")",
"rproperties",
"=",
"subvariant",
".",
"build_properties",
"(",
")",
"xusage_requirements",
"=",
"self",
".",
"evaluate_requirements",
"(",
"self",
".",
"usage_requirements_",
",",
"rproperties",
",",
"\"added\"",
")",
"# We generate all dependency properties and add them,",
"# as well as their usage requirements, to result.",
"(",
"r1",
",",
"r2",
")",
"=",
"self",
".",
"generate_dependency_properties",
"(",
"xusage_requirements",
".",
"dependency",
"(",
")",
",",
"rproperties",
")",
"extra",
"=",
"r1",
"+",
"r2",
"result",
"=",
"property_set",
".",
"create",
"(",
"xusage_requirements",
".",
"non_dependency",
"(",
")",
"+",
"extra",
")",
"# Propagate usage requirements we've got from sources, except",
"# for the <pch-header> and <pch-file> features.",
"#",
"# That feature specifies which pch file to use, and should apply",
"# only to direct dependents. Consider:",
"#",
"# pch pch1 : ...",
"# lib lib1 : ..... pch1 ;",
"# pch pch2 :",
"# lib lib2 : pch2 lib1 ;",
"#",
"# Here, lib2 should not get <pch-header> property from pch1.",
"#",
"# Essentially, when those two features are in usage requirements,",
"# they are propagated only to direct dependents. We might need",
"# a more general mechanism, but for now, only those two",
"# features are special.",
"properties",
"=",
"[",
"]",
"for",
"p",
"in",
"subvariant",
".",
"sources_usage_requirements",
"(",
")",
".",
"all",
"(",
")",
":",
"if",
"p",
".",
"feature",
".",
"name",
"not",
"in",
"(",
"'pch-header'",
",",
"'pch-file'",
")",
":",
"properties",
".",
"append",
"(",
"p",
")",
"if",
"'shared'",
"in",
"rproperties",
".",
"get",
"(",
"'link'",
")",
":",
"new_properties",
"=",
"[",
"]",
"for",
"p",
"in",
"properties",
":",
"if",
"p",
".",
"feature",
".",
"name",
"!=",
"'library'",
":",
"new_properties",
".",
"append",
"(",
"p",
")",
"properties",
"=",
"new_properties",
"result",
"=",
"result",
".",
"add_raw",
"(",
"properties",
")",
"return",
"result"
] | Given the set of generated targets, and refined build
properties, determines and sets appripriate usage requirements
on those targets. | [
"Given",
"the",
"set",
"of",
"generated",
"targets",
"and",
"refined",
"build",
"properties",
"determines",
"and",
"sets",
"appripriate",
"usage",
"requirements",
"on",
"those",
"targets",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L1296-L1342 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/targets.py | BasicTarget.create_subvariant | def create_subvariant (self, root_targets, all_targets,
build_request, sources,
rproperties, usage_requirements):
"""Creates a new subvariant-dg instances for 'targets'
- 'root-targets' the virtual targets will be returned to dependents
- 'all-targets' all virtual
targets created while building this main target
- 'build-request' is property-set instance with
requested build properties"""
assert is_iterable_typed(root_targets, virtual_target.VirtualTarget)
assert is_iterable_typed(all_targets, virtual_target.VirtualTarget)
assert isinstance(build_request, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
assert isinstance(rproperties, property_set.PropertySet)
assert isinstance(usage_requirements, property_set.PropertySet)
for e in root_targets:
e.root (True)
s = Subvariant (self, build_request, sources,
rproperties, usage_requirements, all_targets)
for v in all_targets:
if not v.creating_subvariant():
v.creating_subvariant(s)
return s | python | def create_subvariant (self, root_targets, all_targets,
build_request, sources,
rproperties, usage_requirements):
"""Creates a new subvariant-dg instances for 'targets'
- 'root-targets' the virtual targets will be returned to dependents
- 'all-targets' all virtual
targets created while building this main target
- 'build-request' is property-set instance with
requested build properties"""
assert is_iterable_typed(root_targets, virtual_target.VirtualTarget)
assert is_iterable_typed(all_targets, virtual_target.VirtualTarget)
assert isinstance(build_request, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
assert isinstance(rproperties, property_set.PropertySet)
assert isinstance(usage_requirements, property_set.PropertySet)
for e in root_targets:
e.root (True)
s = Subvariant (self, build_request, sources,
rproperties, usage_requirements, all_targets)
for v in all_targets:
if not v.creating_subvariant():
v.creating_subvariant(s)
return s | [
"def",
"create_subvariant",
"(",
"self",
",",
"root_targets",
",",
"all_targets",
",",
"build_request",
",",
"sources",
",",
"rproperties",
",",
"usage_requirements",
")",
":",
"assert",
"is_iterable_typed",
"(",
"root_targets",
",",
"virtual_target",
".",
"VirtualTarget",
")",
"assert",
"is_iterable_typed",
"(",
"all_targets",
",",
"virtual_target",
".",
"VirtualTarget",
")",
"assert",
"isinstance",
"(",
"build_request",
",",
"property_set",
".",
"PropertySet",
")",
"assert",
"is_iterable_typed",
"(",
"sources",
",",
"virtual_target",
".",
"VirtualTarget",
")",
"assert",
"isinstance",
"(",
"rproperties",
",",
"property_set",
".",
"PropertySet",
")",
"assert",
"isinstance",
"(",
"usage_requirements",
",",
"property_set",
".",
"PropertySet",
")",
"for",
"e",
"in",
"root_targets",
":",
"e",
".",
"root",
"(",
"True",
")",
"s",
"=",
"Subvariant",
"(",
"self",
",",
"build_request",
",",
"sources",
",",
"rproperties",
",",
"usage_requirements",
",",
"all_targets",
")",
"for",
"v",
"in",
"all_targets",
":",
"if",
"not",
"v",
".",
"creating_subvariant",
"(",
")",
":",
"v",
".",
"creating_subvariant",
"(",
"s",
")",
"return",
"s"
] | Creates a new subvariant-dg instances for 'targets'
- 'root-targets' the virtual targets will be returned to dependents
- 'all-targets' all virtual
targets created while building this main target
- 'build-request' is property-set instance with
requested build properties | [
"Creates",
"a",
"new",
"subvariant",
"-",
"dg",
"instances",
"for",
"targets",
"-",
"root",
"-",
"targets",
"the",
"virtual",
"targets",
"will",
"be",
"returned",
"to",
"dependents",
"-",
"all",
"-",
"targets",
"all",
"virtual",
"targets",
"created",
"while",
"building",
"this",
"main",
"target",
"-",
"build",
"-",
"request",
"is",
"property",
"-",
"set",
"instance",
"with",
"requested",
"build",
"properties"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L1344-L1370 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/builtin.py | variant | def variant (name, parents_or_properties, explicit_properties = []):
""" Declares a new variant.
First determines explicit properties for this variant, by
refining parents' explicit properties with the passed explicit
properties. The result is remembered and will be used if
this variant is used as parent.
Second, determines the full property set for this variant by
adding to the explicit properties default values for all properties
which neither present nor are symmetric.
Lastly, makes appropriate value of 'variant' property expand
to the full property set.
name: Name of the variant
parents_or_properties: Specifies parent variants, if
'explicit_properties' are given,
and explicit_properties otherwise.
explicit_properties: Explicit properties.
"""
parents = []
if not explicit_properties:
explicit_properties = parents_or_properties
else:
parents = parents_or_properties
inherited = property_set.empty()
if parents:
# If we allow multiple parents, we'd have to to check for conflicts
# between base variants, and there was no demand for so to bother.
if len (parents) > 1:
raise BaseException ("Multiple base variants are not yet supported")
p = parents[0]
# TODO: the check may be stricter
if not feature.is_implicit_value (p):
raise BaseException ("Invalid base variant '%s'" % p)
inherited = __variant_explicit_properties[p]
explicit_properties = property_set.create_with_validation(explicit_properties)
explicit_properties = inherited.refine(explicit_properties)
# Record explicitly specified properties for this variant
# We do this after inheriting parents' properties, so that
# they affect other variants, derived from this one.
__variant_explicit_properties[name] = explicit_properties
feature.extend('variant', [name])
feature.compose ("<variant>" + name, explicit_properties.all()) | python | def variant (name, parents_or_properties, explicit_properties = []):
""" Declares a new variant.
First determines explicit properties for this variant, by
refining parents' explicit properties with the passed explicit
properties. The result is remembered and will be used if
this variant is used as parent.
Second, determines the full property set for this variant by
adding to the explicit properties default values for all properties
which neither present nor are symmetric.
Lastly, makes appropriate value of 'variant' property expand
to the full property set.
name: Name of the variant
parents_or_properties: Specifies parent variants, if
'explicit_properties' are given,
and explicit_properties otherwise.
explicit_properties: Explicit properties.
"""
parents = []
if not explicit_properties:
explicit_properties = parents_or_properties
else:
parents = parents_or_properties
inherited = property_set.empty()
if parents:
# If we allow multiple parents, we'd have to to check for conflicts
# between base variants, and there was no demand for so to bother.
if len (parents) > 1:
raise BaseException ("Multiple base variants are not yet supported")
p = parents[0]
# TODO: the check may be stricter
if not feature.is_implicit_value (p):
raise BaseException ("Invalid base variant '%s'" % p)
inherited = __variant_explicit_properties[p]
explicit_properties = property_set.create_with_validation(explicit_properties)
explicit_properties = inherited.refine(explicit_properties)
# Record explicitly specified properties for this variant
# We do this after inheriting parents' properties, so that
# they affect other variants, derived from this one.
__variant_explicit_properties[name] = explicit_properties
feature.extend('variant', [name])
feature.compose ("<variant>" + name, explicit_properties.all()) | [
"def",
"variant",
"(",
"name",
",",
"parents_or_properties",
",",
"explicit_properties",
"=",
"[",
"]",
")",
":",
"parents",
"=",
"[",
"]",
"if",
"not",
"explicit_properties",
":",
"explicit_properties",
"=",
"parents_or_properties",
"else",
":",
"parents",
"=",
"parents_or_properties",
"inherited",
"=",
"property_set",
".",
"empty",
"(",
")",
"if",
"parents",
":",
"# If we allow multiple parents, we'd have to to check for conflicts",
"# between base variants, and there was no demand for so to bother.",
"if",
"len",
"(",
"parents",
")",
">",
"1",
":",
"raise",
"BaseException",
"(",
"\"Multiple base variants are not yet supported\"",
")",
"p",
"=",
"parents",
"[",
"0",
"]",
"# TODO: the check may be stricter",
"if",
"not",
"feature",
".",
"is_implicit_value",
"(",
"p",
")",
":",
"raise",
"BaseException",
"(",
"\"Invalid base variant '%s'\"",
"%",
"p",
")",
"inherited",
"=",
"__variant_explicit_properties",
"[",
"p",
"]",
"explicit_properties",
"=",
"property_set",
".",
"create_with_validation",
"(",
"explicit_properties",
")",
"explicit_properties",
"=",
"inherited",
".",
"refine",
"(",
"explicit_properties",
")",
"# Record explicitly specified properties for this variant",
"# We do this after inheriting parents' properties, so that",
"# they affect other variants, derived from this one.",
"__variant_explicit_properties",
"[",
"name",
"]",
"=",
"explicit_properties",
"feature",
".",
"extend",
"(",
"'variant'",
",",
"[",
"name",
"]",
")",
"feature",
".",
"compose",
"(",
"\"<variant>\"",
"+",
"name",
",",
"explicit_properties",
".",
"all",
"(",
")",
")"
] | Declares a new variant.
First determines explicit properties for this variant, by
refining parents' explicit properties with the passed explicit
properties. The result is remembered and will be used if
this variant is used as parent.
Second, determines the full property set for this variant by
adding to the explicit properties default values for all properties
which neither present nor are symmetric.
Lastly, makes appropriate value of 'variant' property expand
to the full property set.
name: Name of the variant
parents_or_properties: Specifies parent variants, if
'explicit_properties' are given,
and explicit_properties otherwise.
explicit_properties: Explicit properties. | [
"Declares",
"a",
"new",
"variant",
".",
"First",
"determines",
"explicit",
"properties",
"for",
"this",
"variant",
"by",
"refining",
"parents",
"explicit",
"properties",
"with",
"the",
"passed",
"explicit",
"properties",
".",
"The",
"result",
"is",
"remembered",
"and",
"will",
"be",
"used",
"if",
"this",
"variant",
"is",
"used",
"as",
"parent",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/builtin.py#L33-L82 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/builtin.py | register_globals | def register_globals ():
""" Registers all features and variants declared by this module.
"""
# This feature is used to determine which OS we're on.
# In future, this may become <target-os> and <host-os>
# TODO: check this. Compatibility with bjam names? Subfeature for version?
os = sys.platform
feature.feature ('os', [os], ['propagated', 'link-incompatible'])
# The two OS features define a known set of abstract OS names. The host-os is
# the OS under which bjam is running. Even though this should really be a fixed
# property we need to list all the values to prevent unknown value errors. Both
# set the default value to the current OS to account for the default use case of
# building on the target OS.
feature.feature('host-os', __os_names)
feature.set_default('host-os', default_host_os())
feature.feature('target-os', __os_names, ['propagated', 'link-incompatible'])
feature.set_default('target-os', default_host_os())
feature.feature ('toolset', [], ['implicit', 'propagated' ,'symmetric'])
feature.feature ('stdlib', ['native'], ['propagated', 'composite'])
feature.feature ('link', ['shared', 'static'], ['propagated'])
feature.feature ('runtime-link', ['shared', 'static'], ['propagated'])
feature.feature ('runtime-debugging', ['on', 'off'], ['propagated'])
feature.feature ('optimization', ['off', 'speed', 'space'], ['propagated'])
feature.feature ('profiling', ['off', 'on'], ['propagated'])
feature.feature ('inlining', ['off', 'on', 'full'], ['propagated'])
feature.feature ('threading', ['single', 'multi'], ['propagated'])
feature.feature ('rtti', ['on', 'off'], ['propagated'])
feature.feature ('exception-handling', ['on', 'off'], ['propagated'])
# Whether there is support for asynchronous EH (e.g. catching SEGVs).
feature.feature ('asynch-exceptions', ['off', 'on'], ['propagated'])
# Whether all extern "C" functions are considered nothrow by default.
feature.feature ('extern-c-nothrow', ['off', 'on'], ['propagated'])
feature.feature ('debug-symbols', ['on', 'off'], ['propagated'])
feature.feature ('define', [], ['free'])
feature.feature ('undef', [], ['free'])
feature.feature ('include', [], ['free', 'path']) #order-sensitive
feature.feature ('cflags', [], ['free'])
feature.feature ('cxxflags', [], ['free'])
feature.feature ('asmflags', [], ['free'])
feature.feature ('linkflags', [], ['free'])
feature.feature ('archiveflags', [], ['free'])
feature.feature ('version', [], ['free'])
feature.feature ('location-prefix', [], ['free'])
feature.feature ('action', [], ['free'])
# The following features are incidental, since
# in themself they have no effect on build products.
# Not making them incidental will result in problems in corner
# cases, for example:
#
# unit-test a : a.cpp : <use>b ;
# lib b : a.cpp b ;
#
# Here, if <use> is not incidental, we'll decide we have two
# targets for a.obj with different properties, and will complain.
#
# Note that making feature incidental does not mean it's ignored. It may
# be ignored when creating the virtual target, but the rest of build process
# will use them.
feature.feature ('use', [], ['free', 'dependency', 'incidental'])
feature.feature ('dependency', [], ['free', 'dependency', 'incidental'])
feature.feature ('implicit-dependency', [], ['free', 'dependency', 'incidental'])
feature.feature('warnings', [
'on', # Enable default/"reasonable" warning level for the tool.
'all', # Enable all possible warnings issued by the tool.
'off'], # Disable all warnings issued by the tool.
['incidental', 'propagated'])
feature.feature('warnings-as-errors', [
'off', # Do not fail the compilation if there are warnings.
'on'], # Fail the compilation if there are warnings.
['incidental', 'propagated'])
feature.feature('c++-template-depth',
[str(i) for i in range(64,1024+1,64)] +
[str(i) for i in range(20,1000+1,10)] +
# Maximum template instantiation depth guaranteed for ANSI/ISO C++
# conforming programs.
['17'],
['incidental', 'optional', 'propagated'])
feature.feature ('source', [], ['free', 'dependency', 'incidental'])
feature.feature ('library', [], ['free', 'dependency', 'incidental'])
feature.feature ('file', [], ['free', 'dependency', 'incidental'])
feature.feature ('find-shared-library', [], ['free']) #order-sensitive ;
feature.feature ('find-static-library', [], ['free']) #order-sensitive ;
feature.feature ('library-path', [], ['free', 'path']) #order-sensitive ;
# Internal feature.
feature.feature ('library-file', [], ['free', 'dependency'])
feature.feature ('name', [], ['free'])
feature.feature ('tag', [], ['free'])
feature.feature ('search', [], ['free', 'path']) #order-sensitive ;
feature.feature ('location', [], ['free', 'path'])
feature.feature ('dll-path', [], ['free', 'path'])
feature.feature ('hardcode-dll-paths', ['true', 'false'], ['incidental'])
# This is internal feature which holds the paths of all dependency
# dynamic libraries. On Windows, it's needed so that we can all
# those paths to PATH, when running applications.
# On Linux, it's needed to add proper -rpath-link command line options.
feature.feature ('xdll-path', [], ['free', 'path'])
#provides means to specify def-file for windows dlls.
feature.feature ('def-file', [], ['free', 'dependency'])
# This feature is used to allow specific generators to run.
# For example, QT tools can only be invoked when QT library
# is used. In that case, <allow>qt will be in usage requirement
# of the library.
feature.feature ('allow', [], ['free'])
# The addressing model to generate code for. Currently a limited set only
# specifying the bit size of pointers.
feature.feature('address-model', ['16', '32', '64'], ['propagated', 'optional'])
# Type of CPU architecture to compile for.
feature.feature('architecture', [
# x86 and x86-64
'x86',
# ia64
'ia64',
# Sparc
'sparc',
# RS/6000 & PowerPC
'power',
# MIPS/SGI
'mips1', 'mips2', 'mips3', 'mips4', 'mips32', 'mips32r2', 'mips64',
# HP/PA-RISC
'parisc',
# Advanced RISC Machines
'arm',
# Combined architectures for platforms/toolsets that support building for
# multiple architectures at once. "combined" would be the default multi-arch
# for the toolset.
'combined',
'combined-x86-power'],
['propagated', 'optional'])
# The specific instruction set in an architecture to compile.
feature.feature('instruction-set', [
# x86 and x86-64
'native', 'i486', 'i586', 'i686', 'pentium', 'pentium-mmx', 'pentiumpro', 'pentium2', 'pentium3',
'pentium3m', 'pentium-m', 'pentium4', 'pentium4m', 'prescott', 'nocona', 'core2', 'corei7', 'corei7-avx', 'core-avx-i',
'conroe', 'conroe-xe', 'conroe-l', 'allendale', 'merom', 'merom-xe', 'kentsfield', 'kentsfield-xe', 'penryn', 'wolfdale',
'yorksfield', 'nehalem', 'sandy-bridge', 'ivy-bridge', 'haswell', 'k6', 'k6-2', 'k6-3', 'athlon', 'athlon-tbird', 'athlon-4', 'athlon-xp',
'athlon-mp', 'k8', 'opteron', 'athlon64', 'athlon-fx', 'k8-sse3', 'opteron-sse3', 'athlon64-sse3', 'amdfam10', 'barcelona',
'bdver1', 'bdver2', 'bdver3', 'btver1', 'btver2', 'winchip-c6', 'winchip2', 'c3', 'c3-2', 'atom',
# ia64
'itanium', 'itanium1', 'merced', 'itanium2', 'mckinley',
# Sparc
'v7', 'cypress', 'v8', 'supersparc', 'sparclite', 'hypersparc', 'sparclite86x', 'f930', 'f934',
'sparclet', 'tsc701', 'v9', 'ultrasparc', 'ultrasparc3',
# RS/6000 & PowerPC
'401', '403', '405', '405fp', '440', '440fp', '505', '601', '602',
'603', '603e', '604', '604e', '620', '630', '740', '7400',
'7450', '750', '801', '821', '823', '860', '970', '8540',
'power-common', 'ec603e', 'g3', 'g4', 'g5', 'power', 'power2',
'power3', 'power4', 'power5', 'powerpc', 'powerpc64', 'rios',
'rios1', 'rsc', 'rios2', 'rs64a',
# MIPS
'4kc', '4kp', '5kc', '20kc', 'm4k', 'r2000', 'r3000', 'r3900', 'r4000',
'r4100', 'r4300', 'r4400', 'r4600', 'r4650',
'r6000', 'r8000', 'rm7000', 'rm9000', 'orion', 'sb1', 'vr4100',
'vr4111', 'vr4120', 'vr4130', 'vr4300',
'vr5000', 'vr5400', 'vr5500',
# HP/PA-RISC
'700', '7100', '7100lc', '7200', '7300', '8000',
# Advanced RISC Machines
'armv2', 'armv2a', 'armv3', 'armv3m', 'armv4', 'armv4t', 'armv5',
'armv5t', 'armv5te', 'armv6', 'armv6j', 'iwmmxt', 'ep9312'],
['propagated', 'optional'])
feature.feature('conditional', [], ['incidental', 'free'])
# The value of 'no' prevents building of a target.
feature.feature('build', ['yes', 'no'], ['optional'])
# Windows-specific features
feature.feature ('user-interface', ['console', 'gui', 'wince', 'native', 'auto'], [])
feature.feature ('variant', [], ['implicit', 'composite', 'propagated', 'symmetric'])
variant ('debug', ['<optimization>off', '<debug-symbols>on', '<inlining>off', '<runtime-debugging>on'])
variant ('release', ['<optimization>speed', '<debug-symbols>off', '<inlining>full',
'<runtime-debugging>off', '<define>NDEBUG'])
variant ('profile', ['release'], ['<profiling>on', '<debug-symbols>on']) | python | def register_globals ():
""" Registers all features and variants declared by this module.
"""
# This feature is used to determine which OS we're on.
# In future, this may become <target-os> and <host-os>
# TODO: check this. Compatibility with bjam names? Subfeature for version?
os = sys.platform
feature.feature ('os', [os], ['propagated', 'link-incompatible'])
# The two OS features define a known set of abstract OS names. The host-os is
# the OS under which bjam is running. Even though this should really be a fixed
# property we need to list all the values to prevent unknown value errors. Both
# set the default value to the current OS to account for the default use case of
# building on the target OS.
feature.feature('host-os', __os_names)
feature.set_default('host-os', default_host_os())
feature.feature('target-os', __os_names, ['propagated', 'link-incompatible'])
feature.set_default('target-os', default_host_os())
feature.feature ('toolset', [], ['implicit', 'propagated' ,'symmetric'])
feature.feature ('stdlib', ['native'], ['propagated', 'composite'])
feature.feature ('link', ['shared', 'static'], ['propagated'])
feature.feature ('runtime-link', ['shared', 'static'], ['propagated'])
feature.feature ('runtime-debugging', ['on', 'off'], ['propagated'])
feature.feature ('optimization', ['off', 'speed', 'space'], ['propagated'])
feature.feature ('profiling', ['off', 'on'], ['propagated'])
feature.feature ('inlining', ['off', 'on', 'full'], ['propagated'])
feature.feature ('threading', ['single', 'multi'], ['propagated'])
feature.feature ('rtti', ['on', 'off'], ['propagated'])
feature.feature ('exception-handling', ['on', 'off'], ['propagated'])
# Whether there is support for asynchronous EH (e.g. catching SEGVs).
feature.feature ('asynch-exceptions', ['off', 'on'], ['propagated'])
# Whether all extern "C" functions are considered nothrow by default.
feature.feature ('extern-c-nothrow', ['off', 'on'], ['propagated'])
feature.feature ('debug-symbols', ['on', 'off'], ['propagated'])
feature.feature ('define', [], ['free'])
feature.feature ('undef', [], ['free'])
feature.feature ('include', [], ['free', 'path']) #order-sensitive
feature.feature ('cflags', [], ['free'])
feature.feature ('cxxflags', [], ['free'])
feature.feature ('asmflags', [], ['free'])
feature.feature ('linkflags', [], ['free'])
feature.feature ('archiveflags', [], ['free'])
feature.feature ('version', [], ['free'])
feature.feature ('location-prefix', [], ['free'])
feature.feature ('action', [], ['free'])
# The following features are incidental, since
# in themself they have no effect on build products.
# Not making them incidental will result in problems in corner
# cases, for example:
#
# unit-test a : a.cpp : <use>b ;
# lib b : a.cpp b ;
#
# Here, if <use> is not incidental, we'll decide we have two
# targets for a.obj with different properties, and will complain.
#
# Note that making feature incidental does not mean it's ignored. It may
# be ignored when creating the virtual target, but the rest of build process
# will use them.
feature.feature ('use', [], ['free', 'dependency', 'incidental'])
feature.feature ('dependency', [], ['free', 'dependency', 'incidental'])
feature.feature ('implicit-dependency', [], ['free', 'dependency', 'incidental'])
feature.feature('warnings', [
'on', # Enable default/"reasonable" warning level for the tool.
'all', # Enable all possible warnings issued by the tool.
'off'], # Disable all warnings issued by the tool.
['incidental', 'propagated'])
feature.feature('warnings-as-errors', [
'off', # Do not fail the compilation if there are warnings.
'on'], # Fail the compilation if there are warnings.
['incidental', 'propagated'])
feature.feature('c++-template-depth',
[str(i) for i in range(64,1024+1,64)] +
[str(i) for i in range(20,1000+1,10)] +
# Maximum template instantiation depth guaranteed for ANSI/ISO C++
# conforming programs.
['17'],
['incidental', 'optional', 'propagated'])
feature.feature ('source', [], ['free', 'dependency', 'incidental'])
feature.feature ('library', [], ['free', 'dependency', 'incidental'])
feature.feature ('file', [], ['free', 'dependency', 'incidental'])
feature.feature ('find-shared-library', [], ['free']) #order-sensitive ;
feature.feature ('find-static-library', [], ['free']) #order-sensitive ;
feature.feature ('library-path', [], ['free', 'path']) #order-sensitive ;
# Internal feature.
feature.feature ('library-file', [], ['free', 'dependency'])
feature.feature ('name', [], ['free'])
feature.feature ('tag', [], ['free'])
feature.feature ('search', [], ['free', 'path']) #order-sensitive ;
feature.feature ('location', [], ['free', 'path'])
feature.feature ('dll-path', [], ['free', 'path'])
feature.feature ('hardcode-dll-paths', ['true', 'false'], ['incidental'])
# This is internal feature which holds the paths of all dependency
# dynamic libraries. On Windows, it's needed so that we can all
# those paths to PATH, when running applications.
# On Linux, it's needed to add proper -rpath-link command line options.
feature.feature ('xdll-path', [], ['free', 'path'])
#provides means to specify def-file for windows dlls.
feature.feature ('def-file', [], ['free', 'dependency'])
# This feature is used to allow specific generators to run.
# For example, QT tools can only be invoked when QT library
# is used. In that case, <allow>qt will be in usage requirement
# of the library.
feature.feature ('allow', [], ['free'])
# The addressing model to generate code for. Currently a limited set only
# specifying the bit size of pointers.
feature.feature('address-model', ['16', '32', '64'], ['propagated', 'optional'])
# Type of CPU architecture to compile for.
feature.feature('architecture', [
# x86 and x86-64
'x86',
# ia64
'ia64',
# Sparc
'sparc',
# RS/6000 & PowerPC
'power',
# MIPS/SGI
'mips1', 'mips2', 'mips3', 'mips4', 'mips32', 'mips32r2', 'mips64',
# HP/PA-RISC
'parisc',
# Advanced RISC Machines
'arm',
# Combined architectures for platforms/toolsets that support building for
# multiple architectures at once. "combined" would be the default multi-arch
# for the toolset.
'combined',
'combined-x86-power'],
['propagated', 'optional'])
# The specific instruction set in an architecture to compile.
feature.feature('instruction-set', [
# x86 and x86-64
'native', 'i486', 'i586', 'i686', 'pentium', 'pentium-mmx', 'pentiumpro', 'pentium2', 'pentium3',
'pentium3m', 'pentium-m', 'pentium4', 'pentium4m', 'prescott', 'nocona', 'core2', 'corei7', 'corei7-avx', 'core-avx-i',
'conroe', 'conroe-xe', 'conroe-l', 'allendale', 'merom', 'merom-xe', 'kentsfield', 'kentsfield-xe', 'penryn', 'wolfdale',
'yorksfield', 'nehalem', 'sandy-bridge', 'ivy-bridge', 'haswell', 'k6', 'k6-2', 'k6-3', 'athlon', 'athlon-tbird', 'athlon-4', 'athlon-xp',
'athlon-mp', 'k8', 'opteron', 'athlon64', 'athlon-fx', 'k8-sse3', 'opteron-sse3', 'athlon64-sse3', 'amdfam10', 'barcelona',
'bdver1', 'bdver2', 'bdver3', 'btver1', 'btver2', 'winchip-c6', 'winchip2', 'c3', 'c3-2', 'atom',
# ia64
'itanium', 'itanium1', 'merced', 'itanium2', 'mckinley',
# Sparc
'v7', 'cypress', 'v8', 'supersparc', 'sparclite', 'hypersparc', 'sparclite86x', 'f930', 'f934',
'sparclet', 'tsc701', 'v9', 'ultrasparc', 'ultrasparc3',
# RS/6000 & PowerPC
'401', '403', '405', '405fp', '440', '440fp', '505', '601', '602',
'603', '603e', '604', '604e', '620', '630', '740', '7400',
'7450', '750', '801', '821', '823', '860', '970', '8540',
'power-common', 'ec603e', 'g3', 'g4', 'g5', 'power', 'power2',
'power3', 'power4', 'power5', 'powerpc', 'powerpc64', 'rios',
'rios1', 'rsc', 'rios2', 'rs64a',
# MIPS
'4kc', '4kp', '5kc', '20kc', 'm4k', 'r2000', 'r3000', 'r3900', 'r4000',
'r4100', 'r4300', 'r4400', 'r4600', 'r4650',
'r6000', 'r8000', 'rm7000', 'rm9000', 'orion', 'sb1', 'vr4100',
'vr4111', 'vr4120', 'vr4130', 'vr4300',
'vr5000', 'vr5400', 'vr5500',
# HP/PA-RISC
'700', '7100', '7100lc', '7200', '7300', '8000',
# Advanced RISC Machines
'armv2', 'armv2a', 'armv3', 'armv3m', 'armv4', 'armv4t', 'armv5',
'armv5t', 'armv5te', 'armv6', 'armv6j', 'iwmmxt', 'ep9312'],
['propagated', 'optional'])
feature.feature('conditional', [], ['incidental', 'free'])
# The value of 'no' prevents building of a target.
feature.feature('build', ['yes', 'no'], ['optional'])
# Windows-specific features
feature.feature ('user-interface', ['console', 'gui', 'wince', 'native', 'auto'], [])
feature.feature ('variant', [], ['implicit', 'composite', 'propagated', 'symmetric'])
variant ('debug', ['<optimization>off', '<debug-symbols>on', '<inlining>off', '<runtime-debugging>on'])
variant ('release', ['<optimization>speed', '<debug-symbols>off', '<inlining>full',
'<runtime-debugging>off', '<define>NDEBUG'])
variant ('profile', ['release'], ['<profiling>on', '<debug-symbols>on']) | [
"def",
"register_globals",
"(",
")",
":",
"# This feature is used to determine which OS we're on.",
"# In future, this may become <target-os> and <host-os>",
"# TODO: check this. Compatibility with bjam names? Subfeature for version?",
"os",
"=",
"sys",
".",
"platform",
"feature",
".",
"feature",
"(",
"'os'",
",",
"[",
"os",
"]",
",",
"[",
"'propagated'",
",",
"'link-incompatible'",
"]",
")",
"# The two OS features define a known set of abstract OS names. The host-os is",
"# the OS under which bjam is running. Even though this should really be a fixed",
"# property we need to list all the values to prevent unknown value errors. Both",
"# set the default value to the current OS to account for the default use case of",
"# building on the target OS.",
"feature",
".",
"feature",
"(",
"'host-os'",
",",
"__os_names",
")",
"feature",
".",
"set_default",
"(",
"'host-os'",
",",
"default_host_os",
"(",
")",
")",
"feature",
".",
"feature",
"(",
"'target-os'",
",",
"__os_names",
",",
"[",
"'propagated'",
",",
"'link-incompatible'",
"]",
")",
"feature",
".",
"set_default",
"(",
"'target-os'",
",",
"default_host_os",
"(",
")",
")",
"feature",
".",
"feature",
"(",
"'toolset'",
",",
"[",
"]",
",",
"[",
"'implicit'",
",",
"'propagated'",
",",
"'symmetric'",
"]",
")",
"feature",
".",
"feature",
"(",
"'stdlib'",
",",
"[",
"'native'",
"]",
",",
"[",
"'propagated'",
",",
"'composite'",
"]",
")",
"feature",
".",
"feature",
"(",
"'link'",
",",
"[",
"'shared'",
",",
"'static'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'runtime-link'",
",",
"[",
"'shared'",
",",
"'static'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'runtime-debugging'",
",",
"[",
"'on'",
",",
"'off'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'optimization'",
",",
"[",
"'off'",
",",
"'speed'",
",",
"'space'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'profiling'",
",",
"[",
"'off'",
",",
"'on'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'inlining'",
",",
"[",
"'off'",
",",
"'on'",
",",
"'full'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'threading'",
",",
"[",
"'single'",
",",
"'multi'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'rtti'",
",",
"[",
"'on'",
",",
"'off'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'exception-handling'",
",",
"[",
"'on'",
",",
"'off'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"# Whether there is support for asynchronous EH (e.g. catching SEGVs).",
"feature",
".",
"feature",
"(",
"'asynch-exceptions'",
",",
"[",
"'off'",
",",
"'on'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"# Whether all extern \"C\" functions are considered nothrow by default.",
"feature",
".",
"feature",
"(",
"'extern-c-nothrow'",
",",
"[",
"'off'",
",",
"'on'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'debug-symbols'",
",",
"[",
"'on'",
",",
"'off'",
"]",
",",
"[",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'define'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'undef'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'include'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'path'",
"]",
")",
"#order-sensitive",
"feature",
".",
"feature",
"(",
"'cflags'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'cxxflags'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'asmflags'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'linkflags'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'archiveflags'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'version'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'location-prefix'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'action'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"# The following features are incidental, since",
"# in themself they have no effect on build products.",
"# Not making them incidental will result in problems in corner",
"# cases, for example:",
"#",
"# unit-test a : a.cpp : <use>b ;",
"# lib b : a.cpp b ;",
"#",
"# Here, if <use> is not incidental, we'll decide we have two",
"# targets for a.obj with different properties, and will complain.",
"#",
"# Note that making feature incidental does not mean it's ignored. It may",
"# be ignored when creating the virtual target, but the rest of build process",
"# will use them.",
"feature",
".",
"feature",
"(",
"'use'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
",",
"'incidental'",
"]",
")",
"feature",
".",
"feature",
"(",
"'dependency'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
",",
"'incidental'",
"]",
")",
"feature",
".",
"feature",
"(",
"'implicit-dependency'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
",",
"'incidental'",
"]",
")",
"feature",
".",
"feature",
"(",
"'warnings'",
",",
"[",
"'on'",
",",
"# Enable default/\"reasonable\" warning level for the tool.",
"'all'",
",",
"# Enable all possible warnings issued by the tool.",
"'off'",
"]",
",",
"# Disable all warnings issued by the tool.",
"[",
"'incidental'",
",",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'warnings-as-errors'",
",",
"[",
"'off'",
",",
"# Do not fail the compilation if there are warnings.",
"'on'",
"]",
",",
"# Fail the compilation if there are warnings.",
"[",
"'incidental'",
",",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'c++-template-depth'",
",",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"64",
",",
"1024",
"+",
"1",
",",
"64",
")",
"]",
"+",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"20",
",",
"1000",
"+",
"1",
",",
"10",
")",
"]",
"+",
"# Maximum template instantiation depth guaranteed for ANSI/ISO C++",
"# conforming programs.",
"[",
"'17'",
"]",
",",
"[",
"'incidental'",
",",
"'optional'",
",",
"'propagated'",
"]",
")",
"feature",
".",
"feature",
"(",
"'source'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
",",
"'incidental'",
"]",
")",
"feature",
".",
"feature",
"(",
"'library'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
",",
"'incidental'",
"]",
")",
"feature",
".",
"feature",
"(",
"'file'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
",",
"'incidental'",
"]",
")",
"feature",
".",
"feature",
"(",
"'find-shared-library'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"#order-sensitive ;",
"feature",
".",
"feature",
"(",
"'find-static-library'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"#order-sensitive ;",
"feature",
".",
"feature",
"(",
"'library-path'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'path'",
"]",
")",
"#order-sensitive ;",
"# Internal feature.",
"feature",
".",
"feature",
"(",
"'library-file'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
"]",
")",
"feature",
".",
"feature",
"(",
"'name'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'tag'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"feature",
".",
"feature",
"(",
"'search'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'path'",
"]",
")",
"#order-sensitive ;",
"feature",
".",
"feature",
"(",
"'location'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'path'",
"]",
")",
"feature",
".",
"feature",
"(",
"'dll-path'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'path'",
"]",
")",
"feature",
".",
"feature",
"(",
"'hardcode-dll-paths'",
",",
"[",
"'true'",
",",
"'false'",
"]",
",",
"[",
"'incidental'",
"]",
")",
"# This is internal feature which holds the paths of all dependency",
"# dynamic libraries. On Windows, it's needed so that we can all",
"# those paths to PATH, when running applications.",
"# On Linux, it's needed to add proper -rpath-link command line options.",
"feature",
".",
"feature",
"(",
"'xdll-path'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'path'",
"]",
")",
"#provides means to specify def-file for windows dlls.",
"feature",
".",
"feature",
"(",
"'def-file'",
",",
"[",
"]",
",",
"[",
"'free'",
",",
"'dependency'",
"]",
")",
"# This feature is used to allow specific generators to run.",
"# For example, QT tools can only be invoked when QT library",
"# is used. In that case, <allow>qt will be in usage requirement",
"# of the library.",
"feature",
".",
"feature",
"(",
"'allow'",
",",
"[",
"]",
",",
"[",
"'free'",
"]",
")",
"# The addressing model to generate code for. Currently a limited set only",
"# specifying the bit size of pointers.",
"feature",
".",
"feature",
"(",
"'address-model'",
",",
"[",
"'16'",
",",
"'32'",
",",
"'64'",
"]",
",",
"[",
"'propagated'",
",",
"'optional'",
"]",
")",
"# Type of CPU architecture to compile for.",
"feature",
".",
"feature",
"(",
"'architecture'",
",",
"[",
"# x86 and x86-64",
"'x86'",
",",
"# ia64",
"'ia64'",
",",
"# Sparc",
"'sparc'",
",",
"# RS/6000 & PowerPC",
"'power'",
",",
"# MIPS/SGI",
"'mips1'",
",",
"'mips2'",
",",
"'mips3'",
",",
"'mips4'",
",",
"'mips32'",
",",
"'mips32r2'",
",",
"'mips64'",
",",
"# HP/PA-RISC",
"'parisc'",
",",
"# Advanced RISC Machines",
"'arm'",
",",
"# Combined architectures for platforms/toolsets that support building for",
"# multiple architectures at once. \"combined\" would be the default multi-arch",
"# for the toolset.",
"'combined'",
",",
"'combined-x86-power'",
"]",
",",
"[",
"'propagated'",
",",
"'optional'",
"]",
")",
"# The specific instruction set in an architecture to compile.",
"feature",
".",
"feature",
"(",
"'instruction-set'",
",",
"[",
"# x86 and x86-64",
"'native'",
",",
"'i486'",
",",
"'i586'",
",",
"'i686'",
",",
"'pentium'",
",",
"'pentium-mmx'",
",",
"'pentiumpro'",
",",
"'pentium2'",
",",
"'pentium3'",
",",
"'pentium3m'",
",",
"'pentium-m'",
",",
"'pentium4'",
",",
"'pentium4m'",
",",
"'prescott'",
",",
"'nocona'",
",",
"'core2'",
",",
"'corei7'",
",",
"'corei7-avx'",
",",
"'core-avx-i'",
",",
"'conroe'",
",",
"'conroe-xe'",
",",
"'conroe-l'",
",",
"'allendale'",
",",
"'merom'",
",",
"'merom-xe'",
",",
"'kentsfield'",
",",
"'kentsfield-xe'",
",",
"'penryn'",
",",
"'wolfdale'",
",",
"'yorksfield'",
",",
"'nehalem'",
",",
"'sandy-bridge'",
",",
"'ivy-bridge'",
",",
"'haswell'",
",",
"'k6'",
",",
"'k6-2'",
",",
"'k6-3'",
",",
"'athlon'",
",",
"'athlon-tbird'",
",",
"'athlon-4'",
",",
"'athlon-xp'",
",",
"'athlon-mp'",
",",
"'k8'",
",",
"'opteron'",
",",
"'athlon64'",
",",
"'athlon-fx'",
",",
"'k8-sse3'",
",",
"'opteron-sse3'",
",",
"'athlon64-sse3'",
",",
"'amdfam10'",
",",
"'barcelona'",
",",
"'bdver1'",
",",
"'bdver2'",
",",
"'bdver3'",
",",
"'btver1'",
",",
"'btver2'",
",",
"'winchip-c6'",
",",
"'winchip2'",
",",
"'c3'",
",",
"'c3-2'",
",",
"'atom'",
",",
"# ia64",
"'itanium'",
",",
"'itanium1'",
",",
"'merced'",
",",
"'itanium2'",
",",
"'mckinley'",
",",
"# Sparc",
"'v7'",
",",
"'cypress'",
",",
"'v8'",
",",
"'supersparc'",
",",
"'sparclite'",
",",
"'hypersparc'",
",",
"'sparclite86x'",
",",
"'f930'",
",",
"'f934'",
",",
"'sparclet'",
",",
"'tsc701'",
",",
"'v9'",
",",
"'ultrasparc'",
",",
"'ultrasparc3'",
",",
"# RS/6000 & PowerPC",
"'401'",
",",
"'403'",
",",
"'405'",
",",
"'405fp'",
",",
"'440'",
",",
"'440fp'",
",",
"'505'",
",",
"'601'",
",",
"'602'",
",",
"'603'",
",",
"'603e'",
",",
"'604'",
",",
"'604e'",
",",
"'620'",
",",
"'630'",
",",
"'740'",
",",
"'7400'",
",",
"'7450'",
",",
"'750'",
",",
"'801'",
",",
"'821'",
",",
"'823'",
",",
"'860'",
",",
"'970'",
",",
"'8540'",
",",
"'power-common'",
",",
"'ec603e'",
",",
"'g3'",
",",
"'g4'",
",",
"'g5'",
",",
"'power'",
",",
"'power2'",
",",
"'power3'",
",",
"'power4'",
",",
"'power5'",
",",
"'powerpc'",
",",
"'powerpc64'",
",",
"'rios'",
",",
"'rios1'",
",",
"'rsc'",
",",
"'rios2'",
",",
"'rs64a'",
",",
"# MIPS",
"'4kc'",
",",
"'4kp'",
",",
"'5kc'",
",",
"'20kc'",
",",
"'m4k'",
",",
"'r2000'",
",",
"'r3000'",
",",
"'r3900'",
",",
"'r4000'",
",",
"'r4100'",
",",
"'r4300'",
",",
"'r4400'",
",",
"'r4600'",
",",
"'r4650'",
",",
"'r6000'",
",",
"'r8000'",
",",
"'rm7000'",
",",
"'rm9000'",
",",
"'orion'",
",",
"'sb1'",
",",
"'vr4100'",
",",
"'vr4111'",
",",
"'vr4120'",
",",
"'vr4130'",
",",
"'vr4300'",
",",
"'vr5000'",
",",
"'vr5400'",
",",
"'vr5500'",
",",
"# HP/PA-RISC",
"'700'",
",",
"'7100'",
",",
"'7100lc'",
",",
"'7200'",
",",
"'7300'",
",",
"'8000'",
",",
"# Advanced RISC Machines",
"'armv2'",
",",
"'armv2a'",
",",
"'armv3'",
",",
"'armv3m'",
",",
"'armv4'",
",",
"'armv4t'",
",",
"'armv5'",
",",
"'armv5t'",
",",
"'armv5te'",
",",
"'armv6'",
",",
"'armv6j'",
",",
"'iwmmxt'",
",",
"'ep9312'",
"]",
",",
"[",
"'propagated'",
",",
"'optional'",
"]",
")",
"feature",
".",
"feature",
"(",
"'conditional'",
",",
"[",
"]",
",",
"[",
"'incidental'",
",",
"'free'",
"]",
")",
"# The value of 'no' prevents building of a target.",
"feature",
".",
"feature",
"(",
"'build'",
",",
"[",
"'yes'",
",",
"'no'",
"]",
",",
"[",
"'optional'",
"]",
")",
"# Windows-specific features",
"feature",
".",
"feature",
"(",
"'user-interface'",
",",
"[",
"'console'",
",",
"'gui'",
",",
"'wince'",
",",
"'native'",
",",
"'auto'",
"]",
",",
"[",
"]",
")",
"feature",
".",
"feature",
"(",
"'variant'",
",",
"[",
"]",
",",
"[",
"'implicit'",
",",
"'composite'",
",",
"'propagated'",
",",
"'symmetric'",
"]",
")",
"variant",
"(",
"'debug'",
",",
"[",
"'<optimization>off'",
",",
"'<debug-symbols>on'",
",",
"'<inlining>off'",
",",
"'<runtime-debugging>on'",
"]",
")",
"variant",
"(",
"'release'",
",",
"[",
"'<optimization>speed'",
",",
"'<debug-symbols>off'",
",",
"'<inlining>full'",
",",
"'<runtime-debugging>off'",
",",
"'<define>NDEBUG'",
"]",
")",
"variant",
"(",
"'profile'",
",",
"[",
"'release'",
"]",
",",
"[",
"'<profiling>on'",
",",
"'<debug-symbols>on'",
"]",
")"
] | Registers all features and variants declared by this module. | [
"Registers",
"all",
"features",
"and",
"variants",
"declared",
"by",
"this",
"module",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/builtin.py#L110-L330 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/builtin.py | lib | def lib(names, sources=[], requirements=[], default_build=[], usage_requirements=[]):
"""The implementation of the 'lib' rule. Beyond standard syntax that rule allows
simplified: 'lib a b c ;'."""
assert is_iterable_typed(names, basestring)
assert is_iterable_typed(sources, basestring)
assert is_iterable_typed(requirements, basestring)
assert is_iterable_typed(default_build, basestring)
assert is_iterable_typed(usage_requirements, basestring)
if len(names) > 1:
if any(r.startswith('<name>') for r in requirements):
get_manager().errors()("When several names are given to the 'lib' rule\n" +
"it is not allowed to specify the <name> feature.")
if sources:
get_manager().errors()("When several names are given to the 'lib' rule\n" +
"it is not allowed to specify sources.")
project = get_manager().projects().current()
result = []
for name in names:
r = requirements[:]
# Support " lib a ; " and " lib a b c ; " syntax.
if not sources and not any(r.startswith("<name>") for r in requirements) \
and not any(r.startswith("<file") for r in requirements):
r.append("<name>" + name)
result.append(targets.create_typed_metatarget(name, "LIB", sources,
r,
default_build,
usage_requirements))
return result | python | def lib(names, sources=[], requirements=[], default_build=[], usage_requirements=[]):
"""The implementation of the 'lib' rule. Beyond standard syntax that rule allows
simplified: 'lib a b c ;'."""
assert is_iterable_typed(names, basestring)
assert is_iterable_typed(sources, basestring)
assert is_iterable_typed(requirements, basestring)
assert is_iterable_typed(default_build, basestring)
assert is_iterable_typed(usage_requirements, basestring)
if len(names) > 1:
if any(r.startswith('<name>') for r in requirements):
get_manager().errors()("When several names are given to the 'lib' rule\n" +
"it is not allowed to specify the <name> feature.")
if sources:
get_manager().errors()("When several names are given to the 'lib' rule\n" +
"it is not allowed to specify sources.")
project = get_manager().projects().current()
result = []
for name in names:
r = requirements[:]
# Support " lib a ; " and " lib a b c ; " syntax.
if not sources and not any(r.startswith("<name>") for r in requirements) \
and not any(r.startswith("<file") for r in requirements):
r.append("<name>" + name)
result.append(targets.create_typed_metatarget(name, "LIB", sources,
r,
default_build,
usage_requirements))
return result | [
"def",
"lib",
"(",
"names",
",",
"sources",
"=",
"[",
"]",
",",
"requirements",
"=",
"[",
"]",
",",
"default_build",
"=",
"[",
"]",
",",
"usage_requirements",
"=",
"[",
"]",
")",
":",
"assert",
"is_iterable_typed",
"(",
"names",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"sources",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"requirements",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"default_build",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"usage_requirements",
",",
"basestring",
")",
"if",
"len",
"(",
"names",
")",
">",
"1",
":",
"if",
"any",
"(",
"r",
".",
"startswith",
"(",
"'<name>'",
")",
"for",
"r",
"in",
"requirements",
")",
":",
"get_manager",
"(",
")",
".",
"errors",
"(",
")",
"(",
"\"When several names are given to the 'lib' rule\\n\"",
"+",
"\"it is not allowed to specify the <name> feature.\"",
")",
"if",
"sources",
":",
"get_manager",
"(",
")",
".",
"errors",
"(",
")",
"(",
"\"When several names are given to the 'lib' rule\\n\"",
"+",
"\"it is not allowed to specify sources.\"",
")",
"project",
"=",
"get_manager",
"(",
")",
".",
"projects",
"(",
")",
".",
"current",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"r",
"=",
"requirements",
"[",
":",
"]",
"# Support \" lib a ; \" and \" lib a b c ; \" syntax.",
"if",
"not",
"sources",
"and",
"not",
"any",
"(",
"r",
".",
"startswith",
"(",
"\"<name>\"",
")",
"for",
"r",
"in",
"requirements",
")",
"and",
"not",
"any",
"(",
"r",
".",
"startswith",
"(",
"\"<file\"",
")",
"for",
"r",
"in",
"requirements",
")",
":",
"r",
".",
"append",
"(",
"\"<name>\"",
"+",
"name",
")",
"result",
".",
"append",
"(",
"targets",
".",
"create_typed_metatarget",
"(",
"name",
",",
"\"LIB\"",
",",
"sources",
",",
"r",
",",
"default_build",
",",
"usage_requirements",
")",
")",
"return",
"result"
] | The implementation of the 'lib' rule. Beyond standard syntax that rule allows
simplified: 'lib a b c ;'. | [
"The",
"implementation",
"of",
"the",
"lib",
"rule",
".",
"Beyond",
"standard",
"syntax",
"that",
"rule",
"allows",
"simplified",
":",
"lib",
"a",
"b",
"c",
";",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/builtin.py#L475-L507 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/builtin.py | CompileAction.adjust_properties | def adjust_properties (self, prop_set):
""" For all virtual targets for the same dependency graph as self,
i.e. which belong to the same main target, add their directories
to include path.
"""
assert isinstance(prop_set, property_set.PropertySet)
s = self.targets () [0].creating_subvariant ()
return prop_set.add_raw (s.implicit_includes ('include', 'H')) | python | def adjust_properties (self, prop_set):
""" For all virtual targets for the same dependency graph as self,
i.e. which belong to the same main target, add their directories
to include path.
"""
assert isinstance(prop_set, property_set.PropertySet)
s = self.targets () [0].creating_subvariant ()
return prop_set.add_raw (s.implicit_includes ('include', 'H')) | [
"def",
"adjust_properties",
"(",
"self",
",",
"prop_set",
")",
":",
"assert",
"isinstance",
"(",
"prop_set",
",",
"property_set",
".",
"PropertySet",
")",
"s",
"=",
"self",
".",
"targets",
"(",
")",
"[",
"0",
"]",
".",
"creating_subvariant",
"(",
")",
"return",
"prop_set",
".",
"add_raw",
"(",
"s",
".",
"implicit_includes",
"(",
"'include'",
",",
"'H'",
")",
")"
] | For all virtual targets for the same dependency graph as self,
i.e. which belong to the same main target, add their directories
to include path. | [
"For",
"all",
"virtual",
"targets",
"for",
"the",
"same",
"dependency",
"graph",
"as",
"self",
"i",
".",
"e",
".",
"which",
"belong",
"to",
"the",
"same",
"main",
"target",
"add",
"their",
"directories",
"to",
"include",
"path",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/builtin.py#L581-L589 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/popularity_recommender.py | create | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
random_seed=0,
verbose=True):
"""
Create a model that makes recommendations using item popularity. When no
target column is provided, the popularity is determined by the number of
observations involving each item. When a target is provided, popularity
is computed using the item's mean target value. When the target column
contains ratings, for example, the model computes the mean rating for
each item and uses this to rank items for recommendations.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
verbose : bool, optional
Enables verbose output.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m = turicreate.popularity_recommender.create(sf, target='rating')
See Also
--------
PopularityRecommender
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.popularity()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'random_seed': 1}
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return PopularityRecommender(model_proxy) | python | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
random_seed=0,
verbose=True):
"""
Create a model that makes recommendations using item popularity. When no
target column is provided, the popularity is determined by the number of
observations involving each item. When a target is provided, popularity
is computed using the item's mean target value. When the target column
contains ratings, for example, the model computes the mean rating for
each item and uses this to rank items for recommendations.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
verbose : bool, optional
Enables verbose output.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m = turicreate.popularity_recommender.create(sf, target='rating')
See Also
--------
PopularityRecommender
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.popularity()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'random_seed': 1}
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return PopularityRecommender(model_proxy) | [
"def",
"create",
"(",
"observation_data",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"target",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"item_data",
"=",
"None",
",",
"random_seed",
"=",
"0",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"opts",
"=",
"{",
"}",
"model_proxy",
"=",
"_turicreate",
".",
"extensions",
".",
"popularity",
"(",
")",
"model_proxy",
".",
"init_options",
"(",
"opts",
")",
"if",
"user_data",
"is",
"None",
":",
"user_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"item_data",
"is",
"None",
":",
"item_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"nearest_items",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"opts",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'target'",
":",
"target",
",",
"'random_seed'",
":",
"1",
"}",
"extra_data",
"=",
"{",
"\"nearest_items\"",
":",
"_turicreate",
".",
"SFrame",
"(",
")",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"model_proxy",
".",
"train",
"(",
"observation_data",
",",
"user_data",
",",
"item_data",
",",
"opts",
",",
"extra_data",
")",
"return",
"PopularityRecommender",
"(",
"model_proxy",
")"
] | Create a model that makes recommendations using item popularity. When no
target column is provided, the popularity is determined by the number of
observations involving each item. When a target is provided, popularity
is computed using the item's mean target value. When the target column
contains ratings, for example, the model computes the mean rating for
each item and uses this to rank items for recommendations.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
verbose : bool, optional
Enables verbose output.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m = turicreate.popularity_recommender.create(sf, target='rating')
See Also
--------
PopularityRecommender | [
"Create",
"a",
"model",
"that",
"makes",
"recommendations",
"using",
"item",
"popularity",
".",
"When",
"no",
"target",
"column",
"is",
"provided",
"the",
"popularity",
"is",
"determined",
"by",
"the",
"number",
"of",
"observations",
"involving",
"each",
"item",
".",
"When",
"a",
"target",
"is",
"provided",
"popularity",
"is",
"computed",
"using",
"the",
"item",
"s",
"mean",
"target",
"value",
".",
"When",
"the",
"target",
"column",
"contains",
"ratings",
"for",
"example",
"the",
"model",
"computes",
"the",
"mean",
"rating",
"for",
"each",
"item",
"and",
"uses",
"this",
"to",
"rank",
"items",
"for",
"recommendations",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/popularity_recommender.py#L15-L102 | train |
apple/turicreate | src/external/xgboost/python-package/xgboost/sklearn.py | XGBModel.get_params | def get_params(self, deep=False):
"""Get parameter.s"""
params = super(XGBModel, self).get_params(deep=deep)
if params['missing'] is np.nan:
params['missing'] = None # sklearn doesn't handle nan. see #4725
if not params.get('eval_metric', True):
del params['eval_metric'] # don't give as None param to Booster
return params | python | def get_params(self, deep=False):
"""Get parameter.s"""
params = super(XGBModel, self).get_params(deep=deep)
if params['missing'] is np.nan:
params['missing'] = None # sklearn doesn't handle nan. see #4725
if not params.get('eval_metric', True):
del params['eval_metric'] # don't give as None param to Booster
return params | [
"def",
"get_params",
"(",
"self",
",",
"deep",
"=",
"False",
")",
":",
"params",
"=",
"super",
"(",
"XGBModel",
",",
"self",
")",
".",
"get_params",
"(",
"deep",
"=",
"deep",
")",
"if",
"params",
"[",
"'missing'",
"]",
"is",
"np",
".",
"nan",
":",
"params",
"[",
"'missing'",
"]",
"=",
"None",
"# sklearn doesn't handle nan. see #4725",
"if",
"not",
"params",
".",
"get",
"(",
"'eval_metric'",
",",
"True",
")",
":",
"del",
"params",
"[",
"'eval_metric'",
"]",
"# don't give as None param to Booster",
"return",
"params"
] | Get parameter.s | [
"Get",
"parameter",
".",
"s"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/sklearn.py#L126-L133 | train |
apple/turicreate | src/external/xgboost/python-package/xgboost/sklearn.py | XGBModel.get_xgb_params | def get_xgb_params(self):
"""Get xgboost type parameters."""
xgb_params = self.get_params()
xgb_params['silent'] = 1 if self.silent else 0
if self.nthread <= 0:
xgb_params.pop('nthread', None)
return xgb_params | python | def get_xgb_params(self):
"""Get xgboost type parameters."""
xgb_params = self.get_params()
xgb_params['silent'] = 1 if self.silent else 0
if self.nthread <= 0:
xgb_params.pop('nthread', None)
return xgb_params | [
"def",
"get_xgb_params",
"(",
"self",
")",
":",
"xgb_params",
"=",
"self",
".",
"get_params",
"(",
")",
"xgb_params",
"[",
"'silent'",
"]",
"=",
"1",
"if",
"self",
".",
"silent",
"else",
"0",
"if",
"self",
".",
"nthread",
"<=",
"0",
":",
"xgb_params",
".",
"pop",
"(",
"'nthread'",
",",
"None",
")",
"return",
"xgb_params"
] | Get xgboost type parameters. | [
"Get",
"xgboost",
"type",
"parameters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/sklearn.py#L135-L143 | train |
apple/turicreate | src/external/xgboost/python-package/xgboost/sklearn.py | XGBModel.fit | def fit(self, X, y, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True):
# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
"""
Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have two additional fields: bst.best_score and bst.best_iteration.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
"""
trainDmatrix = DMatrix(X, label=y, missing=self.missing)
evals_result = {}
if eval_set is not None:
evals = list(DMatrix(x[0], label=x[1]) for x in eval_set)
evals = list(zip(evals, ["validation_{}".format(i) for i in
range(len(evals))]))
else:
evals = ()
params = self.get_xgb_params()
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({'eval_metric': eval_metric})
self._Booster = train(params, trainDmatrix,
self.n_estimators, evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, feval=feval,
verbose_eval=verbose)
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
return self | python | def fit(self, X, y, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True):
# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
"""
Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have two additional fields: bst.best_score and bst.best_iteration.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
"""
trainDmatrix = DMatrix(X, label=y, missing=self.missing)
evals_result = {}
if eval_set is not None:
evals = list(DMatrix(x[0], label=x[1]) for x in eval_set)
evals = list(zip(evals, ["validation_{}".format(i) for i in
range(len(evals))]))
else:
evals = ()
params = self.get_xgb_params()
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({'eval_metric': eval_metric})
self._Booster = train(params, trainDmatrix,
self.n_estimators, evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, feval=feval,
verbose_eval=verbose)
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"eval_set",
"=",
"None",
",",
"eval_metric",
"=",
"None",
",",
"early_stopping_rounds",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init",
"trainDmatrix",
"=",
"DMatrix",
"(",
"X",
",",
"label",
"=",
"y",
",",
"missing",
"=",
"self",
".",
"missing",
")",
"evals_result",
"=",
"{",
"}",
"if",
"eval_set",
"is",
"not",
"None",
":",
"evals",
"=",
"list",
"(",
"DMatrix",
"(",
"x",
"[",
"0",
"]",
",",
"label",
"=",
"x",
"[",
"1",
"]",
")",
"for",
"x",
"in",
"eval_set",
")",
"evals",
"=",
"list",
"(",
"zip",
"(",
"evals",
",",
"[",
"\"validation_{}\"",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"evals",
")",
")",
"]",
")",
")",
"else",
":",
"evals",
"=",
"(",
")",
"params",
"=",
"self",
".",
"get_xgb_params",
"(",
")",
"feval",
"=",
"eval_metric",
"if",
"callable",
"(",
"eval_metric",
")",
"else",
"None",
"if",
"eval_metric",
"is",
"not",
"None",
":",
"if",
"callable",
"(",
"eval_metric",
")",
":",
"eval_metric",
"=",
"None",
"else",
":",
"params",
".",
"update",
"(",
"{",
"'eval_metric'",
":",
"eval_metric",
"}",
")",
"self",
".",
"_Booster",
"=",
"train",
"(",
"params",
",",
"trainDmatrix",
",",
"self",
".",
"n_estimators",
",",
"evals",
"=",
"evals",
",",
"early_stopping_rounds",
"=",
"early_stopping_rounds",
",",
"evals_result",
"=",
"evals_result",
",",
"feval",
"=",
"feval",
",",
"verbose_eval",
"=",
"verbose",
")",
"if",
"evals_result",
":",
"for",
"val",
"in",
"evals_result",
".",
"items",
"(",
")",
":",
"evals_result_key",
"=",
"list",
"(",
"val",
"[",
"1",
"]",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"evals_result",
"[",
"val",
"[",
"0",
"]",
"]",
"[",
"evals_result_key",
"]",
"=",
"val",
"[",
"1",
"]",
"[",
"evals_result_key",
"]",
"self",
".",
"evals_result_",
"=",
"evals_result",
"if",
"early_stopping_rounds",
"is",
"not",
"None",
":",
"self",
".",
"best_score",
"=",
"self",
".",
"_Booster",
".",
"best_score",
"self",
".",
"best_iteration",
"=",
"self",
".",
"_Booster",
".",
"best_iteration",
"return",
"self"
] | Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have two additional fields: bst.best_score and bst.best_iteration.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr. | [
"Fit",
"the",
"gradient",
"boosting",
"model"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/sklearn.py#L145-L213 | train |
apple/turicreate | src/external/xgboost/python-package/xgboost/sklearn.py | XGBClassifier.fit | def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True):
# pylint: disable = attribute-defined-outside-init,arguments-differ
"""
Fit gradient boosting classifier
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
Weight for each instance
eval_set : list, optional
A list of (X, y) pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int, optional
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have two additional fields: bst.best_score and bst.best_iteration.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
"""
evals_result = {}
self.classes_ = list(np.unique(y))
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
# Switch to using a multiclass objective in the underlying XGB instance
self.objective = "multi:softprob"
xgb_options = self.get_xgb_params()
xgb_options['num_class'] = self.n_classes_
else:
xgb_options = self.get_xgb_params()
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
xgb_options.update({"eval_metric": eval_metric})
if eval_set is not None:
# TODO: use sample_weight if given?
evals = list(DMatrix(x[0], label=x[1]) for x in eval_set)
nevals = len(evals)
eval_names = ["validation_{}".format(i) for i in range(nevals)]
evals = list(zip(evals, eval_names))
else:
evals = ()
self._le = LabelEncoder().fit(y)
training_labels = self._le.transform(y)
if sample_weight is not None:
train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight,
missing=self.missing)
else:
train_dmatrix = DMatrix(X, label=training_labels,
missing=self.missing)
self._Booster = train(xgb_options, train_dmatrix, self.n_estimators,
evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, feval=feval,
verbose_eval=verbose)
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
return self | python | def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True):
# pylint: disable = attribute-defined-outside-init,arguments-differ
"""
Fit gradient boosting classifier
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
Weight for each instance
eval_set : list, optional
A list of (X, y) pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int, optional
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have two additional fields: bst.best_score and bst.best_iteration.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
"""
evals_result = {}
self.classes_ = list(np.unique(y))
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
# Switch to using a multiclass objective in the underlying XGB instance
self.objective = "multi:softprob"
xgb_options = self.get_xgb_params()
xgb_options['num_class'] = self.n_classes_
else:
xgb_options = self.get_xgb_params()
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
xgb_options.update({"eval_metric": eval_metric})
if eval_set is not None:
# TODO: use sample_weight if given?
evals = list(DMatrix(x[0], label=x[1]) for x in eval_set)
nevals = len(evals)
eval_names = ["validation_{}".format(i) for i in range(nevals)]
evals = list(zip(evals, eval_names))
else:
evals = ()
self._le = LabelEncoder().fit(y)
training_labels = self._le.transform(y)
if sample_weight is not None:
train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight,
missing=self.missing)
else:
train_dmatrix = DMatrix(X, label=training_labels,
missing=self.missing)
self._Booster = train(xgb_options, train_dmatrix, self.n_estimators,
evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, feval=feval,
verbose_eval=verbose)
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"sample_weight",
"=",
"None",
",",
"eval_set",
"=",
"None",
",",
"eval_metric",
"=",
"None",
",",
"early_stopping_rounds",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"# pylint: disable = attribute-defined-outside-init,arguments-differ",
"evals_result",
"=",
"{",
"}",
"self",
".",
"classes_",
"=",
"list",
"(",
"np",
".",
"unique",
"(",
"y",
")",
")",
"self",
".",
"n_classes_",
"=",
"len",
"(",
"self",
".",
"classes_",
")",
"if",
"self",
".",
"n_classes_",
">",
"2",
":",
"# Switch to using a multiclass objective in the underlying XGB instance",
"self",
".",
"objective",
"=",
"\"multi:softprob\"",
"xgb_options",
"=",
"self",
".",
"get_xgb_params",
"(",
")",
"xgb_options",
"[",
"'num_class'",
"]",
"=",
"self",
".",
"n_classes_",
"else",
":",
"xgb_options",
"=",
"self",
".",
"get_xgb_params",
"(",
")",
"feval",
"=",
"eval_metric",
"if",
"callable",
"(",
"eval_metric",
")",
"else",
"None",
"if",
"eval_metric",
"is",
"not",
"None",
":",
"if",
"callable",
"(",
"eval_metric",
")",
":",
"eval_metric",
"=",
"None",
"else",
":",
"xgb_options",
".",
"update",
"(",
"{",
"\"eval_metric\"",
":",
"eval_metric",
"}",
")",
"if",
"eval_set",
"is",
"not",
"None",
":",
"# TODO: use sample_weight if given?",
"evals",
"=",
"list",
"(",
"DMatrix",
"(",
"x",
"[",
"0",
"]",
",",
"label",
"=",
"x",
"[",
"1",
"]",
")",
"for",
"x",
"in",
"eval_set",
")",
"nevals",
"=",
"len",
"(",
"evals",
")",
"eval_names",
"=",
"[",
"\"validation_{}\"",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"nevals",
")",
"]",
"evals",
"=",
"list",
"(",
"zip",
"(",
"evals",
",",
"eval_names",
")",
")",
"else",
":",
"evals",
"=",
"(",
")",
"self",
".",
"_le",
"=",
"LabelEncoder",
"(",
")",
".",
"fit",
"(",
"y",
")",
"training_labels",
"=",
"self",
".",
"_le",
".",
"transform",
"(",
"y",
")",
"if",
"sample_weight",
"is",
"not",
"None",
":",
"train_dmatrix",
"=",
"DMatrix",
"(",
"X",
",",
"label",
"=",
"training_labels",
",",
"weight",
"=",
"sample_weight",
",",
"missing",
"=",
"self",
".",
"missing",
")",
"else",
":",
"train_dmatrix",
"=",
"DMatrix",
"(",
"X",
",",
"label",
"=",
"training_labels",
",",
"missing",
"=",
"self",
".",
"missing",
")",
"self",
".",
"_Booster",
"=",
"train",
"(",
"xgb_options",
",",
"train_dmatrix",
",",
"self",
".",
"n_estimators",
",",
"evals",
"=",
"evals",
",",
"early_stopping_rounds",
"=",
"early_stopping_rounds",
",",
"evals_result",
"=",
"evals_result",
",",
"feval",
"=",
"feval",
",",
"verbose_eval",
"=",
"verbose",
")",
"if",
"evals_result",
":",
"for",
"val",
"in",
"evals_result",
".",
"items",
"(",
")",
":",
"evals_result_key",
"=",
"list",
"(",
"val",
"[",
"1",
"]",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"evals_result",
"[",
"val",
"[",
"0",
"]",
"]",
"[",
"evals_result_key",
"]",
"=",
"val",
"[",
"1",
"]",
"[",
"evals_result_key",
"]",
"self",
".",
"evals_result_",
"=",
"evals_result",
"if",
"early_stopping_rounds",
"is",
"not",
"None",
":",
"self",
".",
"best_score",
"=",
"self",
".",
"_Booster",
".",
"best_score",
"self",
".",
"best_iteration",
"=",
"self",
".",
"_Booster",
".",
"best_iteration",
"return",
"self"
] | Fit gradient boosting classifier
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
Weight for each instance
eval_set : list, optional
A list of (X, y) pairs to use as a validation set for
early-stopping
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.md. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int, optional
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have two additional fields: bst.best_score and bst.best_iteration.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr. | [
"Fit",
"gradient",
"boosting",
"classifier"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/sklearn.py#L280-L369 | train |
def add_grist (features):
    """ Bracket a value with "<>" unless it is already gristed.

        features: a single string or a sequence of strings
        return: the gristed string, or a list of gristed strings when a
            sequence was given
    """
    assert is_iterable_typed(features, basestring) or isinstance(features, basestring)

    def _grist (item):
        # A value that already starts with '<' or ends with '>' is
        # treated as gristed and returned untouched.
        if item [0] == '<' or item [len (item) - 1] == '>':
            return item
        return '<' + item + '>'

    if not isinstance (features, str):
        return [_grist (f) for f in features]
    return _grist (features)
"def",
"add_grist",
"(",
"features",
")",
":",
"assert",
"is_iterable_typed",
"(",
"features",
",",
"basestring",
")",
"or",
"isinstance",
"(",
"features",
",",
"basestring",
")",
"def",
"grist_one",
"(",
"feature",
")",
":",
"if",
"feature",
"[",
"0",
"]",
"!=",
"'<'",
"and",
"feature",
"[",
"len",
"(",
"feature",
")",
"-",
"1",
"]",
"!=",
"'>'",
":",
"return",
"'<'",
"+",
"feature",
"+",
"'>'",
"else",
":",
"return",
"feature",
"if",
"isinstance",
"(",
"features",
",",
"str",
")",
":",
"return",
"grist_one",
"(",
"features",
")",
"else",
":",
"return",
"[",
"grist_one",
"(",
"feature",
")",
"for",
"feature",
"in",
"features",
"]"
] | Transform a string by bracketing it with "<>". If already bracketed, does nothing.
features: one string or a sequence of strings
return: the gristed string, if features is a string, or a sequence of gristed strings, if features is a sequence | [
"Transform",
"a",
"string",
"by",
"bracketing",
"it",
"with",
"<",
">",
".",
"If",
"already",
"bracketed",
"does",
"nothing",
".",
"features",
":",
"one",
"string",
"or",
"a",
"sequence",
"of",
"strings",
"return",
":",
"the",
"gristed",
"string",
"if",
"features",
"is",
"a",
"string",
"or",
"a",
"sequence",
"of",
"gristed",
"strings",
"if",
"features",
"is",
"a",
"sequence"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L39-L54 | train |
def replace_grist (features, new_grist):
    """ Return features with their grist replaced by new_grist.

        Accepts either a single property string or a sequence of them;
        the result mirrors the shape of the input.
    """
    assert is_iterable_typed(features, basestring) or isinstance(features, basestring)
    assert isinstance(new_grist, basestring)
    # This runs on the build hot path, hence the hand-rolled parsing
    # via str.partition instead of a regex.
    was_single = isinstance(features, str)
    if was_single:
        features = [features]

    replaced = []
    for prop in features:
        # partition('>') splits '<feature>value' into
        # ('<feature', '>', 'value'); a plain 'value' yields
        # ('value', '', '').
        head, sep, tail = prop.partition('>')
        if not tail and not sep:
            # No grist present: the whole string is the value.
            tail = head
        replaced.append(new_grist + tail)

    return replaced[0] if was_single else replaced
"def",
"replace_grist",
"(",
"features",
",",
"new_grist",
")",
":",
"assert",
"is_iterable_typed",
"(",
"features",
",",
"basestring",
")",
"or",
"isinstance",
"(",
"features",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"new_grist",
",",
"basestring",
")",
"# this function is used a lot in the build phase and the original implementation",
"# was extremely slow; thus some of the weird-looking optimizations for this function.",
"single_item",
"=",
"False",
"if",
"isinstance",
"(",
"features",
",",
"str",
")",
":",
"features",
"=",
"[",
"features",
"]",
"single_item",
"=",
"True",
"result",
"=",
"[",
"]",
"for",
"feature",
"in",
"features",
":",
"# '<feature>value' -> ('<feature', '>', 'value')",
"# 'something' -> ('something', '', '')",
"# '<toolset>msvc/<feature>value' -> ('<toolset', '>', 'msvc/<feature>value')",
"grist",
",",
"split",
",",
"value",
"=",
"feature",
".",
"partition",
"(",
"'>'",
")",
"# if a partition didn't occur, then grist is just 'something'",
"# set the value to be the grist",
"if",
"not",
"value",
"and",
"not",
"split",
":",
"value",
"=",
"grist",
"result",
".",
"append",
"(",
"new_grist",
"+",
"value",
")",
"if",
"single_item",
":",
"return",
"result",
"[",
"0",
"]",
"return",
"result"
] | Replaces the grist of a string by a new one.
Returns the string with the new grist. | [
"Replaces",
"the",
"grist",
"of",
"a",
"string",
"by",
"a",
"new",
"one",
".",
"Returns",
"the",
"string",
"with",
"the",
"new",
"grist",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L56-L83 | train |
def get_value (property):
    """ Return the value part of a property, i.e. everything after the grist, if any.
    """
    assert is_iterable_typed(property, basestring) or isinstance(property, basestring)
    # Replacing the grist with an empty string leaves only the value.
    return replace_grist (property, '')
"def",
"get_value",
"(",
"property",
")",
":",
"assert",
"is_iterable_typed",
"(",
"property",
",",
"basestring",
")",
"or",
"isinstance",
"(",
"property",
",",
"basestring",
")",
"return",
"replace_grist",
"(",
"property",
",",
"''",
")"
] | Gets the value of a property, that is, the part following the grist, if any. | [
"Gets",
"the",
"value",
"of",
"a",
"property",
"that",
"is",
"the",
"part",
"following",
"the",
"grist",
"if",
"any",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L85-L89 | train |
def get_grist (value):
    """ Return the grist of a string, or of each string in a sequence.

        Strings with no grist yield the empty string.
    """
    assert is_iterable_typed(value, basestring) or isinstance(value, basestring)

    def _grist_of (name):
        m = __re_grist_and_value.match (name)
        return m.group (1) if m else ''

    if not isinstance (value, str):
        return [_grist_of (v) for v in value]
    return _grist_of (value)
"def",
"get_grist",
"(",
"value",
")",
":",
"assert",
"is_iterable_typed",
"(",
"value",
",",
"basestring",
")",
"or",
"isinstance",
"(",
"value",
",",
"basestring",
")",
"def",
"get_grist_one",
"(",
"name",
")",
":",
"split",
"=",
"__re_grist_and_value",
".",
"match",
"(",
"name",
")",
"if",
"not",
"split",
":",
"return",
"''",
"else",
":",
"return",
"split",
".",
"group",
"(",
"1",
")",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"get_grist_one",
"(",
"value",
")",
"else",
":",
"return",
"[",
"get_grist_one",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]"
] | Returns the grist of a string.
If value is a sequence, does it for every value and returns the result as a sequence. | [
"Returns",
"the",
"grist",
"of",
"a",
"string",
".",
"If",
"value",
"is",
"a",
"sequence",
"does",
"it",
"for",
"every",
"value",
"and",
"returns",
"the",
"result",
"as",
"a",
"sequence",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L91-L106 | train |
def ungrist (value):
    """ Return the value with its grist brackets stripped.

        If value is a sequence, strips every element and returns the
        results as a list.

        Raises an exception when an element is not of the '<...>' form.
    """
    assert is_iterable_typed(value, basestring) or isinstance(value, basestring)
    def ungrist_one (value):
        stripped = __re_grist_content.match (value)
        if not stripped:
            # Raise Exception rather than BaseException: raising
            # BaseException directly bypasses 'except Exception'
            # handlers (BaseException is reserved for interpreter-level
            # exits such as SystemExit/KeyboardInterrupt). Any caller
            # catching BaseException still catches this.
            raise Exception ("in ungrist: '%s' is not of the form <.*>" % value)
        return stripped.group (1)

    if isinstance (value, str):
        return ungrist_one (value)
    else:
        return [ ungrist_one (v) for v in value ]
"def",
"ungrist",
"(",
"value",
")",
":",
"assert",
"is_iterable_typed",
"(",
"value",
",",
"basestring",
")",
"or",
"isinstance",
"(",
"value",
",",
"basestring",
")",
"def",
"ungrist_one",
"(",
"value",
")",
":",
"stripped",
"=",
"__re_grist_content",
".",
"match",
"(",
"value",
")",
"if",
"not",
"stripped",
":",
"raise",
"BaseException",
"(",
"\"in ungrist: '%s' is not of the form <.*>\"",
"%",
"value",
")",
"return",
"stripped",
".",
"group",
"(",
"1",
")",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"ungrist_one",
"(",
"value",
")",
"else",
":",
"return",
"[",
"ungrist_one",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]"
] | Returns the value without grist.
If value is a sequence, does it for every value and returns the result as a sequence. | [
"Returns",
"the",
"value",
"without",
"grist",
".",
"If",
"value",
"is",
"a",
"sequence",
"does",
"it",
"for",
"every",
"value",
"and",
"returns",
"the",
"result",
"as",
"a",
"sequence",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L108-L123 | train |
def replace_suffix (name, new_suffix):
    """ Return name with its extension replaced by new_suffix.

        When name has no extension, new_suffix is simply appended.
    """
    assert isinstance(name, basestring)
    assert isinstance(new_suffix, basestring)
    root, _ext = os.path.splitext (name)
    return root + new_suffix
"def",
"replace_suffix",
"(",
"name",
",",
"new_suffix",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"new_suffix",
",",
"basestring",
")",
"split",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"name",
")",
"return",
"split",
"[",
"0",
"]",
"+",
"new_suffix"
] | Replaces the suffix of name by new_suffix.
If no suffix exists, the new one is added. | [
"Replaces",
"the",
"suffix",
"of",
"name",
"by",
"new_suffix",
".",
"If",
"no",
"suffix",
"exists",
"the",
"new",
"one",
"is",
"added",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L125-L132 | train |
def split_action_id (id):
    """ Split an action id into its toolset and rule parts.

        E.g. 'gcc.compile.c++' yields ('gcc', 'compile.c++'); an id
        containing no dot yields (id, '').
    """
    assert isinstance(id, basestring)
    parts = id.split ('.', 1)
    if len (parts) > 1:
        return (parts[0], parts[1])
    return (parts[0], '')
"def",
"split_action_id",
"(",
"id",
")",
":",
"assert",
"isinstance",
"(",
"id",
",",
"basestring",
")",
"split",
"=",
"id",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"toolset",
"=",
"split",
"[",
"0",
"]",
"name",
"=",
"''",
"if",
"len",
"(",
"split",
")",
">",
"1",
":",
"name",
"=",
"split",
"[",
"1",
"]",
"return",
"(",
"toolset",
",",
"name",
")"
] | Splits an id in the toolset and specific rule parts. E.g.
'gcc.compile.c++' returns ('gcc', 'compile.c++') | [
"Splits",
"an",
"id",
"in",
"the",
"toolset",
"and",
"specific",
"rule",
"parts",
".",
"E",
".",
"g",
".",
"gcc",
".",
"compile",
".",
"c",
"++",
"returns",
"(",
"gcc",
"compile",
".",
"c",
"++",
")"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L141-L151 | train |
def on_windows ():
    """ Return True when running on Windows, whether under Cygwin or not.
    """
    if bjam.variable("NT"):
        return True
    if bjam.variable("UNIX"):
        # On a UNIX-flavoured build, Cygwin reveals itself through the
        # uname reported by bjam.
        uname = bjam.variable("JAMUNAME")
        if uname and uname[0].startswith("CYGWIN"):
            return True
    return False
"def",
"on_windows",
"(",
")",
":",
"if",
"bjam",
".",
"variable",
"(",
"\"NT\"",
")",
":",
"return",
"True",
"elif",
"bjam",
".",
"variable",
"(",
"\"UNIX\"",
")",
":",
"uname",
"=",
"bjam",
".",
"variable",
"(",
"\"JAMUNAME\"",
")",
"if",
"uname",
"and",
"uname",
"[",
"0",
"]",
".",
"startswith",
"(",
"\"CYGWIN\"",
")",
":",
"return",
"True",
"return",
"False"
] | Returns true if running on windows, whether in cygwin or not. | [
"Returns",
"true",
"if",
"running",
"on",
"windows",
"whether",
"in",
"cygwin",
"or",
"not",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/utility.py#L164-L176 | train |
def _validate_dataset(dataset):
    """
    Validate the main Kmeans dataset.

    Parameters
    ----------
    dataset: SFrame
        Input dataset.

    Raises
    ------
    TypeError
        If `dataset` is not an SFrame.
    ValueError
        If `dataset` contains no rows or no columns.
    """
    if not isinstance(dataset, _SFrame):
        raise TypeError("Input 'dataset' must be an SFrame.")

    if dataset.num_rows() == 0 or dataset.num_columns() == 0:
        raise ValueError("Input 'dataset' has no data.")
"def",
"_validate_dataset",
"(",
"dataset",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"dataset",
",",
"_SFrame",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Input 'dataset' must be an SFrame.\"",
")",
"if",
"dataset",
".",
"num_rows",
"(",
")",
"==",
"0",
"or",
"dataset",
".",
"num_columns",
"(",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'dataset' has no data.\"",
")"
] | Validate the main Kmeans dataset.
Parameters
----------
dataset: SFrame
Input dataset. | [
"Validate",
"the",
"main",
"Kmeans",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L25-L38 | train |
def _validate_initial_centers(initial_centers):
    """
    Validate the initial centers.

    Parameters
    ----------
    initial_centers : SFrame
        Initial cluster center locations, in SFrame form.

    Raises
    ------
    TypeError
        If `initial_centers` is not an SFrame.
    ValueError
        If `initial_centers` contains no rows or no columns.
    """
    if not isinstance(initial_centers, _SFrame):
        raise TypeError("Input 'initial_centers' must be an SFrame.")

    if initial_centers.num_rows() == 0 or initial_centers.num_columns() == 0:
        raise ValueError("An 'initial_centers' argument is provided "
                         "but has no data.")
"def",
"_validate_initial_centers",
"(",
"initial_centers",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"initial_centers",
",",
"_SFrame",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Input 'initial_centers' must be an SFrame.\"",
")",
"if",
"initial_centers",
".",
"num_rows",
"(",
")",
"==",
"0",
"or",
"initial_centers",
".",
"num_columns",
"(",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"An 'initial_centers' argument is provided \"",
"+",
"\"but has no data.\"",
")"
] | Validate the initial centers.
Parameters
----------
initial_centers : SFrame
Initial cluster center locations, in SFrame form. | [
"Validate",
"the",
"initial",
"centers",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L41-L55 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/clustering/kmeans.py | _validate_num_clusters | def _validate_num_clusters(num_clusters, initial_centers, num_rows):
"""
Validate the combination of the `num_clusters` and `initial_centers`
parameters in the Kmeans model create function. If the combination is
valid, determine and return the correct number of clusters.
Parameters
----------
num_clusters : int
Specified number of clusters.
initial_centers : SFrame
Specified initial cluster center locations, in SFrame form. If the
number of rows in this SFrame does not match `num_clusters`, there is a
problem.
num_rows : int
Number of rows in the input dataset.
Returns
-------
_num_clusters : int
The correct number of clusters to use going forward
"""
## Basic validation
if num_clusters is not None and not isinstance(num_clusters, int):
raise _ToolkitError("Parameter 'num_clusters' must be an integer.")
## Determine the correct number of clusters.
if initial_centers is None:
if num_clusters is None:
raise ValueError("Number of clusters cannot be determined from " +
"'num_clusters' or 'initial_centers'. You must " +
"specify one of these arguments.")
else:
_num_clusters = num_clusters
else:
num_centers = initial_centers.num_rows()
if num_clusters is None:
_num_clusters = num_centers
else:
if num_clusters != num_centers:
raise ValueError("The value of 'num_clusters' does not match " +
"the number of provided initial centers. " +
"Please provide only one of these arguments " +
"or ensure the values match.")
else:
_num_clusters = num_clusters
if _num_clusters > num_rows:
raise ValueError("The desired number of clusters exceeds the number " +
"of data points. Please set 'num_clusters' to be " +
"smaller than the number of data points.")
return _num_clusters | python | def _validate_num_clusters(num_clusters, initial_centers, num_rows):
"""
Validate the combination of the `num_clusters` and `initial_centers`
parameters in the Kmeans model create function. If the combination is
valid, determine and return the correct number of clusters.
Parameters
----------
num_clusters : int
Specified number of clusters.
initial_centers : SFrame
Specified initial cluster center locations, in SFrame form. If the
number of rows in this SFrame does not match `num_clusters`, there is a
problem.
num_rows : int
Number of rows in the input dataset.
Returns
-------
_num_clusters : int
The correct number of clusters to use going forward
"""
## Basic validation
if num_clusters is not None and not isinstance(num_clusters, int):
raise _ToolkitError("Parameter 'num_clusters' must be an integer.")
## Determine the correct number of clusters.
if initial_centers is None:
if num_clusters is None:
raise ValueError("Number of clusters cannot be determined from " +
"'num_clusters' or 'initial_centers'. You must " +
"specify one of these arguments.")
else:
_num_clusters = num_clusters
else:
num_centers = initial_centers.num_rows()
if num_clusters is None:
_num_clusters = num_centers
else:
if num_clusters != num_centers:
raise ValueError("The value of 'num_clusters' does not match " +
"the number of provided initial centers. " +
"Please provide only one of these arguments " +
"or ensure the values match.")
else:
_num_clusters = num_clusters
if _num_clusters > num_rows:
raise ValueError("The desired number of clusters exceeds the number " +
"of data points. Please set 'num_clusters' to be " +
"smaller than the number of data points.")
return _num_clusters | [
"def",
"_validate_num_clusters",
"(",
"num_clusters",
",",
"initial_centers",
",",
"num_rows",
")",
":",
"## Basic validation",
"if",
"num_clusters",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"num_clusters",
",",
"int",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"Parameter 'num_clusters' must be an integer.\"",
")",
"## Determine the correct number of clusters.",
"if",
"initial_centers",
"is",
"None",
":",
"if",
"num_clusters",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Number of clusters cannot be determined from \"",
"+",
"\"'num_clusters' or 'initial_centers'. You must \"",
"+",
"\"specify one of these arguments.\"",
")",
"else",
":",
"_num_clusters",
"=",
"num_clusters",
"else",
":",
"num_centers",
"=",
"initial_centers",
".",
"num_rows",
"(",
")",
"if",
"num_clusters",
"is",
"None",
":",
"_num_clusters",
"=",
"num_centers",
"else",
":",
"if",
"num_clusters",
"!=",
"num_centers",
":",
"raise",
"ValueError",
"(",
"\"The value of 'num_clusters' does not match \"",
"+",
"\"the number of provided initial centers. \"",
"+",
"\"Please provide only one of these arguments \"",
"+",
"\"or ensure the values match.\"",
")",
"else",
":",
"_num_clusters",
"=",
"num_clusters",
"if",
"_num_clusters",
">",
"num_rows",
":",
"raise",
"ValueError",
"(",
"\"The desired number of clusters exceeds the number \"",
"+",
"\"of data points. Please set 'num_clusters' to be \"",
"+",
"\"smaller than the number of data points.\"",
")",
"return",
"_num_clusters"
] | Validate the combination of the `num_clusters` and `initial_centers`
parameters in the Kmeans model create function. If the combination is
valid, determine and return the correct number of clusters.
Parameters
----------
num_clusters : int
Specified number of clusters.
initial_centers : SFrame
Specified initial cluster center locations, in SFrame form. If the
number of rows in this SFrame does not match `num_clusters`, there is a
problem.
num_rows : int
Number of rows in the input dataset.
Returns
-------
_num_clusters : int
The correct number of clusters to use going forward | [
"Validate",
"the",
"combination",
"of",
"the",
"num_clusters",
"and",
"initial_centers",
"parameters",
"in",
"the",
"Kmeans",
"model",
"create",
"function",
".",
"If",
"the",
"combination",
"is",
"valid",
"determine",
"and",
"return",
"the",
"correct",
"number",
"of",
"clusters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L58-L115 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/clustering/kmeans.py | _validate_features | def _validate_features(features, column_type_map, valid_types, label):
"""
Identify the subset of desired `features` that are valid for the Kmeans
model. A warning is emitted for each feature that is excluded.
Parameters
----------
features : list[str]
Desired feature names.
column_type_map : dict[str, type]
Dictionary mapping each column name to the type of values in the
column.
valid_types : list[type]
Exclude features whose type is not in this list.
label : str
Name of the row label column.
Returns
-------
valid_features : list[str]
Names of features to include in the model.
"""
if not isinstance(features, list):
raise TypeError("Input 'features' must be a list, if specified.")
if len(features) == 0:
raise ValueError("If specified, input 'features' must contain " +
"at least one column name.")
## Remove duplicates
num_original_features = len(features)
features = set(features)
if len(features) < num_original_features:
_logging.warning("Duplicates have been removed from the list of features")
## Remove the row label
if label in features:
features.remove(label)
_logging.warning("The row label has been removed from the list of features.")
## Check the type of each feature against the list of valid types
valid_features = []
for ftr in features:
if not isinstance(ftr, str):
_logging.warning("Feature '{}' excluded. ".format(ftr) +
"Features must be specified as strings " +
"corresponding to column names in the input dataset.")
elif ftr not in column_type_map.keys():
_logging.warning("Feature '{}' excluded because ".format(ftr) +
"it is not in the input dataset.")
elif column_type_map[ftr] not in valid_types:
_logging.warning("Feature '{}' excluded because of its type. ".format(ftr) +
"Kmeans features must be int, float, dict, or array.array type.")
else:
valid_features.append(ftr)
if len(valid_features) == 0:
raise _ToolkitError("All specified features have been excluded. " +
"Please specify valid features.")
return valid_features | python | def _validate_features(features, column_type_map, valid_types, label):
"""
Identify the subset of desired `features` that are valid for the Kmeans
model. A warning is emitted for each feature that is excluded.
Parameters
----------
features : list[str]
Desired feature names.
column_type_map : dict[str, type]
Dictionary mapping each column name to the type of values in the
column.
valid_types : list[type]
Exclude features whose type is not in this list.
label : str
Name of the row label column.
Returns
-------
valid_features : list[str]
Names of features to include in the model.
"""
if not isinstance(features, list):
raise TypeError("Input 'features' must be a list, if specified.")
if len(features) == 0:
raise ValueError("If specified, input 'features' must contain " +
"at least one column name.")
## Remove duplicates
num_original_features = len(features)
features = set(features)
if len(features) < num_original_features:
_logging.warning("Duplicates have been removed from the list of features")
## Remove the row label
if label in features:
features.remove(label)
_logging.warning("The row label has been removed from the list of features.")
## Check the type of each feature against the list of valid types
valid_features = []
for ftr in features:
if not isinstance(ftr, str):
_logging.warning("Feature '{}' excluded. ".format(ftr) +
"Features must be specified as strings " +
"corresponding to column names in the input dataset.")
elif ftr not in column_type_map.keys():
_logging.warning("Feature '{}' excluded because ".format(ftr) +
"it is not in the input dataset.")
elif column_type_map[ftr] not in valid_types:
_logging.warning("Feature '{}' excluded because of its type. ".format(ftr) +
"Kmeans features must be int, float, dict, or array.array type.")
else:
valid_features.append(ftr)
if len(valid_features) == 0:
raise _ToolkitError("All specified features have been excluded. " +
"Please specify valid features.")
return valid_features | [
"def",
"_validate_features",
"(",
"features",
",",
"column_type_map",
",",
"valid_types",
",",
"label",
")",
":",
"if",
"not",
"isinstance",
"(",
"features",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"Input 'features' must be a list, if specified.\"",
")",
"if",
"len",
"(",
"features",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"If specified, input 'features' must contain \"",
"+",
"\"at least one column name.\"",
")",
"## Remove duplicates",
"num_original_features",
"=",
"len",
"(",
"features",
")",
"features",
"=",
"set",
"(",
"features",
")",
"if",
"len",
"(",
"features",
")",
"<",
"num_original_features",
":",
"_logging",
".",
"warning",
"(",
"\"Duplicates have been removed from the list of features\"",
")",
"## Remove the row label",
"if",
"label",
"in",
"features",
":",
"features",
".",
"remove",
"(",
"label",
")",
"_logging",
".",
"warning",
"(",
"\"The row label has been removed from the list of features.\"",
")",
"## Check the type of each feature against the list of valid types",
"valid_features",
"=",
"[",
"]",
"for",
"ftr",
"in",
"features",
":",
"if",
"not",
"isinstance",
"(",
"ftr",
",",
"str",
")",
":",
"_logging",
".",
"warning",
"(",
"\"Feature '{}' excluded. \"",
".",
"format",
"(",
"ftr",
")",
"+",
"\"Features must be specified as strings \"",
"+",
"\"corresponding to column names in the input dataset.\"",
")",
"elif",
"ftr",
"not",
"in",
"column_type_map",
".",
"keys",
"(",
")",
":",
"_logging",
".",
"warning",
"(",
"\"Feature '{}' excluded because \"",
".",
"format",
"(",
"ftr",
")",
"+",
"\"it is not in the input dataset.\"",
")",
"elif",
"column_type_map",
"[",
"ftr",
"]",
"not",
"in",
"valid_types",
":",
"_logging",
".",
"warning",
"(",
"\"Feature '{}' excluded because of its type. \"",
".",
"format",
"(",
"ftr",
")",
"+",
"\"Kmeans features must be int, float, dict, or array.array type.\"",
")",
"else",
":",
"valid_features",
".",
"append",
"(",
"ftr",
")",
"if",
"len",
"(",
"valid_features",
")",
"==",
"0",
":",
"raise",
"_ToolkitError",
"(",
"\"All specified features have been excluded. \"",
"+",
"\"Please specify valid features.\"",
")",
"return",
"valid_features"
] | Identify the subset of desired `features` that are valid for the Kmeans
model. A warning is emitted for each feature that is excluded.
Parameters
----------
features : list[str]
Desired feature names.
column_type_map : dict[str, type]
Dictionary mapping each column name to the type of values in the
column.
valid_types : list[type]
Exclude features whose type is not in this list.
label : str
Name of the row label column.
Returns
-------
valid_features : list[str]
Names of features to include in the model. | [
"Identify",
"the",
"subset",
"of",
"desired",
"features",
"that",
"are",
"valid",
"for",
"the",
"Kmeans",
"model",
".",
"A",
"warning",
"is",
"emitted",
"for",
"each",
"feature",
"that",
"is",
"excluded",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L118-L186 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/clustering/kmeans.py | create | def create(dataset, num_clusters=None, features=None, label=None,
initial_centers=None, max_iterations=10, batch_size=None,
verbose=True):
"""
Create a k-means clustering model. The KmeansModel object contains the
computed cluster centers and the cluster assignment for each instance in
the input 'dataset'.
Given a number of clusters, k-means iteratively chooses the best cluster
centers and assigns nearby points to the best cluster. If no points change
cluster membership between iterations, the algorithm terminates.
Parameters
----------
dataset : SFrame
Each row in the SFrame is an observation.
num_clusters : int
Number of clusters. This is the 'k' in k-means.
features : list[str], optional
Names of feature columns to use in computing distances between
observations and cluster centers. 'None' (the default) indicates that
all columns should be used as features. Columns may be of the following
types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (int or float) values. Each list element
is treated as a distinct feature in the model.
- *Dict*: dictionary of keys mapped to numeric values. Each unique key
is treated as a distinct feature in the model.
Note that columns of type *list* are not supported. Convert them to
array columns if all entries in the list are of numeric types.
label : str, optional
Name of the column to use as row labels in the Kmeans output. The
values in this column must be integers or strings. If not specified,
row numbers are used by default.
initial_centers : SFrame, optional
Initial centers to use when starting the K-means algorithm. If
specified, this parameter overrides the *num_clusters* parameter. The
'initial_centers' SFrame must contain the same features used in the
input 'dataset'.
If not specified (the default), initial centers are chosen
intelligently with the K-means++ algorithm.
max_iterations : int, optional
The maximum number of iterations to run. Prints a warning if the
algorithm does not converge after max_iterations iterations. If set to
0, the model returns clusters defined by the initial centers and
assignments to those centers.
batch_size : int, optional
Number of randomly-chosen data points to use in each iteration. If
'None' (the default) or greater than the number of rows in 'dataset',
then this parameter is ignored: all rows of `dataset` are used in each
iteration and model training terminates once point assignments stop
changing or `max_iterations` is reached.
verbose : bool, optional
If True, print model training progress to the screen.
Returns
-------
out : KmeansModel
A Model object containing a cluster id for each vertex, and the centers
of the clusters.
See Also
--------
KmeansModel
Notes
-----
- Integer features in the 'dataset' or 'initial_centers' inputs are
converted internally to float type, and the corresponding features in the
output centers are float-typed.
- It can be important for the K-means model to standardize the features so
they have the same scale. This function does *not* standardize
automatically.
References
----------
- `Wikipedia - k-means clustering
<http://en.wikipedia.org/wiki/K-means_clustering>`_
- Artuhur, D. and Vassilvitskii, S. (2007) `k-means++: The Advantages of
Careful Seeding <http://ilpubs.stanford.edu:8090/778/1/2006-13.pdf>`_. In
Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete
Algorithms. pp. 1027-1035.
- Elkan, C. (2003) `Using the triangle inequality to accelerate k-means
<http://www.aaai.org/Papers/ICML/2003/ICML03-022.pdf>`_. In Proceedings
of the Twentieth International Conference on Machine Learning, Volume 3,
pp. 147-153.
- Sculley, D. (2010) `Web Scale K-Means Clustering
<http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf>`_. In
Proceedings of the 19th International Conference on World Wide Web. pp.
1177-1178
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
"""
opts = {'model_name': 'kmeans',
'max_iterations': max_iterations,
}
## Validate the input dataset and initial centers.
_validate_dataset(dataset)
if initial_centers is not None:
_validate_initial_centers(initial_centers)
## Validate and determine the correct number of clusters.
opts['num_clusters'] = _validate_num_clusters(num_clusters,
initial_centers,
dataset.num_rows())
## Validate the row label
col_type_map = {c: dataset[c].dtype for c in dataset.column_names()}
if label is not None:
_validate_row_label(label, col_type_map)
if label in ['cluster_id', 'distance']:
raise ValueError("Row label column name cannot be 'cluster_id' " +
"or 'distance'; these are reserved for other " +
"columns in the Kmeans model's output.")
opts['row_labels'] = dataset[label]
opts['row_label_name'] = label
else:
opts['row_labels'] = _tc.SArray.from_sequence(dataset.num_rows())
opts['row_label_name'] = 'row_id'
## Validate the features relative to the input dataset.
if features is None:
features = dataset.column_names()
valid_features = _validate_features(features, col_type_map,
valid_types=[_array, dict, int, float],
label=label)
sf_features = dataset.select_columns(valid_features)
opts['features'] = sf_features
## Validate the features in the initial centers (if provided)
if initial_centers is not None:
try:
initial_centers = initial_centers.select_columns(valid_features)
except:
raise ValueError("Specified features cannot be extracted from " +
"the provided initial centers.")
if initial_centers.column_types() != sf_features.column_types():
raise TypeError("Feature types are different in the dataset and " +
"initial centers.")
else:
initial_centers = _tc.SFrame()
opts['initial_centers'] = initial_centers
## Validate the batch size and determine the training method.
if batch_size is None:
opts['method'] = 'elkan'
opts['batch_size'] = dataset.num_rows()
else:
opts['method'] = 'minibatch'
opts['batch_size'] = batch_size
## Create and return the model
with _QuietProgress(verbose):
params = _tc.extensions._kmeans.train(opts)
return KmeansModel(params['model']) | python | def create(dataset, num_clusters=None, features=None, label=None,
initial_centers=None, max_iterations=10, batch_size=None,
verbose=True):
"""
Create a k-means clustering model. The KmeansModel object contains the
computed cluster centers and the cluster assignment for each instance in
the input 'dataset'.
Given a number of clusters, k-means iteratively chooses the best cluster
centers and assigns nearby points to the best cluster. If no points change
cluster membership between iterations, the algorithm terminates.
Parameters
----------
dataset : SFrame
Each row in the SFrame is an observation.
num_clusters : int
Number of clusters. This is the 'k' in k-means.
features : list[str], optional
Names of feature columns to use in computing distances between
observations and cluster centers. 'None' (the default) indicates that
all columns should be used as features. Columns may be of the following
types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (int or float) values. Each list element
is treated as a distinct feature in the model.
- *Dict*: dictionary of keys mapped to numeric values. Each unique key
is treated as a distinct feature in the model.
Note that columns of type *list* are not supported. Convert them to
array columns if all entries in the list are of numeric types.
label : str, optional
Name of the column to use as row labels in the Kmeans output. The
values in this column must be integers or strings. If not specified,
row numbers are used by default.
initial_centers : SFrame, optional
Initial centers to use when starting the K-means algorithm. If
specified, this parameter overrides the *num_clusters* parameter. The
'initial_centers' SFrame must contain the same features used in the
input 'dataset'.
If not specified (the default), initial centers are chosen
intelligently with the K-means++ algorithm.
max_iterations : int, optional
The maximum number of iterations to run. Prints a warning if the
algorithm does not converge after max_iterations iterations. If set to
0, the model returns clusters defined by the initial centers and
assignments to those centers.
batch_size : int, optional
Number of randomly-chosen data points to use in each iteration. If
'None' (the default) or greater than the number of rows in 'dataset',
then this parameter is ignored: all rows of `dataset` are used in each
iteration and model training terminates once point assignments stop
changing or `max_iterations` is reached.
verbose : bool, optional
If True, print model training progress to the screen.
Returns
-------
out : KmeansModel
A Model object containing a cluster id for each vertex, and the centers
of the clusters.
See Also
--------
KmeansModel
Notes
-----
- Integer features in the 'dataset' or 'initial_centers' inputs are
converted internally to float type, and the corresponding features in the
output centers are float-typed.
- It can be important for the K-means model to standardize the features so
they have the same scale. This function does *not* standardize
automatically.
References
----------
- `Wikipedia - k-means clustering
<http://en.wikipedia.org/wiki/K-means_clustering>`_
- Artuhur, D. and Vassilvitskii, S. (2007) `k-means++: The Advantages of
Careful Seeding <http://ilpubs.stanford.edu:8090/778/1/2006-13.pdf>`_. In
Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete
Algorithms. pp. 1027-1035.
- Elkan, C. (2003) `Using the triangle inequality to accelerate k-means
<http://www.aaai.org/Papers/ICML/2003/ICML03-022.pdf>`_. In Proceedings
of the Twentieth International Conference on Machine Learning, Volume 3,
pp. 147-153.
- Sculley, D. (2010) `Web Scale K-Means Clustering
<http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf>`_. In
Proceedings of the 19th International Conference on World Wide Web. pp.
1177-1178
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
"""
opts = {'model_name': 'kmeans',
'max_iterations': max_iterations,
}
## Validate the input dataset and initial centers.
_validate_dataset(dataset)
if initial_centers is not None:
_validate_initial_centers(initial_centers)
## Validate and determine the correct number of clusters.
opts['num_clusters'] = _validate_num_clusters(num_clusters,
initial_centers,
dataset.num_rows())
## Validate the row label
col_type_map = {c: dataset[c].dtype for c in dataset.column_names()}
if label is not None:
_validate_row_label(label, col_type_map)
if label in ['cluster_id', 'distance']:
raise ValueError("Row label column name cannot be 'cluster_id' " +
"or 'distance'; these are reserved for other " +
"columns in the Kmeans model's output.")
opts['row_labels'] = dataset[label]
opts['row_label_name'] = label
else:
opts['row_labels'] = _tc.SArray.from_sequence(dataset.num_rows())
opts['row_label_name'] = 'row_id'
## Validate the features relative to the input dataset.
if features is None:
features = dataset.column_names()
valid_features = _validate_features(features, col_type_map,
valid_types=[_array, dict, int, float],
label=label)
sf_features = dataset.select_columns(valid_features)
opts['features'] = sf_features
## Validate the features in the initial centers (if provided)
if initial_centers is not None:
try:
initial_centers = initial_centers.select_columns(valid_features)
except:
raise ValueError("Specified features cannot be extracted from " +
"the provided initial centers.")
if initial_centers.column_types() != sf_features.column_types():
raise TypeError("Feature types are different in the dataset and " +
"initial centers.")
else:
initial_centers = _tc.SFrame()
opts['initial_centers'] = initial_centers
## Validate the batch size and determine the training method.
if batch_size is None:
opts['method'] = 'elkan'
opts['batch_size'] = dataset.num_rows()
else:
opts['method'] = 'minibatch'
opts['batch_size'] = batch_size
## Create and return the model
with _QuietProgress(verbose):
params = _tc.extensions._kmeans.train(opts)
return KmeansModel(params['model']) | [
"def",
"create",
"(",
"dataset",
",",
"num_clusters",
"=",
"None",
",",
"features",
"=",
"None",
",",
"label",
"=",
"None",
",",
"initial_centers",
"=",
"None",
",",
"max_iterations",
"=",
"10",
",",
"batch_size",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"opts",
"=",
"{",
"'model_name'",
":",
"'kmeans'",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"}",
"## Validate the input dataset and initial centers.",
"_validate_dataset",
"(",
"dataset",
")",
"if",
"initial_centers",
"is",
"not",
"None",
":",
"_validate_initial_centers",
"(",
"initial_centers",
")",
"## Validate and determine the correct number of clusters.",
"opts",
"[",
"'num_clusters'",
"]",
"=",
"_validate_num_clusters",
"(",
"num_clusters",
",",
"initial_centers",
",",
"dataset",
".",
"num_rows",
"(",
")",
")",
"## Validate the row label",
"col_type_map",
"=",
"{",
"c",
":",
"dataset",
"[",
"c",
"]",
".",
"dtype",
"for",
"c",
"in",
"dataset",
".",
"column_names",
"(",
")",
"}",
"if",
"label",
"is",
"not",
"None",
":",
"_validate_row_label",
"(",
"label",
",",
"col_type_map",
")",
"if",
"label",
"in",
"[",
"'cluster_id'",
",",
"'distance'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Row label column name cannot be 'cluster_id' \"",
"+",
"\"or 'distance'; these are reserved for other \"",
"+",
"\"columns in the Kmeans model's output.\"",
")",
"opts",
"[",
"'row_labels'",
"]",
"=",
"dataset",
"[",
"label",
"]",
"opts",
"[",
"'row_label_name'",
"]",
"=",
"label",
"else",
":",
"opts",
"[",
"'row_labels'",
"]",
"=",
"_tc",
".",
"SArray",
".",
"from_sequence",
"(",
"dataset",
".",
"num_rows",
"(",
")",
")",
"opts",
"[",
"'row_label_name'",
"]",
"=",
"'row_id'",
"## Validate the features relative to the input dataset.",
"if",
"features",
"is",
"None",
":",
"features",
"=",
"dataset",
".",
"column_names",
"(",
")",
"valid_features",
"=",
"_validate_features",
"(",
"features",
",",
"col_type_map",
",",
"valid_types",
"=",
"[",
"_array",
",",
"dict",
",",
"int",
",",
"float",
"]",
",",
"label",
"=",
"label",
")",
"sf_features",
"=",
"dataset",
".",
"select_columns",
"(",
"valid_features",
")",
"opts",
"[",
"'features'",
"]",
"=",
"sf_features",
"## Validate the features in the initial centers (if provided)",
"if",
"initial_centers",
"is",
"not",
"None",
":",
"try",
":",
"initial_centers",
"=",
"initial_centers",
".",
"select_columns",
"(",
"valid_features",
")",
"except",
":",
"raise",
"ValueError",
"(",
"\"Specified features cannot be extracted from \"",
"+",
"\"the provided initial centers.\"",
")",
"if",
"initial_centers",
".",
"column_types",
"(",
")",
"!=",
"sf_features",
".",
"column_types",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"Feature types are different in the dataset and \"",
"+",
"\"initial centers.\"",
")",
"else",
":",
"initial_centers",
"=",
"_tc",
".",
"SFrame",
"(",
")",
"opts",
"[",
"'initial_centers'",
"]",
"=",
"initial_centers",
"## Validate the batch size and determine the training method.",
"if",
"batch_size",
"is",
"None",
":",
"opts",
"[",
"'method'",
"]",
"=",
"'elkan'",
"opts",
"[",
"'batch_size'",
"]",
"=",
"dataset",
".",
"num_rows",
"(",
")",
"else",
":",
"opts",
"[",
"'method'",
"]",
"=",
"'minibatch'",
"opts",
"[",
"'batch_size'",
"]",
"=",
"batch_size",
"## Create and return the model",
"with",
"_QuietProgress",
"(",
"verbose",
")",
":",
"params",
"=",
"_tc",
".",
"extensions",
".",
"_kmeans",
".",
"train",
"(",
"opts",
")",
"return",
"KmeansModel",
"(",
"params",
"[",
"'model'",
"]",
")"
] | Create a k-means clustering model. The KmeansModel object contains the
computed cluster centers and the cluster assignment for each instance in
the input 'dataset'.
Given a number of clusters, k-means iteratively chooses the best cluster
centers and assigns nearby points to the best cluster. If no points change
cluster membership between iterations, the algorithm terminates.
Parameters
----------
dataset : SFrame
Each row in the SFrame is an observation.
num_clusters : int
Number of clusters. This is the 'k' in k-means.
features : list[str], optional
Names of feature columns to use in computing distances between
observations and cluster centers. 'None' (the default) indicates that
all columns should be used as features. Columns may be of the following
types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (int or float) values. Each list element
is treated as a distinct feature in the model.
- *Dict*: dictionary of keys mapped to numeric values. Each unique key
is treated as a distinct feature in the model.
Note that columns of type *list* are not supported. Convert them to
array columns if all entries in the list are of numeric types.
label : str, optional
Name of the column to use as row labels in the Kmeans output. The
values in this column must be integers or strings. If not specified,
row numbers are used by default.
initial_centers : SFrame, optional
Initial centers to use when starting the K-means algorithm. If
specified, this parameter overrides the *num_clusters* parameter. The
'initial_centers' SFrame must contain the same features used in the
input 'dataset'.
If not specified (the default), initial centers are chosen
intelligently with the K-means++ algorithm.
max_iterations : int, optional
The maximum number of iterations to run. Prints a warning if the
algorithm does not converge after max_iterations iterations. If set to
0, the model returns clusters defined by the initial centers and
assignments to those centers.
batch_size : int, optional
Number of randomly-chosen data points to use in each iteration. If
'None' (the default) or greater than the number of rows in 'dataset',
then this parameter is ignored: all rows of `dataset` are used in each
iteration and model training terminates once point assignments stop
changing or `max_iterations` is reached.
verbose : bool, optional
If True, print model training progress to the screen.
Returns
-------
out : KmeansModel
A Model object containing a cluster id for each vertex, and the centers
of the clusters.
See Also
--------
KmeansModel
Notes
-----
- Integer features in the 'dataset' or 'initial_centers' inputs are
converted internally to float type, and the corresponding features in the
output centers are float-typed.
- It can be important for the K-means model to standardize the features so
they have the same scale. This function does *not* standardize
automatically.
References
----------
- `Wikipedia - k-means clustering
<http://en.wikipedia.org/wiki/K-means_clustering>`_
- Artuhur, D. and Vassilvitskii, S. (2007) `k-means++: The Advantages of
Careful Seeding <http://ilpubs.stanford.edu:8090/778/1/2006-13.pdf>`_. In
Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete
Algorithms. pp. 1027-1035.
- Elkan, C. (2003) `Using the triangle inequality to accelerate k-means
<http://www.aaai.org/Papers/ICML/2003/ICML03-022.pdf>`_. In Proceedings
of the Twentieth International Conference on Machine Learning, Volume 3,
pp. 147-153.
- Sculley, D. (2010) `Web Scale K-Means Clustering
<http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf>`_. In
Proceedings of the 19th International Conference on World Wide Web. pp.
1177-1178
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3) | [
"Create",
"a",
"k",
"-",
"means",
"clustering",
"model",
".",
"The",
"KmeansModel",
"object",
"contains",
"the",
"computed",
"cluster",
"centers",
"and",
"the",
"cluster",
"assignment",
"for",
"each",
"instance",
"in",
"the",
"input",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L410-L602 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/clustering/kmeans.py | KmeansModel.predict | def predict(self, dataset, output_type='cluster_id', verbose=True):
"""
Return predicted cluster label for instances in the new 'dataset'.
K-means predictions are made by assigning each new instance to the
closest cluster center.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training; additional columns are ignored.
output_type : {'cluster_id', 'distance'}, optional
Form of the prediction. 'cluster_id' (the default) returns the
cluster label assigned to each input instance, while 'distance'
returns the Euclidean distance between the instance and its
assigned cluster's center.
verbose : bool, optional
If True, print progress updates to the screen.
Returns
-------
out : SArray
Model predictions. Depending on the specified `output_type`, either
the assigned cluster label or the distance of each point to its
closest cluster center. The order of the predictions is the same as
order of the input data rows.
See Also
--------
create
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
...
>>> sf_new = turicreate.SFrame({'x1': [-5.6584, -1.0167, -9.6181],
... 'x2': [-6.3803, -3.7937, -1.1022]})
>>> clusters = model.predict(sf_new, output_type='cluster_id')
>>> print clusters
[1, 0, 1]
"""
## Validate the input dataset.
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate the output type.
if not isinstance(output_type, str):
raise TypeError("The 'output_type' parameter must be a string.")
if not output_type in ('cluster_id', 'distance'):
raise ValueError("The 'output_type' parameter must be either " +
"'cluster_label' or 'distance'.")
## Get model features.
ref_features = self.features
sf_features = _tkutl._toolkits_select_columns(dataset, ref_features)
## Compute predictions.
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'dataset': sf_features}
with _QuietProgress(verbose):
result = _tc.extensions._kmeans.predict(opts)
sf_result = result['predictions']
if output_type == 'distance':
return sf_result['distance']
else:
return sf_result['cluster_id'] | python | def predict(self, dataset, output_type='cluster_id', verbose=True):
"""
Return predicted cluster label for instances in the new 'dataset'.
K-means predictions are made by assigning each new instance to the
closest cluster center.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training; additional columns are ignored.
output_type : {'cluster_id', 'distance'}, optional
Form of the prediction. 'cluster_id' (the default) returns the
cluster label assigned to each input instance, while 'distance'
returns the Euclidean distance between the instance and its
assigned cluster's center.
verbose : bool, optional
If True, print progress updates to the screen.
Returns
-------
out : SArray
Model predictions. Depending on the specified `output_type`, either
the assigned cluster label or the distance of each point to its
closest cluster center. The order of the predictions is the same as
order of the input data rows.
See Also
--------
create
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
...
>>> sf_new = turicreate.SFrame({'x1': [-5.6584, -1.0167, -9.6181],
... 'x2': [-6.3803, -3.7937, -1.1022]})
>>> clusters = model.predict(sf_new, output_type='cluster_id')
>>> print clusters
[1, 0, 1]
"""
## Validate the input dataset.
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate the output type.
if not isinstance(output_type, str):
raise TypeError("The 'output_type' parameter must be a string.")
if not output_type in ('cluster_id', 'distance'):
raise ValueError("The 'output_type' parameter must be either " +
"'cluster_label' or 'distance'.")
## Get model features.
ref_features = self.features
sf_features = _tkutl._toolkits_select_columns(dataset, ref_features)
## Compute predictions.
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'dataset': sf_features}
with _QuietProgress(verbose):
result = _tc.extensions._kmeans.predict(opts)
sf_result = result['predictions']
if output_type == 'distance':
return sf_result['distance']
else:
return sf_result['cluster_id'] | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"'cluster_id'",
",",
"verbose",
"=",
"True",
")",
":",
"## Validate the input dataset.",
"_tkutl",
".",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"_tkutl",
".",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"\"dataset\"",
")",
"## Validate the output type.",
"if",
"not",
"isinstance",
"(",
"output_type",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"The 'output_type' parameter must be a string.\"",
")",
"if",
"not",
"output_type",
"in",
"(",
"'cluster_id'",
",",
"'distance'",
")",
":",
"raise",
"ValueError",
"(",
"\"The 'output_type' parameter must be either \"",
"+",
"\"'cluster_label' or 'distance'.\"",
")",
"## Get model features.",
"ref_features",
"=",
"self",
".",
"features",
"sf_features",
"=",
"_tkutl",
".",
"_toolkits_select_columns",
"(",
"dataset",
",",
"ref_features",
")",
"## Compute predictions.",
"opts",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'model_name'",
":",
"self",
".",
"__name__",
",",
"'dataset'",
":",
"sf_features",
"}",
"with",
"_QuietProgress",
"(",
"verbose",
")",
":",
"result",
"=",
"_tc",
".",
"extensions",
".",
"_kmeans",
".",
"predict",
"(",
"opts",
")",
"sf_result",
"=",
"result",
"[",
"'predictions'",
"]",
"if",
"output_type",
"==",
"'distance'",
":",
"return",
"sf_result",
"[",
"'distance'",
"]",
"else",
":",
"return",
"sf_result",
"[",
"'cluster_id'",
"]"
] | Return predicted cluster label for instances in the new 'dataset'.
K-means predictions are made by assigning each new instance to the
closest cluster center.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training; additional columns are ignored.
output_type : {'cluster_id', 'distance'}, optional
Form of the prediction. 'cluster_id' (the default) returns the
cluster label assigned to each input instance, while 'distance'
returns the Euclidean distance between the instance and its
assigned cluster's center.
verbose : bool, optional
If True, print progress updates to the screen.
Returns
-------
out : SArray
Model predictions. Depending on the specified `output_type`, either
the assigned cluster label or the distance of each point to its
closest cluster center. The order of the predictions is the same as
order of the input data rows.
See Also
--------
create
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
...
>>> sf_new = turicreate.SFrame({'x1': [-5.6584, -1.0167, -9.6181],
... 'x2': [-6.3803, -3.7937, -1.1022]})
>>> clusters = model.predict(sf_new, output_type='cluster_id')
>>> print clusters
[1, 0, 1] | [
"Return",
"predicted",
"cluster",
"label",
"for",
"instances",
"in",
"the",
"new",
"dataset",
".",
"K",
"-",
"means",
"predictions",
"are",
"made",
"by",
"assigning",
"each",
"new",
"instance",
"to",
"the",
"closest",
"cluster",
"center",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L208-L287 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/clustering/kmeans.py | KmeansModel._get | def _get(self, field):
"""
Return the value of a given field.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| batch_size | Number of randomly chosen examples to use in |
| | each training iteration. |
+-----------------------+----------------------------------------------+
| cluster_id | Cluster assignment for each data point and |
| | Euclidean distance to the cluster center |
+-----------------------+----------------------------------------------+
| cluster_info | Cluster centers, sum of squared Euclidean |
| | distances from each cluster member to the |
| | assigned center, and the number of data |
| | points belonging to the cluster |
+-----------------------+----------------------------------------------+
| features | Names of feature columns |
+-----------------------+----------------------------------------------+
| max_iterations | Maximum number of iterations to perform |
+-----------------------+----------------------------------------------+
| method | Algorithm used to train the model. |
+-----------------------+----------------------------------------------+
| num_clusters | Number of clusters |
+-----------------------+----------------------------------------------+
| num_examples | Number of examples in the dataset |
+-----------------------+----------------------------------------------+
| num_features | Number of feature columns used |
+-----------------------+----------------------------------------------+
| num_unpacked_features | Number of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
| training_iterations | Total number of iterations performed |
+-----------------------+----------------------------------------------+
| training_time | Total time taken to cluster the data |
+-----------------------+----------------------------------------------+
| unpacked_features | Names of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
Parameters
----------
field : str
The name of the field to query.
Returns
-------
out
Value of the requested field
"""
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'field': field}
response = _tc.extensions._kmeans.get_value(opts)
return response['value'] | python | def _get(self, field):
"""
Return the value of a given field.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| batch_size | Number of randomly chosen examples to use in |
| | each training iteration. |
+-----------------------+----------------------------------------------+
| cluster_id | Cluster assignment for each data point and |
| | Euclidean distance to the cluster center |
+-----------------------+----------------------------------------------+
| cluster_info | Cluster centers, sum of squared Euclidean |
| | distances from each cluster member to the |
| | assigned center, and the number of data |
| | points belonging to the cluster |
+-----------------------+----------------------------------------------+
| features | Names of feature columns |
+-----------------------+----------------------------------------------+
| max_iterations | Maximum number of iterations to perform |
+-----------------------+----------------------------------------------+
| method | Algorithm used to train the model. |
+-----------------------+----------------------------------------------+
| num_clusters | Number of clusters |
+-----------------------+----------------------------------------------+
| num_examples | Number of examples in the dataset |
+-----------------------+----------------------------------------------+
| num_features | Number of feature columns used |
+-----------------------+----------------------------------------------+
| num_unpacked_features | Number of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
| training_iterations | Total number of iterations performed |
+-----------------------+----------------------------------------------+
| training_time | Total time taken to cluster the data |
+-----------------------+----------------------------------------------+
| unpacked_features | Names of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
Parameters
----------
field : str
The name of the field to query.
Returns
-------
out
Value of the requested field
"""
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'field': field}
response = _tc.extensions._kmeans.get_value(opts)
return response['value'] | [
"def",
"_get",
"(",
"self",
",",
"field",
")",
":",
"opts",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'model_name'",
":",
"self",
".",
"__name__",
",",
"'field'",
":",
"field",
"}",
"response",
"=",
"_tc",
".",
"extensions",
".",
"_kmeans",
".",
"get_value",
"(",
"opts",
")",
"return",
"response",
"[",
"'value'",
"]"
] | Return the value of a given field.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| batch_size | Number of randomly chosen examples to use in |
| | each training iteration. |
+-----------------------+----------------------------------------------+
| cluster_id | Cluster assignment for each data point and |
| | Euclidean distance to the cluster center |
+-----------------------+----------------------------------------------+
| cluster_info | Cluster centers, sum of squared Euclidean |
| | distances from each cluster member to the |
| | assigned center, and the number of data |
| | points belonging to the cluster |
+-----------------------+----------------------------------------------+
| features | Names of feature columns |
+-----------------------+----------------------------------------------+
| max_iterations | Maximum number of iterations to perform |
+-----------------------+----------------------------------------------+
| method | Algorithm used to train the model. |
+-----------------------+----------------------------------------------+
| num_clusters | Number of clusters |
+-----------------------+----------------------------------------------+
| num_examples | Number of examples in the dataset |
+-----------------------+----------------------------------------------+
| num_features | Number of feature columns used |
+-----------------------+----------------------------------------------+
| num_unpacked_features | Number of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
| training_iterations | Total number of iterations performed |
+-----------------------+----------------------------------------------+
| training_time | Total time taken to cluster the data |
+-----------------------+----------------------------------------------+
| unpacked_features | Names of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
Parameters
----------
field : str
The name of the field to query.
Returns
-------
out
Value of the requested field | [
"Return",
"the",
"value",
"of",
"a",
"given",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L289-L345 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.