repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | implied_feature | def implied_feature (implicit_value):
""" Returns the implicit feature associated with the given implicit value.
"""
assert isinstance(implicit_value, basestring)
components = implicit_value.split('-')
if components[0] not in __implicit_features:
raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)
return __implicit_features[components[0]] | python | def implied_feature (implicit_value):
""" Returns the implicit feature associated with the given implicit value.
"""
assert isinstance(implicit_value, basestring)
components = implicit_value.split('-')
if components[0] not in __implicit_features:
raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)
return __implicit_features[components[0]] | [
"def",
"implied_feature",
"(",
"implicit_value",
")",
":",
"assert",
"isinstance",
"(",
"implicit_value",
",",
"basestring",
")",
"components",
"=",
"implicit_value",
".",
"split",
"(",
"'-'",
")",
"if",
"components",
"[",
"0",
"]",
"not",
"in",
"__implicit_features",
":",
"raise",
"InvalidValue",
"(",
"\"'%s' is not a value of an implicit feature\"",
"%",
"implicit_value",
")",
"return",
"__implicit_features",
"[",
"components",
"[",
"0",
"]",
"]"
] | Returns the implicit feature associated with the given implicit value. | [
"Returns",
"the",
"implicit",
"feature",
"associated",
"with",
"the",
"given",
"implicit",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L243-L252 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | validate_feature | def validate_feature (name):
""" Checks if all name is a valid feature. Otherwise, raises an exception.
"""
assert isinstance(name, basestring)
if name not in __all_features:
raise InvalidFeature ("'%s' is not a valid feature name" % name)
else:
return __all_features[name] | python | def validate_feature (name):
""" Checks if all name is a valid feature. Otherwise, raises an exception.
"""
assert isinstance(name, basestring)
if name not in __all_features:
raise InvalidFeature ("'%s' is not a valid feature name" % name)
else:
return __all_features[name] | [
"def",
"validate_feature",
"(",
"name",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"if",
"name",
"not",
"in",
"__all_features",
":",
"raise",
"InvalidFeature",
"(",
"\"'%s' is not a valid feature name\"",
"%",
"name",
")",
"else",
":",
"return",
"__all_features",
"[",
"name",
"]"
] | Checks if all name is a valid feature. Otherwise, raises an exception. | [
"Checks",
"if",
"all",
"name",
"is",
"a",
"valid",
"feature",
".",
"Otherwise",
"raises",
"an",
"exception",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L281-L288 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | __expand_subfeatures_aux | def __expand_subfeatures_aux (property_, dont_validate = False):
""" Helper for expand_subfeatures.
Given a feature and value, or just a value corresponding to an
implicit feature, returns a property set consisting of all component
subfeatures and their values. For example:
expand_subfeatures <toolset>gcc-2.95.2-linux-x86
-> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
equivalent to:
expand_subfeatures gcc-2.95.2-linux-x86
feature: The name of the feature, or empty if value corresponds to an implicit property
value: The value of the feature.
dont_validate: If True, no validation of value string will be done.
"""
from . import property # no __debug__ since Property is used elsewhere
assert isinstance(property_, property.Property)
assert isinstance(dont_validate, int) # matches bools
f = property_.feature
v = property_.value
if not dont_validate:
validate_value_string(f, v)
components = v.split ("-")
v = components[0]
result = [property.Property(f, components[0])]
subvalues = components[1:]
while len(subvalues) > 0:
subvalue = subvalues [0] # pop the head off of subvalues
subvalues = subvalues [1:]
subfeature = __find_implied_subfeature (f, subvalue, v)
# If no subfeature was found, reconstitute the value string and use that
if not subfeature:
return [property.Property(f, '-'.join(components))]
result.append(property.Property(subfeature, subvalue))
return result | python | def __expand_subfeatures_aux (property_, dont_validate = False):
""" Helper for expand_subfeatures.
Given a feature and value, or just a value corresponding to an
implicit feature, returns a property set consisting of all component
subfeatures and their values. For example:
expand_subfeatures <toolset>gcc-2.95.2-linux-x86
-> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
equivalent to:
expand_subfeatures gcc-2.95.2-linux-x86
feature: The name of the feature, or empty if value corresponds to an implicit property
value: The value of the feature.
dont_validate: If True, no validation of value string will be done.
"""
from . import property # no __debug__ since Property is used elsewhere
assert isinstance(property_, property.Property)
assert isinstance(dont_validate, int) # matches bools
f = property_.feature
v = property_.value
if not dont_validate:
validate_value_string(f, v)
components = v.split ("-")
v = components[0]
result = [property.Property(f, components[0])]
subvalues = components[1:]
while len(subvalues) > 0:
subvalue = subvalues [0] # pop the head off of subvalues
subvalues = subvalues [1:]
subfeature = __find_implied_subfeature (f, subvalue, v)
# If no subfeature was found, reconstitute the value string and use that
if not subfeature:
return [property.Property(f, '-'.join(components))]
result.append(property.Property(subfeature, subvalue))
return result | [
"def",
"__expand_subfeatures_aux",
"(",
"property_",
",",
"dont_validate",
"=",
"False",
")",
":",
"from",
".",
"import",
"property",
"# no __debug__ since Property is used elsewhere",
"assert",
"isinstance",
"(",
"property_",
",",
"property",
".",
"Property",
")",
"assert",
"isinstance",
"(",
"dont_validate",
",",
"int",
")",
"# matches bools",
"f",
"=",
"property_",
".",
"feature",
"v",
"=",
"property_",
".",
"value",
"if",
"not",
"dont_validate",
":",
"validate_value_string",
"(",
"f",
",",
"v",
")",
"components",
"=",
"v",
".",
"split",
"(",
"\"-\"",
")",
"v",
"=",
"components",
"[",
"0",
"]",
"result",
"=",
"[",
"property",
".",
"Property",
"(",
"f",
",",
"components",
"[",
"0",
"]",
")",
"]",
"subvalues",
"=",
"components",
"[",
"1",
":",
"]",
"while",
"len",
"(",
"subvalues",
")",
">",
"0",
":",
"subvalue",
"=",
"subvalues",
"[",
"0",
"]",
"# pop the head off of subvalues",
"subvalues",
"=",
"subvalues",
"[",
"1",
":",
"]",
"subfeature",
"=",
"__find_implied_subfeature",
"(",
"f",
",",
"subvalue",
",",
"v",
")",
"# If no subfeature was found, reconstitute the value string and use that",
"if",
"not",
"subfeature",
":",
"return",
"[",
"property",
".",
"Property",
"(",
"f",
",",
"'-'",
".",
"join",
"(",
"components",
")",
")",
"]",
"result",
".",
"append",
"(",
"property",
".",
"Property",
"(",
"subfeature",
",",
"subvalue",
")",
")",
"return",
"result"
] | Helper for expand_subfeatures.
Given a feature and value, or just a value corresponding to an
implicit feature, returns a property set consisting of all component
subfeatures and their values. For example:
expand_subfeatures <toolset>gcc-2.95.2-linux-x86
-> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
equivalent to:
expand_subfeatures gcc-2.95.2-linux-x86
feature: The name of the feature, or empty if value corresponds to an implicit property
value: The value of the feature.
dont_validate: If True, no validation of value string will be done. | [
"Helper",
"for",
"expand_subfeatures",
".",
"Given",
"a",
"feature",
"and",
"value",
"or",
"just",
"a",
"value",
"corresponding",
"to",
"an",
"implicit",
"feature",
"returns",
"a",
"property",
"set",
"consisting",
"of",
"all",
"component",
"subfeatures",
"and",
"their",
"values",
".",
"For",
"example",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L292-L336 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | expand_subfeatures | def expand_subfeatures(properties, dont_validate = False):
"""
Make all elements of properties corresponding to implicit features
explicit, and express all subfeature values as separate properties
in their own right. For example, the property
gcc-2.95.2-linux-x86
might expand to
<toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
properties: A sequence with elements of the form
<feature>value-string or just value-string in the
case of implicit features.
: dont_validate: If True, no validation of value string will be done.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(dont_validate, int) # matches bools
result = []
for p in properties:
# Don't expand subfeatures in subfeatures
if p.feature.subfeature:
result.append (p)
else:
result.extend(__expand_subfeatures_aux (p, dont_validate))
return result | python | def expand_subfeatures(properties, dont_validate = False):
"""
Make all elements of properties corresponding to implicit features
explicit, and express all subfeature values as separate properties
in their own right. For example, the property
gcc-2.95.2-linux-x86
might expand to
<toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
properties: A sequence with elements of the form
<feature>value-string or just value-string in the
case of implicit features.
: dont_validate: If True, no validation of value string will be done.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(dont_validate, int) # matches bools
result = []
for p in properties:
# Don't expand subfeatures in subfeatures
if p.feature.subfeature:
result.append (p)
else:
result.extend(__expand_subfeatures_aux (p, dont_validate))
return result | [
"def",
"expand_subfeatures",
"(",
"properties",
",",
"dont_validate",
"=",
"False",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"Property",
")",
"assert",
"isinstance",
"(",
"dont_validate",
",",
"int",
")",
"# matches bools",
"result",
"=",
"[",
"]",
"for",
"p",
"in",
"properties",
":",
"# Don't expand subfeatures in subfeatures",
"if",
"p",
".",
"feature",
".",
"subfeature",
":",
"result",
".",
"append",
"(",
"p",
")",
"else",
":",
"result",
".",
"extend",
"(",
"__expand_subfeatures_aux",
"(",
"p",
",",
"dont_validate",
")",
")",
"return",
"result"
] | Make all elements of properties corresponding to implicit features
explicit, and express all subfeature values as separate properties
in their own right. For example, the property
gcc-2.95.2-linux-x86
might expand to
<toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
properties: A sequence with elements of the form
<feature>value-string or just value-string in the
case of implicit features.
: dont_validate: If True, no validation of value string will be done. | [
"Make",
"all",
"elements",
"of",
"properties",
"corresponding",
"to",
"implicit",
"features",
"explicit",
"and",
"express",
"all",
"subfeature",
"values",
"as",
"separate",
"properties",
"in",
"their",
"own",
"right",
".",
"For",
"example",
"the",
"property"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L338-L367 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | extend | def extend (name, values):
""" Adds the given values to the given feature.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
name = add_grist (name)
__validate_feature (name)
feature = __all_features [name]
if feature.implicit:
for v in values:
if v in __implicit_features:
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = feature
if values and not feature.values and not(feature.free or feature.optional):
# This is the first value specified for this feature,
# take it as default value
feature.set_default(values[0])
feature.add_values(values) | python | def extend (name, values):
""" Adds the given values to the given feature.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
name = add_grist (name)
__validate_feature (name)
feature = __all_features [name]
if feature.implicit:
for v in values:
if v in __implicit_features:
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = feature
if values and not feature.values and not(feature.free or feature.optional):
# This is the first value specified for this feature,
# take it as default value
feature.set_default(values[0])
feature.add_values(values) | [
"def",
"extend",
"(",
"name",
",",
"values",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"values",
",",
"basestring",
")",
"name",
"=",
"add_grist",
"(",
"name",
")",
"__validate_feature",
"(",
"name",
")",
"feature",
"=",
"__all_features",
"[",
"name",
"]",
"if",
"feature",
".",
"implicit",
":",
"for",
"v",
"in",
"values",
":",
"if",
"v",
"in",
"__implicit_features",
":",
"raise",
"BaseException",
"(",
"\"'%s' is already associated with the feature '%s'\"",
"%",
"(",
"v",
",",
"__implicit_features",
"[",
"v",
"]",
")",
")",
"__implicit_features",
"[",
"v",
"]",
"=",
"feature",
"if",
"values",
"and",
"not",
"feature",
".",
"values",
"and",
"not",
"(",
"feature",
".",
"free",
"or",
"feature",
".",
"optional",
")",
":",
"# This is the first value specified for this feature,",
"# take it as default value",
"feature",
".",
"set_default",
"(",
"values",
"[",
"0",
"]",
")",
"feature",
".",
"add_values",
"(",
"values",
")"
] | Adds the given values to the given feature. | [
"Adds",
"the",
"given",
"values",
"to",
"the",
"given",
"feature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L389-L410 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | validate_value_string | def validate_value_string (f, value_string):
""" Checks that value-string is a valid value-string for the given feature.
"""
assert isinstance(f, Feature)
assert isinstance(value_string, basestring)
if f.free or value_string in f.values:
return
values = [value_string]
if f.subfeatures:
if not value_string in f.values and \
not value_string in f.subfeatures:
values = value_string.split('-')
# An empty value is allowed for optional features
if not values[0] in f.values and \
(values[0] or not f.optional):
raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values))
for v in values [1:]:
# this will validate any subfeature values in value-string
implied_subfeature(f, v, values[0]) | python | def validate_value_string (f, value_string):
""" Checks that value-string is a valid value-string for the given feature.
"""
assert isinstance(f, Feature)
assert isinstance(value_string, basestring)
if f.free or value_string in f.values:
return
values = [value_string]
if f.subfeatures:
if not value_string in f.values and \
not value_string in f.subfeatures:
values = value_string.split('-')
# An empty value is allowed for optional features
if not values[0] in f.values and \
(values[0] or not f.optional):
raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values))
for v in values [1:]:
# this will validate any subfeature values in value-string
implied_subfeature(f, v, values[0]) | [
"def",
"validate_value_string",
"(",
"f",
",",
"value_string",
")",
":",
"assert",
"isinstance",
"(",
"f",
",",
"Feature",
")",
"assert",
"isinstance",
"(",
"value_string",
",",
"basestring",
")",
"if",
"f",
".",
"free",
"or",
"value_string",
"in",
"f",
".",
"values",
":",
"return",
"values",
"=",
"[",
"value_string",
"]",
"if",
"f",
".",
"subfeatures",
":",
"if",
"not",
"value_string",
"in",
"f",
".",
"values",
"and",
"not",
"value_string",
"in",
"f",
".",
"subfeatures",
":",
"values",
"=",
"value_string",
".",
"split",
"(",
"'-'",
")",
"# An empty value is allowed for optional features",
"if",
"not",
"values",
"[",
"0",
"]",
"in",
"f",
".",
"values",
"and",
"(",
"values",
"[",
"0",
"]",
"or",
"not",
"f",
".",
"optional",
")",
":",
"raise",
"InvalidValue",
"(",
"\"'%s' is not a known value of feature '%s'\\nlegal values: '%s'\"",
"%",
"(",
"values",
"[",
"0",
"]",
",",
"f",
".",
"name",
",",
"f",
".",
"values",
")",
")",
"for",
"v",
"in",
"values",
"[",
"1",
":",
"]",
":",
"# this will validate any subfeature values in value-string",
"implied_subfeature",
"(",
"f",
",",
"v",
",",
"values",
"[",
"0",
"]",
")"
] | Checks that value-string is a valid value-string for the given feature. | [
"Checks",
"that",
"value",
"-",
"string",
"is",
"a",
"valid",
"value",
"-",
"string",
"for",
"the",
"given",
"feature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L412-L434 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | subfeature | def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
""" Declares a subfeature.
feature_name: Root feature that is not a subfeature.
value_string: An optional value-string specifying which feature or
subfeature values this subfeature is specific to,
if any.
subfeature: The name of the subfeature being declared.
subvalues: The allowed values of this subfeature.
attributes: The attributes of the subfeature.
"""
parent_feature = validate_feature (feature_name)
# Add grist to the subfeature name if a value-string was supplied
subfeature_name = __get_subfeature_name (subfeature, value_string)
if subfeature_name in __all_features[feature_name].subfeatures:
message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name)
message += " specific to '%s'" % value_string
raise BaseException (message)
# First declare the subfeature as a feature in its own right
f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
f.set_parent(parent_feature, value_string)
parent_feature.add_subfeature(f)
# Now make sure the subfeature values are known.
extend_subfeature (feature_name, value_string, subfeature, subvalues) | python | def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
""" Declares a subfeature.
feature_name: Root feature that is not a subfeature.
value_string: An optional value-string specifying which feature or
subfeature values this subfeature is specific to,
if any.
subfeature: The name of the subfeature being declared.
subvalues: The allowed values of this subfeature.
attributes: The attributes of the subfeature.
"""
parent_feature = validate_feature (feature_name)
# Add grist to the subfeature name if a value-string was supplied
subfeature_name = __get_subfeature_name (subfeature, value_string)
if subfeature_name in __all_features[feature_name].subfeatures:
message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name)
message += " specific to '%s'" % value_string
raise BaseException (message)
# First declare the subfeature as a feature in its own right
f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
f.set_parent(parent_feature, value_string)
parent_feature.add_subfeature(f)
# Now make sure the subfeature values are known.
extend_subfeature (feature_name, value_string, subfeature, subvalues) | [
"def",
"subfeature",
"(",
"feature_name",
",",
"value_string",
",",
"subfeature",
",",
"subvalues",
",",
"attributes",
"=",
"[",
"]",
")",
":",
"parent_feature",
"=",
"validate_feature",
"(",
"feature_name",
")",
"# Add grist to the subfeature name if a value-string was supplied",
"subfeature_name",
"=",
"__get_subfeature_name",
"(",
"subfeature",
",",
"value_string",
")",
"if",
"subfeature_name",
"in",
"__all_features",
"[",
"feature_name",
"]",
".",
"subfeatures",
":",
"message",
"=",
"\"'%s' already declared as a subfeature of '%s'\"",
"%",
"(",
"subfeature",
",",
"feature_name",
")",
"message",
"+=",
"\" specific to '%s'\"",
"%",
"value_string",
"raise",
"BaseException",
"(",
"message",
")",
"# First declare the subfeature as a feature in its own right",
"f",
"=",
"feature",
"(",
"feature_name",
"+",
"'-'",
"+",
"subfeature_name",
",",
"subvalues",
",",
"attributes",
"+",
"[",
"'subfeature'",
"]",
")",
"f",
".",
"set_parent",
"(",
"parent_feature",
",",
"value_string",
")",
"parent_feature",
".",
"add_subfeature",
"(",
"f",
")",
"# Now make sure the subfeature values are known.",
"extend_subfeature",
"(",
"feature_name",
",",
"value_string",
",",
"subfeature",
",",
"subvalues",
")"
] | Declares a subfeature.
feature_name: Root feature that is not a subfeature.
value_string: An optional value-string specifying which feature or
subfeature values this subfeature is specific to,
if any.
subfeature: The name of the subfeature being declared.
subvalues: The allowed values of this subfeature.
attributes: The attributes of the subfeature. | [
"Declares",
"a",
"subfeature",
".",
"feature_name",
":",
"Root",
"feature",
"that",
"is",
"not",
"a",
"subfeature",
".",
"value_string",
":",
"An",
"optional",
"value",
"-",
"string",
"specifying",
"which",
"feature",
"or",
"subfeature",
"values",
"this",
"subfeature",
"is",
"specific",
"to",
"if",
"any",
".",
"subfeature",
":",
"The",
"name",
"of",
"the",
"subfeature",
"being",
"declared",
".",
"subvalues",
":",
"The",
"allowed",
"values",
"of",
"this",
"subfeature",
".",
"attributes",
":",
"The",
"attributes",
"of",
"the",
"subfeature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L482-L509 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | compose | def compose (composite_property_s, component_properties_s):
""" Sets the components of the given composite property.
All parameters are <feature>value strings
"""
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite:
raise BaseException ("'%s' is not a composite feature" % f)
if property in __composite_properties:
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties | python | def compose (composite_property_s, component_properties_s):
""" Sets the components of the given composite property.
All parameters are <feature>value strings
"""
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite:
raise BaseException ("'%s' is not a composite feature" % f)
if property in __composite_properties:
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties | [
"def",
"compose",
"(",
"composite_property_s",
",",
"component_properties_s",
")",
":",
"from",
".",
"import",
"property",
"component_properties_s",
"=",
"to_seq",
"(",
"component_properties_s",
")",
"composite_property",
"=",
"property",
".",
"create_from_string",
"(",
"composite_property_s",
")",
"f",
"=",
"composite_property",
".",
"feature",
"if",
"len",
"(",
"component_properties_s",
")",
">",
"0",
"and",
"isinstance",
"(",
"component_properties_s",
"[",
"0",
"]",
",",
"property",
".",
"Property",
")",
":",
"component_properties",
"=",
"component_properties_s",
"else",
":",
"component_properties",
"=",
"[",
"property",
".",
"create_from_string",
"(",
"p",
")",
"for",
"p",
"in",
"component_properties_s",
"]",
"if",
"not",
"f",
".",
"composite",
":",
"raise",
"BaseException",
"(",
"\"'%s' is not a composite feature\"",
"%",
"f",
")",
"if",
"property",
"in",
"__composite_properties",
":",
"raise",
"BaseException",
"(",
"'components of \"%s\" already set: %s'",
"%",
"(",
"composite_property",
",",
"str",
"(",
"__composite_properties",
"[",
"composite_property",
"]",
")",
")",
")",
"if",
"composite_property",
"in",
"component_properties",
":",
"raise",
"BaseException",
"(",
"'composite property \"%s\" cannot have itself as a component'",
"%",
"composite_property",
")",
"__composite_properties",
"[",
"composite_property",
"]",
"=",
"component_properties"
] | Sets the components of the given composite property.
All parameters are <feature>value strings | [
"Sets",
"the",
"components",
"of",
"the",
"given",
"composite",
"property",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L513-L538 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | get_values | def get_values (feature, properties):
""" Returns all values of the given feature specified by the given property set.
"""
if feature[0] != '<':
feature = '<' + feature + '>'
result = []
for p in properties:
if get_grist (p) == feature:
result.append (replace_grist (p, ''))
return result | python | def get_values (feature, properties):
""" Returns all values of the given feature specified by the given property set.
"""
if feature[0] != '<':
feature = '<' + feature + '>'
result = []
for p in properties:
if get_grist (p) == feature:
result.append (replace_grist (p, ''))
return result | [
"def",
"get_values",
"(",
"feature",
",",
"properties",
")",
":",
"if",
"feature",
"[",
"0",
"]",
"!=",
"'<'",
":",
"feature",
"=",
"'<'",
"+",
"feature",
"+",
"'>'",
"result",
"=",
"[",
"]",
"for",
"p",
"in",
"properties",
":",
"if",
"get_grist",
"(",
"p",
")",
"==",
"feature",
":",
"result",
".",
"append",
"(",
"replace_grist",
"(",
"p",
",",
"''",
")",
")",
"return",
"result"
] | Returns all values of the given feature specified by the given property set. | [
"Returns",
"all",
"values",
"of",
"the",
"given",
"feature",
"specified",
"by",
"the",
"given",
"property",
"set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L552-L562 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | expand_composites | def expand_composites (properties):
""" Expand all composite properties in the set so that all components
are explicitly expressed.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
explicit_features = set(p.feature for p in properties)
result = []
# now expand composite features
for p in properties:
expanded = expand_composite(p)
for x in expanded:
if not x in result:
f = x.feature
if f.free:
result.append (x)
elif not x in properties: # x is the result of expansion
if not f in explicit_features: # not explicitly-specified
if any(r.feature == f for r in result):
raise FeatureConflict(
"expansions of composite features result in "
"conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
(f.name, [r.value for r in result if r.feature == f] + [x.value], p))
else:
result.append (x)
elif any(r.feature == f for r in result):
raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
"existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
[r.value for r in result if r.feature == f], p, x.value))
else:
result.append (x)
return result | python | def expand_composites (properties):
""" Expand all composite properties in the set so that all components
are explicitly expressed.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
explicit_features = set(p.feature for p in properties)
result = []
# now expand composite features
for p in properties:
expanded = expand_composite(p)
for x in expanded:
if not x in result:
f = x.feature
if f.free:
result.append (x)
elif not x in properties: # x is the result of expansion
if not f in explicit_features: # not explicitly-specified
if any(r.feature == f for r in result):
raise FeatureConflict(
"expansions of composite features result in "
"conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
(f.name, [r.value for r in result if r.feature == f] + [x.value], p))
else:
result.append (x)
elif any(r.feature == f for r in result):
raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
"existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
[r.value for r in result if r.feature == f], p, x.value))
else:
result.append (x)
return result | [
"def",
"expand_composites",
"(",
"properties",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"Property",
")",
"explicit_features",
"=",
"set",
"(",
"p",
".",
"feature",
"for",
"p",
"in",
"properties",
")",
"result",
"=",
"[",
"]",
"# now expand composite features",
"for",
"p",
"in",
"properties",
":",
"expanded",
"=",
"expand_composite",
"(",
"p",
")",
"for",
"x",
"in",
"expanded",
":",
"if",
"not",
"x",
"in",
"result",
":",
"f",
"=",
"x",
".",
"feature",
"if",
"f",
".",
"free",
":",
"result",
".",
"append",
"(",
"x",
")",
"elif",
"not",
"x",
"in",
"properties",
":",
"# x is the result of expansion",
"if",
"not",
"f",
"in",
"explicit_features",
":",
"# not explicitly-specified",
"if",
"any",
"(",
"r",
".",
"feature",
"==",
"f",
"for",
"r",
"in",
"result",
")",
":",
"raise",
"FeatureConflict",
"(",
"\"expansions of composite features result in \"",
"\"conflicting values for '%s'\\nvalues: '%s'\\none contributing composite property was '%s'\"",
"%",
"(",
"f",
".",
"name",
",",
"[",
"r",
".",
"value",
"for",
"r",
"in",
"result",
"if",
"r",
".",
"feature",
"==",
"f",
"]",
"+",
"[",
"x",
".",
"value",
"]",
",",
"p",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"x",
")",
"elif",
"any",
"(",
"r",
".",
"feature",
"==",
"f",
"for",
"r",
"in",
"result",
")",
":",
"raise",
"FeatureConflict",
"(",
"\"explicitly-specified values of non-free feature '%s' conflict\\n\"",
"\"existing values: '%s'\\nvalue from expanding '%s': '%s'\"",
"%",
"(",
"f",
",",
"[",
"r",
".",
"value",
"for",
"r",
"in",
"result",
"if",
"r",
".",
"feature",
"==",
"f",
"]",
",",
"p",
",",
"x",
".",
"value",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"x",
")",
"return",
"result"
] | Expand all composite properties in the set so that all components
are explicitly expressed. | [
"Expand",
"all",
"composite",
"properties",
"in",
"the",
"set",
"so",
"that",
"all",
"components",
"are",
"explicitly",
"expressed",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L569-L606 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | is_subfeature_of | def is_subfeature_of (parent_property, f):
""" Return true iff f is an ordinary subfeature of the parent_property's
feature, or if f is a subfeature of the parent_property's feature
specific to the parent_property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(f, Feature)
if not f.subfeature:
return False
p = f.parent
if not p:
return False
parent_feature = p[0]
parent_value = p[1]
if parent_feature != parent_property.feature:
return False
if parent_value and parent_value != parent_property.value:
return False
return True | python | def is_subfeature_of (parent_property, f):
""" Return true iff f is an ordinary subfeature of the parent_property's
feature, or if f is a subfeature of the parent_property's feature
specific to the parent_property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(f, Feature)
if not f.subfeature:
return False
p = f.parent
if not p:
return False
parent_feature = p[0]
parent_value = p[1]
if parent_feature != parent_property.feature:
return False
if parent_value and parent_value != parent_property.value:
return False
return True | [
"def",
"is_subfeature_of",
"(",
"parent_property",
",",
"f",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"isinstance",
"(",
"parent_property",
",",
"Property",
")",
"assert",
"isinstance",
"(",
"f",
",",
"Feature",
")",
"if",
"not",
"f",
".",
"subfeature",
":",
"return",
"False",
"p",
"=",
"f",
".",
"parent",
"if",
"not",
"p",
":",
"return",
"False",
"parent_feature",
"=",
"p",
"[",
"0",
"]",
"parent_value",
"=",
"p",
"[",
"1",
"]",
"if",
"parent_feature",
"!=",
"parent_property",
".",
"feature",
":",
"return",
"False",
"if",
"parent_value",
"and",
"parent_value",
"!=",
"parent_property",
".",
"value",
":",
"return",
"False",
"return",
"True"
] | Return true iff f is an ordinary subfeature of the parent_property's
feature, or if f is a subfeature of the parent_property's feature
specific to the parent_property's value. | [
"Return",
"true",
"iff",
"f",
"is",
"an",
"ordinary",
"subfeature",
"of",
"the",
"parent_property",
"s",
"feature",
"or",
"if",
"f",
"is",
"a",
"subfeature",
"of",
"the",
"parent_property",
"s",
"feature",
"specific",
"to",
"the",
"parent_property",
"s",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L609-L635 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | __is_subproperty_of | def __is_subproperty_of (parent_property, p):
""" As is_subfeature_of, for subproperties.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(p, Property)
return is_subfeature_of (parent_property, p.feature) | python | def __is_subproperty_of (parent_property, p):
""" As is_subfeature_of, for subproperties.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(p, Property)
return is_subfeature_of (parent_property, p.feature) | [
"def",
"__is_subproperty_of",
"(",
"parent_property",
",",
"p",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"isinstance",
"(",
"parent_property",
",",
"Property",
")",
"assert",
"isinstance",
"(",
"p",
",",
"Property",
")",
"return",
"is_subfeature_of",
"(",
"parent_property",
",",
"p",
".",
"feature",
")"
] | As is_subfeature_of, for subproperties. | [
"As",
"is_subfeature_of",
"for",
"subproperties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L637-L644 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | expand | def expand (properties):
""" Given a property set which may consist of composite and implicit
properties and combined subfeature values, returns an expanded,
normalized property set with all implicit features expressed
explicitly, all subfeature values individually expressed, and all
components of composite properties expanded. Non-free features
directly expressed in the input properties cause any values of
those features due to composite feature expansion to be dropped. If
two values of a given non-free feature are directly expressed in the
input, an error is issued.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
expanded = expand_subfeatures(properties)
return expand_composites (expanded) | python | def expand (properties):
""" Given a property set which may consist of composite and implicit
properties and combined subfeature values, returns an expanded,
normalized property set with all implicit features expressed
explicitly, all subfeature values individually expressed, and all
components of composite properties expanded. Non-free features
directly expressed in the input properties cause any values of
those features due to composite feature expansion to be dropped. If
two values of a given non-free feature are directly expressed in the
input, an error is issued.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
expanded = expand_subfeatures(properties)
return expand_composites (expanded) | [
"def",
"expand",
"(",
"properties",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"Property",
")",
"expanded",
"=",
"expand_subfeatures",
"(",
"properties",
")",
"return",
"expand_composites",
"(",
"expanded",
")"
] | Given a property set which may consist of composite and implicit
properties and combined subfeature values, returns an expanded,
normalized property set with all implicit features expressed
explicitly, all subfeature values individually expressed, and all
components of composite properties expanded. Non-free features
directly expressed in the input properties cause any values of
those features due to composite feature expansion to be dropped. If
two values of a given non-free feature are directly expressed in the
input, an error is issued. | [
"Given",
"a",
"property",
"set",
"which",
"may",
"consist",
"of",
"composite",
"and",
"implicit",
"properties",
"and",
"combined",
"subfeature",
"values",
"returns",
"an",
"expanded",
"normalized",
"property",
"set",
"with",
"all",
"implicit",
"features",
"expressed",
"explicitly",
"all",
"subfeature",
"values",
"individually",
"expressed",
"and",
"all",
"components",
"of",
"composite",
"properties",
"expanded",
".",
"Non",
"-",
"free",
"features",
"directly",
"expressed",
"in",
"the",
"input",
"properties",
"cause",
"any",
"values",
"of",
"those",
"features",
"due",
"to",
"composite",
"feature",
"expansion",
"to",
"be",
"dropped",
".",
"If",
"two",
"values",
"of",
"a",
"given",
"non",
"-",
"free",
"feature",
"are",
"directly",
"expressed",
"in",
"the",
"input",
"an",
"error",
"is",
"issued",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L664-L679 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | add_defaults | def add_defaults (properties):
""" Given a set of properties, add default values for features not
represented in the set.
Note: if there's there's ordinary feature F1 and composite feature
F2, which includes some value for F1, and both feature have default values,
then the default value of F1 will be added, not the value in F2. This might
not be right idea: consider
feature variant : debug ... ;
<variant>debug : .... <runtime-debugging>on
feature <runtime-debugging> : off on ;
Here, when adding default for an empty property set, we'll get
<variant>debug <runtime_debugging>off
and that's kind of strange.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# create a copy since properties will be modified
result = list(properties)
# We don't add default for conditional properties. We don't want
# <variant>debug:<define>DEBUG to be takes as specified value for <variant>
handled_features = set(p.feature for p in properties if not p.condition)
missing_top = [f for f in __all_top_features if not f in handled_features]
more = defaults(missing_top)
result.extend(more)
handled_features.update(p.feature for p in more)
# Add defaults for subfeatures of features which are present
for p in result[:]:
subfeatures = [s for s in p.feature.subfeatures if not s in handled_features]
more = defaults(__select_subfeatures(p, subfeatures))
handled_features.update(h.feature for h in more)
result.extend(more)
return result | python | def add_defaults (properties):
""" Given a set of properties, add default values for features not
represented in the set.
Note: if there's there's ordinary feature F1 and composite feature
F2, which includes some value for F1, and both feature have default values,
then the default value of F1 will be added, not the value in F2. This might
not be right idea: consider
feature variant : debug ... ;
<variant>debug : .... <runtime-debugging>on
feature <runtime-debugging> : off on ;
Here, when adding default for an empty property set, we'll get
<variant>debug <runtime_debugging>off
and that's kind of strange.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# create a copy since properties will be modified
result = list(properties)
# We don't add default for conditional properties. We don't want
# <variant>debug:<define>DEBUG to be takes as specified value for <variant>
handled_features = set(p.feature for p in properties if not p.condition)
missing_top = [f for f in __all_top_features if not f in handled_features]
more = defaults(missing_top)
result.extend(more)
handled_features.update(p.feature for p in more)
# Add defaults for subfeatures of features which are present
for p in result[:]:
subfeatures = [s for s in p.feature.subfeatures if not s in handled_features]
more = defaults(__select_subfeatures(p, subfeatures))
handled_features.update(h.feature for h in more)
result.extend(more)
return result | [
"def",
"add_defaults",
"(",
"properties",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"Property",
")",
"# create a copy since properties will be modified",
"result",
"=",
"list",
"(",
"properties",
")",
"# We don't add default for conditional properties. We don't want",
"# <variant>debug:<define>DEBUG to be takes as specified value for <variant>",
"handled_features",
"=",
"set",
"(",
"p",
".",
"feature",
"for",
"p",
"in",
"properties",
"if",
"not",
"p",
".",
"condition",
")",
"missing_top",
"=",
"[",
"f",
"for",
"f",
"in",
"__all_top_features",
"if",
"not",
"f",
"in",
"handled_features",
"]",
"more",
"=",
"defaults",
"(",
"missing_top",
")",
"result",
".",
"extend",
"(",
"more",
")",
"handled_features",
".",
"update",
"(",
"p",
".",
"feature",
"for",
"p",
"in",
"more",
")",
"# Add defaults for subfeatures of features which are present",
"for",
"p",
"in",
"result",
"[",
":",
"]",
":",
"subfeatures",
"=",
"[",
"s",
"for",
"s",
"in",
"p",
".",
"feature",
".",
"subfeatures",
"if",
"not",
"s",
"in",
"handled_features",
"]",
"more",
"=",
"defaults",
"(",
"__select_subfeatures",
"(",
"p",
",",
"subfeatures",
")",
")",
"handled_features",
".",
"update",
"(",
"h",
".",
"feature",
"for",
"h",
"in",
"more",
")",
"result",
".",
"extend",
"(",
"more",
")",
"return",
"result"
] | Given a set of properties, add default values for features not
represented in the set.
Note: if there's there's ordinary feature F1 and composite feature
F2, which includes some value for F1, and both feature have default values,
then the default value of F1 will be added, not the value in F2. This might
not be right idea: consider
feature variant : debug ... ;
<variant>debug : .... <runtime-debugging>on
feature <runtime-debugging> : off on ;
Here, when adding default for an empty property set, we'll get
<variant>debug <runtime_debugging>off
and that's kind of strange. | [
"Given",
"a",
"set",
"of",
"properties",
"add",
"default",
"values",
"for",
"features",
"not",
"represented",
"in",
"the",
"set",
".",
"Note",
":",
"if",
"there",
"s",
"there",
"s",
"ordinary",
"feature",
"F1",
"and",
"composite",
"feature",
"F2",
"which",
"includes",
"some",
"value",
"for",
"F1",
"and",
"both",
"feature",
"have",
"default",
"values",
"then",
"the",
"default",
"value",
"of",
"F1",
"will",
"be",
"added",
"not",
"the",
"value",
"in",
"F2",
".",
"This",
"might",
"not",
"be",
"right",
"idea",
":",
"consider"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L682-L722 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | minimize | def minimize (properties):
""" Given an expanded property set, eliminate all redundancy: properties
which are elements of other (composite) properties in the set will
be eliminated. Non-symmetric properties equal to default values will be
eliminated, unless the override a value from some composite property.
Implicit properties will be expressed without feature
grist, and sub-property values will be expressed as elements joined
to the corresponding main property.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# remove properties implied by composite features
components = []
component_features = set()
for property in properties:
if property in __composite_properties:
cs = __composite_properties[property]
components.extend(cs)
component_features.update(c.feature for c in cs)
properties = b2.util.set.difference (properties, components)
# handle subfeatures and implicit features
# move subfeatures to the end of the list
properties = [p for p in properties if not p.feature.subfeature] +\
[p for p in properties if p.feature.subfeature]
result = []
while properties:
p = properties[0]
f = p.feature
# locate all subproperties of $(x[1]) in the property set
subproperties = [x for x in properties if is_subfeature_of(p, x.feature)]
if subproperties:
# reconstitute the joined property name
subproperties.sort ()
joined = b2.build.property.Property(p.feature, p.value + '-' + '-'.join ([sp.value for sp in subproperties]))
result.append(joined)
properties = b2.util.set.difference(properties[1:], subproperties)
else:
# eliminate properties whose value is equal to feature's
# default and which are not symmetric and which do not
# contradict values implied by composite properties.
# since all component properties of composites in the set
# have been eliminated, any remaining property whose
# feature is the same as a component of a composite in the
# set must have a non-redundant value.
if p.value != f.default or f.symmetric or f in component_features:
result.append (p)
properties = properties[1:]
return result | python | def minimize (properties):
""" Given an expanded property set, eliminate all redundancy: properties
which are elements of other (composite) properties in the set will
be eliminated. Non-symmetric properties equal to default values will be
eliminated, unless the override a value from some composite property.
Implicit properties will be expressed without feature
grist, and sub-property values will be expressed as elements joined
to the corresponding main property.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# remove properties implied by composite features
components = []
component_features = set()
for property in properties:
if property in __composite_properties:
cs = __composite_properties[property]
components.extend(cs)
component_features.update(c.feature for c in cs)
properties = b2.util.set.difference (properties, components)
# handle subfeatures and implicit features
# move subfeatures to the end of the list
properties = [p for p in properties if not p.feature.subfeature] +\
[p for p in properties if p.feature.subfeature]
result = []
while properties:
p = properties[0]
f = p.feature
# locate all subproperties of $(x[1]) in the property set
subproperties = [x for x in properties if is_subfeature_of(p, x.feature)]
if subproperties:
# reconstitute the joined property name
subproperties.sort ()
joined = b2.build.property.Property(p.feature, p.value + '-' + '-'.join ([sp.value for sp in subproperties]))
result.append(joined)
properties = b2.util.set.difference(properties[1:], subproperties)
else:
# eliminate properties whose value is equal to feature's
# default and which are not symmetric and which do not
# contradict values implied by composite properties.
# since all component properties of composites in the set
# have been eliminated, any remaining property whose
# feature is the same as a component of a composite in the
# set must have a non-redundant value.
if p.value != f.default or f.symmetric or f in component_features:
result.append (p)
properties = properties[1:]
return result | [
"def",
"minimize",
"(",
"properties",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"Property",
")",
"# remove properties implied by composite features",
"components",
"=",
"[",
"]",
"component_features",
"=",
"set",
"(",
")",
"for",
"property",
"in",
"properties",
":",
"if",
"property",
"in",
"__composite_properties",
":",
"cs",
"=",
"__composite_properties",
"[",
"property",
"]",
"components",
".",
"extend",
"(",
"cs",
")",
"component_features",
".",
"update",
"(",
"c",
".",
"feature",
"for",
"c",
"in",
"cs",
")",
"properties",
"=",
"b2",
".",
"util",
".",
"set",
".",
"difference",
"(",
"properties",
",",
"components",
")",
"# handle subfeatures and implicit features",
"# move subfeatures to the end of the list",
"properties",
"=",
"[",
"p",
"for",
"p",
"in",
"properties",
"if",
"not",
"p",
".",
"feature",
".",
"subfeature",
"]",
"+",
"[",
"p",
"for",
"p",
"in",
"properties",
"if",
"p",
".",
"feature",
".",
"subfeature",
"]",
"result",
"=",
"[",
"]",
"while",
"properties",
":",
"p",
"=",
"properties",
"[",
"0",
"]",
"f",
"=",
"p",
".",
"feature",
"# locate all subproperties of $(x[1]) in the property set",
"subproperties",
"=",
"[",
"x",
"for",
"x",
"in",
"properties",
"if",
"is_subfeature_of",
"(",
"p",
",",
"x",
".",
"feature",
")",
"]",
"if",
"subproperties",
":",
"# reconstitute the joined property name",
"subproperties",
".",
"sort",
"(",
")",
"joined",
"=",
"b2",
".",
"build",
".",
"property",
".",
"Property",
"(",
"p",
".",
"feature",
",",
"p",
".",
"value",
"+",
"'-'",
"+",
"'-'",
".",
"join",
"(",
"[",
"sp",
".",
"value",
"for",
"sp",
"in",
"subproperties",
"]",
")",
")",
"result",
".",
"append",
"(",
"joined",
")",
"properties",
"=",
"b2",
".",
"util",
".",
"set",
".",
"difference",
"(",
"properties",
"[",
"1",
":",
"]",
",",
"subproperties",
")",
"else",
":",
"# eliminate properties whose value is equal to feature's",
"# default and which are not symmetric and which do not",
"# contradict values implied by composite properties.",
"# since all component properties of composites in the set",
"# have been eliminated, any remaining property whose",
"# feature is the same as a component of a composite in the",
"# set must have a non-redundant value.",
"if",
"p",
".",
"value",
"!=",
"f",
".",
"default",
"or",
"f",
".",
"symmetric",
"or",
"f",
"in",
"component_features",
":",
"result",
".",
"append",
"(",
"p",
")",
"properties",
"=",
"properties",
"[",
"1",
":",
"]",
"return",
"result"
] | Given an expanded property set, eliminate all redundancy: properties
which are elements of other (composite) properties in the set will
be eliminated. Non-symmetric properties equal to default values will be
eliminated, unless the override a value from some composite property.
Implicit properties will be expressed without feature
grist, and sub-property values will be expressed as elements joined
to the corresponding main property. | [
"Given",
"an",
"expanded",
"property",
"set",
"eliminate",
"all",
"redundancy",
":",
"properties",
"which",
"are",
"elements",
"of",
"other",
"(",
"composite",
")",
"properties",
"in",
"the",
"set",
"will",
"be",
"eliminated",
".",
"Non",
"-",
"symmetric",
"properties",
"equal",
"to",
"default",
"values",
"will",
"be",
"eliminated",
"unless",
"the",
"override",
"a",
"value",
"from",
"some",
"composite",
"property",
".",
"Implicit",
"properties",
"will",
"be",
"expressed",
"without",
"feature",
"grist",
"and",
"sub",
"-",
"property",
"values",
"will",
"be",
"expressed",
"as",
"elements",
"joined",
"to",
"the",
"corresponding",
"main",
"property",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L724-L783 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | split | def split (properties):
""" Given a property-set of the form
v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM
Returns
v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM
Note that vN...vM may contain slashes. This is resilient to the
substitution of backslashes for slashes, since Jam, unbidden,
sometimes swaps slash direction on NT.
"""
assert isinstance(properties, basestring)
def split_one (properties):
pieces = re.split (__re_slash_or_backslash, properties)
result = []
for x in pieces:
if not get_grist (x) and len (result) > 0 and get_grist (result [-1]):
result = result [0:-1] + [ result [-1] + '/' + x ]
else:
result.append (x)
return result
if isinstance (properties, str):
return split_one (properties)
result = []
for p in properties:
result += split_one (p)
return result | python | def split (properties):
""" Given a property-set of the form
v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM
Returns
v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM
Note that vN...vM may contain slashes. This is resilient to the
substitution of backslashes for slashes, since Jam, unbidden,
sometimes swaps slash direction on NT.
"""
assert isinstance(properties, basestring)
def split_one (properties):
pieces = re.split (__re_slash_or_backslash, properties)
result = []
for x in pieces:
if not get_grist (x) and len (result) > 0 and get_grist (result [-1]):
result = result [0:-1] + [ result [-1] + '/' + x ]
else:
result.append (x)
return result
if isinstance (properties, str):
return split_one (properties)
result = []
for p in properties:
result += split_one (p)
return result | [
"def",
"split",
"(",
"properties",
")",
":",
"assert",
"isinstance",
"(",
"properties",
",",
"basestring",
")",
"def",
"split_one",
"(",
"properties",
")",
":",
"pieces",
"=",
"re",
".",
"split",
"(",
"__re_slash_or_backslash",
",",
"properties",
")",
"result",
"=",
"[",
"]",
"for",
"x",
"in",
"pieces",
":",
"if",
"not",
"get_grist",
"(",
"x",
")",
"and",
"len",
"(",
"result",
")",
">",
"0",
"and",
"get_grist",
"(",
"result",
"[",
"-",
"1",
"]",
")",
":",
"result",
"=",
"result",
"[",
"0",
":",
"-",
"1",
"]",
"+",
"[",
"result",
"[",
"-",
"1",
"]",
"+",
"'/'",
"+",
"x",
"]",
"else",
":",
"result",
".",
"append",
"(",
"x",
")",
"return",
"result",
"if",
"isinstance",
"(",
"properties",
",",
"str",
")",
":",
"return",
"split_one",
"(",
"properties",
")",
"result",
"=",
"[",
"]",
"for",
"p",
"in",
"properties",
":",
"result",
"+=",
"split_one",
"(",
"p",
")",
"return",
"result"
] | Given a property-set of the form
v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM
Returns
v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM
Note that vN...vM may contain slashes. This is resilient to the
substitution of backslashes for slashes, since Jam, unbidden,
sometimes swaps slash direction on NT. | [
"Given",
"a",
"property",
"-",
"set",
"of",
"the",
"form",
"v1",
"/",
"v2",
"/",
"...",
"vN",
"-",
"1",
"/",
"<fN",
">",
"vN",
"/",
"<fN",
"+",
"1",
">",
"vN",
"+",
"1",
"/",
"...",
"<fM",
">",
"vM"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L786-L816 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | compress_subproperties | def compress_subproperties (properties):
""" Combine all subproperties into their parent properties
Requires: for every subproperty, there is a parent property. All
features are explicitly expressed.
This rule probably shouldn't be needed, but
build-request.expand-no-defaults is being abused for unintended
purposes and it needs help
"""
from .property import Property
assert is_iterable_typed(properties, Property)
result = []
matched_subs = set()
all_subs = set()
for p in properties:
f = p.feature
if not f.subfeature:
subs = [x for x in properties if is_subfeature_of(p, x.feature)]
if subs:
matched_subs.update(subs)
subvalues = '-'.join (sub.value for sub in subs)
result.append(Property(
p.feature, p.value + '-' + subvalues,
p.condition))
else:
result.append(p)
else:
all_subs.add(p)
# TODO: this variables are used just for debugging. What's the overhead?
assert all_subs == matched_subs
return result | python | def compress_subproperties (properties):
""" Combine all subproperties into their parent properties
Requires: for every subproperty, there is a parent property. All
features are explicitly expressed.
This rule probably shouldn't be needed, but
build-request.expand-no-defaults is being abused for unintended
purposes and it needs help
"""
from .property import Property
assert is_iterable_typed(properties, Property)
result = []
matched_subs = set()
all_subs = set()
for p in properties:
f = p.feature
if not f.subfeature:
subs = [x for x in properties if is_subfeature_of(p, x.feature)]
if subs:
matched_subs.update(subs)
subvalues = '-'.join (sub.value for sub in subs)
result.append(Property(
p.feature, p.value + '-' + subvalues,
p.condition))
else:
result.append(p)
else:
all_subs.add(p)
# TODO: this variables are used just for debugging. What's the overhead?
assert all_subs == matched_subs
return result | [
"def",
"compress_subproperties",
"(",
"properties",
")",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"Property",
")",
"result",
"=",
"[",
"]",
"matched_subs",
"=",
"set",
"(",
")",
"all_subs",
"=",
"set",
"(",
")",
"for",
"p",
"in",
"properties",
":",
"f",
"=",
"p",
".",
"feature",
"if",
"not",
"f",
".",
"subfeature",
":",
"subs",
"=",
"[",
"x",
"for",
"x",
"in",
"properties",
"if",
"is_subfeature_of",
"(",
"p",
",",
"x",
".",
"feature",
")",
"]",
"if",
"subs",
":",
"matched_subs",
".",
"update",
"(",
"subs",
")",
"subvalues",
"=",
"'-'",
".",
"join",
"(",
"sub",
".",
"value",
"for",
"sub",
"in",
"subs",
")",
"result",
".",
"append",
"(",
"Property",
"(",
"p",
".",
"feature",
",",
"p",
".",
"value",
"+",
"'-'",
"+",
"subvalues",
",",
"p",
".",
"condition",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"p",
")",
"else",
":",
"all_subs",
".",
"add",
"(",
"p",
")",
"# TODO: this variables are used just for debugging. What's the overhead?",
"assert",
"all_subs",
"==",
"matched_subs",
"return",
"result"
] | Combine all subproperties into their parent properties
Requires: for every subproperty, there is a parent property. All
features are explicitly expressed.
This rule probably shouldn't be needed, but
build-request.expand-no-defaults is being abused for unintended
purposes and it needs help | [
"Combine",
"all",
"subproperties",
"into",
"their",
"parent",
"properties"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L819-L856 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | __select_subfeatures | def __select_subfeatures (parent_property, features):
""" Given a property, return the subset of features consisting of all
ordinary subfeatures of the property's feature, and all specific
subfeatures of the property's feature which are conditional on the
property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert is_iterable_typed(features, Feature)
return [f for f in features if is_subfeature_of (parent_property, f)] | python | def __select_subfeatures (parent_property, features):
""" Given a property, return the subset of features consisting of all
ordinary subfeatures of the property's feature, and all specific
subfeatures of the property's feature which are conditional on the
property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert is_iterable_typed(features, Feature)
return [f for f in features if is_subfeature_of (parent_property, f)] | [
"def",
"__select_subfeatures",
"(",
"parent_property",
",",
"features",
")",
":",
"if",
"__debug__",
":",
"from",
".",
"property",
"import",
"Property",
"assert",
"isinstance",
"(",
"parent_property",
",",
"Property",
")",
"assert",
"is_iterable_typed",
"(",
"features",
",",
"Feature",
")",
"return",
"[",
"f",
"for",
"f",
"in",
"features",
"if",
"is_subfeature_of",
"(",
"parent_property",
",",
"f",
")",
"]"
] | Given a property, return the subset of features consisting of all
ordinary subfeatures of the property's feature, and all specific
subfeatures of the property's feature which are conditional on the
property's value. | [
"Given",
"a",
"property",
"return",
"the",
"subset",
"of",
"features",
"consisting",
"of",
"all",
"ordinary",
"subfeatures",
"of",
"the",
"property",
"s",
"feature",
"and",
"all",
"specific",
"subfeatures",
"of",
"the",
"property",
"s",
"feature",
"which",
"are",
"conditional",
"on",
"the",
"property",
"s",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L902-L912 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _get_interpretation_function | def _get_interpretation_function(interpretation, dtype):
"""
Retrieves the interpretation function used.
"""
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
global _interpretations
if not hasattr(_interpretations, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
return getattr(_interpretations, name) | python | def _get_interpretation_function(interpretation, dtype):
"""
Retrieves the interpretation function used.
"""
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
global _interpretations
if not hasattr(_interpretations, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
return getattr(_interpretations, name) | [
"def",
"_get_interpretation_function",
"(",
"interpretation",
",",
"dtype",
")",
":",
"type_string",
"=",
"dtype",
".",
"__name__",
"name",
"=",
"\"%s__%s\"",
"%",
"(",
"interpretation",
",",
"type_string",
")",
"global",
"_interpretations",
"if",
"not",
"hasattr",
"(",
"_interpretations",
",",
"name",
")",
":",
"raise",
"ValueError",
"(",
"\"No transform available for type '%s' with interpretation '%s'.\"",
"%",
"(",
"type_string",
",",
"interpretation",
")",
")",
"return",
"getattr",
"(",
"_interpretations",
",",
"name",
")"
] | Retrieves the interpretation function used. | [
"Retrieves",
"the",
"interpretation",
"function",
"used",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L380-L394 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _get_interpretation_description_and_output_type | def _get_interpretation_description_and_output_type(interpretation, dtype):
"""
Returns the description and output type for a given interpretation.
"""
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
if not hasattr(_interpretations_class, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
# Need unbound method to get the attributes
func = getattr(_interpretations_class, name)
return func.description, func.output_type | python | def _get_interpretation_description_and_output_type(interpretation, dtype):
"""
Returns the description and output type for a given interpretation.
"""
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
if not hasattr(_interpretations_class, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
# Need unbound method to get the attributes
func = getattr(_interpretations_class, name)
return func.description, func.output_type | [
"def",
"_get_interpretation_description_and_output_type",
"(",
"interpretation",
",",
"dtype",
")",
":",
"type_string",
"=",
"dtype",
".",
"__name__",
"name",
"=",
"\"%s__%s\"",
"%",
"(",
"interpretation",
",",
"type_string",
")",
"if",
"not",
"hasattr",
"(",
"_interpretations_class",
",",
"name",
")",
":",
"raise",
"ValueError",
"(",
"\"No transform available for type '%s' with interpretation '%s'.\"",
"%",
"(",
"type_string",
",",
"interpretation",
")",
")",
"# Need unbound method to get the attributes",
"func",
"=",
"getattr",
"(",
"_interpretations_class",
",",
"name",
")",
"return",
"func",
".",
"description",
",",
"func",
".",
"output_type"
] | Returns the description and output type for a given interpretation. | [
"Returns",
"the",
"description",
"and",
"output",
"type",
"for",
"a",
"given",
"interpretation",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L396-L411 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _get_embeddable_interpretation_doc | def _get_embeddable_interpretation_doc(indent = 0):
"""
Returns a list of the available interpretations and what they do.
If indent is specified, then the entire doc string is indented by that amount.
"""
output_rows = []
# Pull out the doc string and put it in a table.
for name in sorted(dir(_interpretations)):
if name.startswith("_") or "__" not in name:
continue
interpretation, type_str = name.split("__")
func = getattr(_interpretations, name)
output_rows.append("%s (%s type):" % (interpretation, type_str))
output_rows += [(" " + line) for line in _textwrap.dedent(func.__doc__).strip().split("\n")]
output_rows.append("")
return "\n".join(" "*indent + line for line in output_rows) | python | def _get_embeddable_interpretation_doc(indent = 0):
"""
Returns a list of the available interpretations and what they do.
If indent is specified, then the entire doc string is indented by that amount.
"""
output_rows = []
# Pull out the doc string and put it in a table.
for name in sorted(dir(_interpretations)):
if name.startswith("_") or "__" not in name:
continue
interpretation, type_str = name.split("__")
func = getattr(_interpretations, name)
output_rows.append("%s (%s type):" % (interpretation, type_str))
output_rows += [(" " + line) for line in _textwrap.dedent(func.__doc__).strip().split("\n")]
output_rows.append("")
return "\n".join(" "*indent + line for line in output_rows) | [
"def",
"_get_embeddable_interpretation_doc",
"(",
"indent",
"=",
"0",
")",
":",
"output_rows",
"=",
"[",
"]",
"# Pull out the doc string and put it in a table.",
"for",
"name",
"in",
"sorted",
"(",
"dir",
"(",
"_interpretations",
")",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"\"_\"",
")",
"or",
"\"__\"",
"not",
"in",
"name",
":",
"continue",
"interpretation",
",",
"type_str",
"=",
"name",
".",
"split",
"(",
"\"__\"",
")",
"func",
"=",
"getattr",
"(",
"_interpretations",
",",
"name",
")",
"output_rows",
".",
"append",
"(",
"\"%s (%s type):\"",
"%",
"(",
"interpretation",
",",
"type_str",
")",
")",
"output_rows",
"+=",
"[",
"(",
"\" \"",
"+",
"line",
")",
"for",
"line",
"in",
"_textwrap",
".",
"dedent",
"(",
"func",
".",
"__doc__",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"]",
"output_rows",
".",
"append",
"(",
"\"\"",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"\" \"",
"*",
"indent",
"+",
"line",
"for",
"line",
"in",
"output_rows",
")"
] | Returns a list of the available interpretations and what they do.
If indent is specified, then the entire doc string is indented by that amount. | [
"Returns",
"a",
"list",
"of",
"the",
"available",
"interpretations",
"and",
"what",
"they",
"do",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L413-L436 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _ColumnFunctionTransformation._load_version | def _load_version(cls, unpickler, version):
"""
A function to load a previously saved SentenceSplitter instance.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer.
"""
state, _exclude, _features = unpickler.load()
features = state['features']
excluded_features = state['excluded_features']
model = cls.__new__(cls)
model._setup()
model.__proxy__.update(state)
model._exclude = _exclude
model._features = _features
return model | python | def _load_version(cls, unpickler, version):
"""
A function to load a previously saved SentenceSplitter instance.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer.
"""
state, _exclude, _features = unpickler.load()
features = state['features']
excluded_features = state['excluded_features']
model = cls.__new__(cls)
model._setup()
model.__proxy__.update(state)
model._exclude = _exclude
model._features = _features
return model | [
"def",
"_load_version",
"(",
"cls",
",",
"unpickler",
",",
"version",
")",
":",
"state",
",",
"_exclude",
",",
"_features",
"=",
"unpickler",
".",
"load",
"(",
")",
"features",
"=",
"state",
"[",
"'features'",
"]",
"excluded_features",
"=",
"state",
"[",
"'excluded_features'",
"]",
"model",
"=",
"cls",
".",
"__new__",
"(",
"cls",
")",
"model",
".",
"_setup",
"(",
")",
"model",
".",
"__proxy__",
".",
"update",
"(",
"state",
")",
"model",
".",
"_exclude",
"=",
"_exclude",
"model",
".",
"_features",
"=",
"_features",
"return",
"model"
] | A function to load a previously saved SentenceSplitter instance.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer. | [
"A",
"function",
"to",
"load",
"a",
"previously",
"saved",
"SentenceSplitter",
"instance",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L72-L95 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _ColumnFunctionTransformation.fit | def fit(self, data):
"""
Fits the transformer using the given data.
"""
_raise_error_if_not_sframe(data, "data")
fitted_state = {}
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state['features'] = feature_columns
fitted_state['fitted'] = True
self.__proxy__.update(fitted_state)
return self | python | def fit(self, data):
"""
Fits the transformer using the given data.
"""
_raise_error_if_not_sframe(data, "data")
fitted_state = {}
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state['features'] = feature_columns
fitted_state['fitted'] = True
self.__proxy__.update(fitted_state)
return self | [
"def",
"fit",
"(",
"self",
",",
"data",
")",
":",
"_raise_error_if_not_sframe",
"(",
"data",
",",
"\"data\"",
")",
"fitted_state",
"=",
"{",
"}",
"feature_columns",
"=",
"_internal_utils",
".",
"get_column_names",
"(",
"data",
",",
"self",
".",
"_exclude",
",",
"self",
".",
"_features",
")",
"if",
"not",
"feature_columns",
":",
"raise",
"RuntimeError",
"(",
"\"No valid feature columns specified in transformation.\"",
")",
"fitted_state",
"[",
"'features'",
"]",
"=",
"feature_columns",
"fitted_state",
"[",
"'fitted'",
"]",
"=",
"True",
"self",
".",
"__proxy__",
".",
"update",
"(",
"fitted_state",
")",
"return",
"self"
] | Fits the transformer using the given data. | [
"Fits",
"the",
"transformer",
"using",
"the",
"given",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L151-L169 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _ColumnFunctionTransformation.transform | def transform(self, data):
"""
Transforms the data.
"""
if not self._get("fitted"):
raise RuntimeError("`transform` called before `fit` or `fit_transform`.")
data = data.copy()
output_column_prefix = self._get("output_column_prefix")
if output_column_prefix is None:
prefix = ""
else:
prefix = output_column_prefix + '.'
transform_function = self._get("transform_function")
feature_columns = self._get("features")
feature_columns = _internal_utils.select_feature_subset(data, feature_columns)
for f in feature_columns:
data[prefix + f] = transform_function(data[f])
return data | python | def transform(self, data):
"""
Transforms the data.
"""
if not self._get("fitted"):
raise RuntimeError("`transform` called before `fit` or `fit_transform`.")
data = data.copy()
output_column_prefix = self._get("output_column_prefix")
if output_column_prefix is None:
prefix = ""
else:
prefix = output_column_prefix + '.'
transform_function = self._get("transform_function")
feature_columns = self._get("features")
feature_columns = _internal_utils.select_feature_subset(data, feature_columns)
for f in feature_columns:
data[prefix + f] = transform_function(data[f])
return data | [
"def",
"transform",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"self",
".",
"_get",
"(",
"\"fitted\"",
")",
":",
"raise",
"RuntimeError",
"(",
"\"`transform` called before `fit` or `fit_transform`.\"",
")",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"output_column_prefix",
"=",
"self",
".",
"_get",
"(",
"\"output_column_prefix\"",
")",
"if",
"output_column_prefix",
"is",
"None",
":",
"prefix",
"=",
"\"\"",
"else",
":",
"prefix",
"=",
"output_column_prefix",
"+",
"'.'",
"transform_function",
"=",
"self",
".",
"_get",
"(",
"\"transform_function\"",
")",
"feature_columns",
"=",
"self",
".",
"_get",
"(",
"\"features\"",
")",
"feature_columns",
"=",
"_internal_utils",
".",
"select_feature_subset",
"(",
"data",
",",
"feature_columns",
")",
"for",
"f",
"in",
"feature_columns",
":",
"data",
"[",
"prefix",
"+",
"f",
"]",
"=",
"transform_function",
"(",
"data",
"[",
"f",
"]",
")",
"return",
"data"
] | Transforms the data. | [
"Transforms",
"the",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L171-L195 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _interpretations_class.short_text__str | def short_text__str(self, column_name, output_column_prefix):
"""
Transforms short text into a dictionary of TFIDF-weighted 3-gram
character counts.
"""
from ._ngram_counter import NGramCounter
from ._tfidf import TFIDF
return [NGramCounter(features=[column_name],
n = 3,
method = "character",
output_column_prefix = output_column_prefix),
TFIDF(features=[column_name],
min_document_frequency=0.01,
max_document_frequency=0.5,
output_column_prefix = output_column_prefix)] | python | def short_text__str(self, column_name, output_column_prefix):
"""
Transforms short text into a dictionary of TFIDF-weighted 3-gram
character counts.
"""
from ._ngram_counter import NGramCounter
from ._tfidf import TFIDF
return [NGramCounter(features=[column_name],
n = 3,
method = "character",
output_column_prefix = output_column_prefix),
TFIDF(features=[column_name],
min_document_frequency=0.01,
max_document_frequency=0.5,
output_column_prefix = output_column_prefix)] | [
"def",
"short_text__str",
"(",
"self",
",",
"column_name",
",",
"output_column_prefix",
")",
":",
"from",
".",
"_ngram_counter",
"import",
"NGramCounter",
"from",
".",
"_tfidf",
"import",
"TFIDF",
"return",
"[",
"NGramCounter",
"(",
"features",
"=",
"[",
"column_name",
"]",
",",
"n",
"=",
"3",
",",
"method",
"=",
"\"character\"",
",",
"output_column_prefix",
"=",
"output_column_prefix",
")",
",",
"TFIDF",
"(",
"features",
"=",
"[",
"column_name",
"]",
",",
"min_document_frequency",
"=",
"0.01",
",",
"max_document_frequency",
"=",
"0.5",
",",
"output_column_prefix",
"=",
"output_column_prefix",
")",
"]"
] | Transforms short text into a dictionary of TFIDF-weighted 3-gram
character counts. | [
"Transforms",
"short",
"text",
"into",
"a",
"dictionary",
"of",
"TFIDF",
"-",
"weighted",
"3",
"-",
"gram",
"character",
"counts",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L224-L241 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | _interpretations_class.categorical__int | def categorical__int(self, column_name, output_column_prefix):
"""
Interprets an integer column as a categorical variable.
"""
return [_ColumnFunctionTransformation(
features = [column_name],
output_column_prefix = output_column_prefix,
transform_function = lambda col: col.astype(str),
transform_function_name = "astype(str)")] | python | def categorical__int(self, column_name, output_column_prefix):
"""
Interprets an integer column as a categorical variable.
"""
return [_ColumnFunctionTransformation(
features = [column_name],
output_column_prefix = output_column_prefix,
transform_function = lambda col: col.astype(str),
transform_function_name = "astype(str)")] | [
"def",
"categorical__int",
"(",
"self",
",",
"column_name",
",",
"output_column_prefix",
")",
":",
"return",
"[",
"_ColumnFunctionTransformation",
"(",
"features",
"=",
"[",
"column_name",
"]",
",",
"output_column_prefix",
"=",
"output_column_prefix",
",",
"transform_function",
"=",
"lambda",
"col",
":",
"col",
".",
"astype",
"(",
"str",
")",
",",
"transform_function_name",
"=",
"\"astype(str)\"",
")",
"]"
] | Interprets an integer column as a categorical variable. | [
"Interprets",
"an",
"integer",
"column",
"as",
"a",
"categorical",
"variable",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L283-L292 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | AutoVectorizer._setup_from_data | def _setup_from_data(self, data):
"""
Sets up the content transforms.
"""
fitted_state = {}
_raise_error_if_not_of_type(data, [_SFrame])
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state["features"] = feature_columns
################################################################################
# Helper functions
def get_valid_interpretations():
return list(n.split("__")[0] for n in dir(_interpretations) if not n.startswith("_"))
################################################################################
# Check input data.
if not isinstance(data, _SFrame):
raise TypeError("`data` parameter must be an SFrame.")
all_col_names = set(feature_columns)
column_interpretations = self._get("column_interpretations").copy()
# Make sure all the interpretations are valid.
for k, v in column_interpretations.items():
if k not in all_col_names:
raise ValueError("Column '%s' in column_interpretations, but not found in `data`." % k)
# Get the automatic column interpretations.
for col_name in feature_columns:
if col_name not in column_interpretations:
n = column_interpretations[col_name] = infer_column_interpretation(data[col_name])
if n.startswith("unknown"):
raise ValueError("Interpretation inference failed on column '%s'; %s"
% (col_name, n[len("unknown"):].strip()))
# Now, build up the feature transforms.
transforms = {}
input_types = {}
output_column_prefix = self._get("output_column_prefix")
assert output_column_prefix is None or type(output_column_prefix) is str
tr_chain = []
for col_name in feature_columns:
in_type = input_types[col_name] = data[col_name].dtype
intr_func = _get_interpretation_function(column_interpretations[col_name], in_type)
tr_list = intr_func(col_name, output_column_prefix)
transforms[col_name] = tr_list
tr_chain += tr_list
fitted_state["transform_chain"] = _TransformerChain(tr_chain)
fitted_state["transforms"] = transforms
fitted_state["input_types"] = input_types
fitted_state["column_interpretations"] = column_interpretations
self.__proxy__.update(fitted_state) | python | def _setup_from_data(self, data):
"""
Sets up the content transforms.
"""
fitted_state = {}
_raise_error_if_not_of_type(data, [_SFrame])
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state["features"] = feature_columns
################################################################################
# Helper functions
def get_valid_interpretations():
return list(n.split("__")[0] for n in dir(_interpretations) if not n.startswith("_"))
################################################################################
# Check input data.
if not isinstance(data, _SFrame):
raise TypeError("`data` parameter must be an SFrame.")
all_col_names = set(feature_columns)
column_interpretations = self._get("column_interpretations").copy()
# Make sure all the interpretations are valid.
for k, v in column_interpretations.items():
if k not in all_col_names:
raise ValueError("Column '%s' in column_interpretations, but not found in `data`." % k)
# Get the automatic column interpretations.
for col_name in feature_columns:
if col_name not in column_interpretations:
n = column_interpretations[col_name] = infer_column_interpretation(data[col_name])
if n.startswith("unknown"):
raise ValueError("Interpretation inference failed on column '%s'; %s"
% (col_name, n[len("unknown"):].strip()))
# Now, build up the feature transforms.
transforms = {}
input_types = {}
output_column_prefix = self._get("output_column_prefix")
assert output_column_prefix is None or type(output_column_prefix) is str
tr_chain = []
for col_name in feature_columns:
in_type = input_types[col_name] = data[col_name].dtype
intr_func = _get_interpretation_function(column_interpretations[col_name], in_type)
tr_list = intr_func(col_name, output_column_prefix)
transforms[col_name] = tr_list
tr_chain += tr_list
fitted_state["transform_chain"] = _TransformerChain(tr_chain)
fitted_state["transforms"] = transforms
fitted_state["input_types"] = input_types
fitted_state["column_interpretations"] = column_interpretations
self.__proxy__.update(fitted_state) | [
"def",
"_setup_from_data",
"(",
"self",
",",
"data",
")",
":",
"fitted_state",
"=",
"{",
"}",
"_raise_error_if_not_of_type",
"(",
"data",
",",
"[",
"_SFrame",
"]",
")",
"feature_columns",
"=",
"_internal_utils",
".",
"get_column_names",
"(",
"data",
",",
"self",
".",
"_exclude",
",",
"self",
".",
"_features",
")",
"if",
"not",
"feature_columns",
":",
"raise",
"RuntimeError",
"(",
"\"No valid feature columns specified in transformation.\"",
")",
"fitted_state",
"[",
"\"features\"",
"]",
"=",
"feature_columns",
"################################################################################",
"# Helper functions",
"def",
"get_valid_interpretations",
"(",
")",
":",
"return",
"list",
"(",
"n",
".",
"split",
"(",
"\"__\"",
")",
"[",
"0",
"]",
"for",
"n",
"in",
"dir",
"(",
"_interpretations",
")",
"if",
"not",
"n",
".",
"startswith",
"(",
"\"_\"",
")",
")",
"################################################################################",
"# Check input data.",
"if",
"not",
"isinstance",
"(",
"data",
",",
"_SFrame",
")",
":",
"raise",
"TypeError",
"(",
"\"`data` parameter must be an SFrame.\"",
")",
"all_col_names",
"=",
"set",
"(",
"feature_columns",
")",
"column_interpretations",
"=",
"self",
".",
"_get",
"(",
"\"column_interpretations\"",
")",
".",
"copy",
"(",
")",
"# Make sure all the interpretations are valid.",
"for",
"k",
",",
"v",
"in",
"column_interpretations",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"all_col_names",
":",
"raise",
"ValueError",
"(",
"\"Column '%s' in column_interpretations, but not found in `data`.\"",
"%",
"k",
")",
"# Get the automatic column interpretations.",
"for",
"col_name",
"in",
"feature_columns",
":",
"if",
"col_name",
"not",
"in",
"column_interpretations",
":",
"n",
"=",
"column_interpretations",
"[",
"col_name",
"]",
"=",
"infer_column_interpretation",
"(",
"data",
"[",
"col_name",
"]",
")",
"if",
"n",
".",
"startswith",
"(",
"\"unknown\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Interpretation inference failed on column '%s'; %s\"",
"%",
"(",
"col_name",
",",
"n",
"[",
"len",
"(",
"\"unknown\"",
")",
":",
"]",
".",
"strip",
"(",
")",
")",
")",
"# Now, build up the feature transforms.",
"transforms",
"=",
"{",
"}",
"input_types",
"=",
"{",
"}",
"output_column_prefix",
"=",
"self",
".",
"_get",
"(",
"\"output_column_prefix\"",
")",
"assert",
"output_column_prefix",
"is",
"None",
"or",
"type",
"(",
"output_column_prefix",
")",
"is",
"str",
"tr_chain",
"=",
"[",
"]",
"for",
"col_name",
"in",
"feature_columns",
":",
"in_type",
"=",
"input_types",
"[",
"col_name",
"]",
"=",
"data",
"[",
"col_name",
"]",
".",
"dtype",
"intr_func",
"=",
"_get_interpretation_function",
"(",
"column_interpretations",
"[",
"col_name",
"]",
",",
"in_type",
")",
"tr_list",
"=",
"intr_func",
"(",
"col_name",
",",
"output_column_prefix",
")",
"transforms",
"[",
"col_name",
"]",
"=",
"tr_list",
"tr_chain",
"+=",
"tr_list",
"fitted_state",
"[",
"\"transform_chain\"",
"]",
"=",
"_TransformerChain",
"(",
"tr_chain",
")",
"fitted_state",
"[",
"\"transforms\"",
"]",
"=",
"transforms",
"fitted_state",
"[",
"\"input_types\"",
"]",
"=",
"input_types",
"fitted_state",
"[",
"\"column_interpretations\"",
"]",
"=",
"column_interpretations",
"self",
".",
"__proxy__",
".",
"update",
"(",
"fitted_state",
")"
] | Sets up the content transforms. | [
"Sets",
"up",
"the",
"content",
"transforms",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L550-L619 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | AutoVectorizer.fit | def fit(self, data):
"""
Fits a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted object)
See Also
--------
transform, fit_transform
"""
self._setup_from_data(data)
self.transform_chain.fit(data)
self.__proxy__.update({"fitted" : True})
return self | python | def fit(self, data):
"""
Fits a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted object)
See Also
--------
transform, fit_transform
"""
self._setup_from_data(data)
self.transform_chain.fit(data)
self.__proxy__.update({"fitted" : True})
return self | [
"def",
"fit",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_setup_from_data",
"(",
"data",
")",
"self",
".",
"transform_chain",
".",
"fit",
"(",
"data",
")",
"self",
".",
"__proxy__",
".",
"update",
"(",
"{",
"\"fitted\"",
":",
"True",
"}",
")",
"return",
"self"
] | Fits a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted object)
See Also
--------
transform, fit_transform | [
"Fits",
"a",
"transformer",
"using",
"the",
"SFrame",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L621-L643 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | AutoVectorizer.fit_transform | def fit_transform(self, data):
"""
Fits and transforms the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, transform
"""
self._setup_from_data(data)
ret = self.transform_chain.fit_transform(data)
self.__proxy__.update({"fitted" : True})
return ret | python | def fit_transform(self, data):
"""
Fits and transforms the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, transform
"""
self._setup_from_data(data)
ret = self.transform_chain.fit_transform(data)
self.__proxy__.update({"fitted" : True})
return ret | [
"def",
"fit_transform",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_setup_from_data",
"(",
"data",
")",
"ret",
"=",
"self",
".",
"transform_chain",
".",
"fit_transform",
"(",
"data",
")",
"self",
".",
"__proxy__",
".",
"update",
"(",
"{",
"\"fitted\"",
":",
"True",
"}",
")",
"return",
"ret"
] | Fits and transforms the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, transform | [
"Fits",
"and",
"transforms",
"the",
"SFrame",
"data",
"using",
"a",
"fitted",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L645-L671 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | AutoVectorizer.transform | def transform(self, data):
"""
Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, fit_transform
"""
if self.transform_chain is None:
raise RuntimeError("`transform()` method called before `fit` or `fit_transform`.")
return self.transform_chain.transform(data) | python | def transform(self, data):
"""
Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, fit_transform
"""
if self.transform_chain is None:
raise RuntimeError("`transform()` method called before `fit` or `fit_transform`.")
return self.transform_chain.transform(data) | [
"def",
"transform",
"(",
"self",
",",
"data",
")",
":",
"if",
"self",
".",
"transform_chain",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"`transform()` method called before `fit` or `fit_transform`.\"",
")",
"return",
"self",
".",
"transform_chain",
".",
"transform",
"(",
"data",
")"
] | Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, fit_transform | [
"Transform",
"the",
"SFrame",
"data",
"using",
"a",
"fitted",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L673-L699 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | AutoVectorizer._get_summary_struct | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<feature>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
sections = []
fields = []
_features = _precomputed_field(_internal_utils.pretty_print_list(self.features))
_exclude = _precomputed_field(_internal_utils.pretty_print_list(self.excluded_features))
header_fields = [("Features", "features"),
("Excluded Features", "excluded_features")]
sections.append("Model Fields")
fields.append(header_fields)
if self.user_column_interpretations:
sections.append("User Specified Interpretations")
fields.append(list(sorted(self._get("user_column_interpretations").items())))
column_interpretations = self._get("column_interpretations")
features = self._get("features")
if self._get("fitted") and features is not None:
n_rows = len(features)
transform_info = [None]*n_rows
for i, f in enumerate(features):
interpretation = column_interpretations[f]
input_type = self.input_types[f]
description, output_type = _get_interpretation_description_and_output_type(
interpretation, input_type)
transform_info[i] = (f, input_type.__name__, interpretation, description, output_type.__name__)
transform_table = _SFrame()
transform_table["Column"] = [t[0] for t in transform_info]
transform_table["Type"] = [t[1] for t in transform_info]
transform_table["Interpretation"] = [t[2] for t in transform_info]
transform_table["Transforms"] = [t[3] for t in transform_info]
transform_table["Output Type"] = [t[4] for t in transform_info]
fields[-1].append(transform_table)
return fields, sections | python | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<feature>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
sections = []
fields = []
_features = _precomputed_field(_internal_utils.pretty_print_list(self.features))
_exclude = _precomputed_field(_internal_utils.pretty_print_list(self.excluded_features))
header_fields = [("Features", "features"),
("Excluded Features", "excluded_features")]
sections.append("Model Fields")
fields.append(header_fields)
if self.user_column_interpretations:
sections.append("User Specified Interpretations")
fields.append(list(sorted(self._get("user_column_interpretations").items())))
column_interpretations = self._get("column_interpretations")
features = self._get("features")
if self._get("fitted") and features is not None:
n_rows = len(features)
transform_info = [None]*n_rows
for i, f in enumerate(features):
interpretation = column_interpretations[f]
input_type = self.input_types[f]
description, output_type = _get_interpretation_description_and_output_type(
interpretation, input_type)
transform_info[i] = (f, input_type.__name__, interpretation, description, output_type.__name__)
transform_table = _SFrame()
transform_table["Column"] = [t[0] for t in transform_info]
transform_table["Type"] = [t[1] for t in transform_info]
transform_table["Interpretation"] = [t[2] for t in transform_info]
transform_table["Transforms"] = [t[3] for t in transform_info]
transform_table["Output Type"] = [t[4] for t in transform_info]
fields[-1].append(transform_table)
return fields, sections | [
"def",
"_get_summary_struct",
"(",
"self",
")",
":",
"sections",
"=",
"[",
"]",
"fields",
"=",
"[",
"]",
"_features",
"=",
"_precomputed_field",
"(",
"_internal_utils",
".",
"pretty_print_list",
"(",
"self",
".",
"features",
")",
")",
"_exclude",
"=",
"_precomputed_field",
"(",
"_internal_utils",
".",
"pretty_print_list",
"(",
"self",
".",
"excluded_features",
")",
")",
"header_fields",
"=",
"[",
"(",
"\"Features\"",
",",
"\"features\"",
")",
",",
"(",
"\"Excluded Features\"",
",",
"\"excluded_features\"",
")",
"]",
"sections",
".",
"append",
"(",
"\"Model Fields\"",
")",
"fields",
".",
"append",
"(",
"header_fields",
")",
"if",
"self",
".",
"user_column_interpretations",
":",
"sections",
".",
"append",
"(",
"\"User Specified Interpretations\"",
")",
"fields",
".",
"append",
"(",
"list",
"(",
"sorted",
"(",
"self",
".",
"_get",
"(",
"\"user_column_interpretations\"",
")",
".",
"items",
"(",
")",
")",
")",
")",
"column_interpretations",
"=",
"self",
".",
"_get",
"(",
"\"column_interpretations\"",
")",
"features",
"=",
"self",
".",
"_get",
"(",
"\"features\"",
")",
"if",
"self",
".",
"_get",
"(",
"\"fitted\"",
")",
"and",
"features",
"is",
"not",
"None",
":",
"n_rows",
"=",
"len",
"(",
"features",
")",
"transform_info",
"=",
"[",
"None",
"]",
"*",
"n_rows",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"features",
")",
":",
"interpretation",
"=",
"column_interpretations",
"[",
"f",
"]",
"input_type",
"=",
"self",
".",
"input_types",
"[",
"f",
"]",
"description",
",",
"output_type",
"=",
"_get_interpretation_description_and_output_type",
"(",
"interpretation",
",",
"input_type",
")",
"transform_info",
"[",
"i",
"]",
"=",
"(",
"f",
",",
"input_type",
".",
"__name__",
",",
"interpretation",
",",
"description",
",",
"output_type",
".",
"__name__",
")",
"transform_table",
"=",
"_SFrame",
"(",
")",
"transform_table",
"[",
"\"Column\"",
"]",
"=",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"transform_info",
"]",
"transform_table",
"[",
"\"Type\"",
"]",
"=",
"[",
"t",
"[",
"1",
"]",
"for",
"t",
"in",
"transform_info",
"]",
"transform_table",
"[",
"\"Interpretation\"",
"]",
"=",
"[",
"t",
"[",
"2",
"]",
"for",
"t",
"in",
"transform_info",
"]",
"transform_table",
"[",
"\"Transforms\"",
"]",
"=",
"[",
"t",
"[",
"3",
"]",
"for",
"t",
"in",
"transform_info",
"]",
"transform_table",
"[",
"\"Output Type\"",
"]",
"=",
"[",
"t",
"[",
"4",
"]",
"for",
"t",
"in",
"transform_info",
"]",
"fields",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"transform_table",
")",
"return",
"fields",
",",
"sections"
] | Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<feature>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object. | [
"Returns",
"a",
"structured",
"description",
"of",
"the",
"model",
"including",
"(",
"where",
"relevant",
")",
"the",
"schema",
"of",
"the",
"training",
"data",
"description",
"of",
"the",
"training",
"data",
"training",
"statistics",
"and",
"model",
"hyperparameters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L702-L761 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py | AutoVectorizer._save_impl | def _save_impl(self, pickler):
"""
Save the model as a directory, which can be loaded with the
:py:func:`~turicreate.load_model` method.
Parameters
----------
pickler : GLPickler
An opened GLPickle archive (Do not close the archive).
See Also
--------
turicreate.load_model
Examples
--------
>>> model.save('my_model_file')
>>> loaded_model = turicreate.load_model('my_model_file')
"""
pickler.dump( (self.__proxy__.state, self._exclude, self._features) ) | python | def _save_impl(self, pickler):
"""
Save the model as a directory, which can be loaded with the
:py:func:`~turicreate.load_model` method.
Parameters
----------
pickler : GLPickler
An opened GLPickle archive (Do not close the archive).
See Also
--------
turicreate.load_model
Examples
--------
>>> model.save('my_model_file')
>>> loaded_model = turicreate.load_model('my_model_file')
"""
pickler.dump( (self.__proxy__.state, self._exclude, self._features) ) | [
"def",
"_save_impl",
"(",
"self",
",",
"pickler",
")",
":",
"pickler",
".",
"dump",
"(",
"(",
"self",
".",
"__proxy__",
".",
"state",
",",
"self",
".",
"_exclude",
",",
"self",
".",
"_features",
")",
")"
] | Save the model as a directory, which can be loaded with the
:py:func:`~turicreate.load_model` method.
Parameters
----------
pickler : GLPickler
An opened GLPickle archive (Do not close the archive).
See Also
--------
turicreate.load_model
Examples
--------
>>> model.save('my_model_file')
>>> loaded_model = turicreate.load_model('my_model_file') | [
"Save",
"the",
"model",
"as",
"a",
"directory",
"which",
"can",
"be",
"loaded",
"with",
"the",
":",
"py",
":",
"func",
":",
"~turicreate",
".",
"load_model",
"method",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L796-L815 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | Mox.CreateMock | def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock | python | def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock | [
"def",
"CreateMock",
"(",
"self",
",",
"class_to_mock",
")",
":",
"new_mock",
"=",
"MockObject",
"(",
"class_to_mock",
")",
"self",
".",
"_mock_objects",
".",
"append",
"(",
"new_mock",
")",
"return",
"new_mock"
] | Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be. | [
"Create",
"a",
"new",
"mock",
"object",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L164-L177 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | Mox.StubOutWithMock | def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub) | python | def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub) | [
"def",
"StubOutWithMock",
"(",
"self",
",",
"obj",
",",
"attr_name",
",",
"use_mock_anything",
"=",
"False",
")",
":",
"attr_to_replace",
"=",
"getattr",
"(",
"obj",
",",
"attr_name",
")",
"if",
"type",
"(",
"attr_to_replace",
")",
"in",
"self",
".",
"_USE_MOCK_OBJECT",
"and",
"not",
"use_mock_anything",
":",
"stub",
"=",
"self",
".",
"CreateMock",
"(",
"attr_to_replace",
")",
"else",
":",
"stub",
"=",
"self",
".",
"CreateMockAnything",
"(",
")",
"self",
".",
"stubs",
".",
"Set",
"(",
"obj",
",",
"attr_name",
",",
"stub",
")"
] | Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute. | [
"Replace",
"a",
"method",
"attribute",
"etc",
".",
"with",
"a",
"Mock",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L208-L228 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | MockAnything._Verify | def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue) | python | def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue) | [
"def",
"_Verify",
"(",
"self",
")",
":",
"# If the list of expected calls is not empty, raise an exception",
"if",
"self",
".",
"_expected_calls_queue",
":",
"# The last MultipleTimesGroup is not popped from the queue.",
"if",
"(",
"len",
"(",
"self",
".",
"_expected_calls_queue",
")",
"==",
"1",
"and",
"isinstance",
"(",
"self",
".",
"_expected_calls_queue",
"[",
"0",
"]",
",",
"MultipleTimesGroup",
")",
"and",
"self",
".",
"_expected_calls_queue",
"[",
"0",
"]",
".",
"IsSatisfied",
"(",
")",
")",
":",
"pass",
"else",
":",
"raise",
"ExpectedMethodCallsError",
"(",
"self",
".",
"_expected_calls_queue",
")"
] | Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue. | [
"Verify",
"that",
"all",
"of",
"the",
"expected",
"calls",
"have",
"been",
"made",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L331-L347 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | MockMethod._VerifyMethodCall | def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected | python | def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected | [
"def",
"_VerifyMethodCall",
"(",
"self",
")",
":",
"expected",
"=",
"self",
".",
"_PopNextMethod",
"(",
")",
"# Loop here, because we might have a MethodGroup followed by another",
"# group.",
"while",
"isinstance",
"(",
"expected",
",",
"MethodGroup",
")",
":",
"expected",
",",
"method",
"=",
"expected",
".",
"MethodCalled",
"(",
"self",
")",
"if",
"method",
"is",
"not",
"None",
":",
"return",
"method",
"# This is a mock method, so just check equality.",
"if",
"expected",
"!=",
"self",
":",
"raise",
"UnexpectedMethodCallError",
"(",
"self",
",",
"expected",
")",
"return",
"expected"
] | Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected. | [
"Verify",
"the",
"called",
"method",
"is",
"expected",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L588-L613 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | MockMethod.GetPossibleGroup | def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group | python | def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group | [
"def",
"GetPossibleGroup",
"(",
"self",
")",
":",
"# Remove this method from the tail of the queue so we can add it to a group.",
"this_method",
"=",
"self",
".",
"_call_queue",
".",
"pop",
"(",
")",
"assert",
"this_method",
"==",
"self",
"# Determine if the tail of the queue is a group, or just a regular ordered",
"# mock method.",
"group",
"=",
"None",
"try",
":",
"group",
"=",
"self",
".",
"_call_queue",
"[",
"-",
"1",
"]",
"except",
"IndexError",
":",
"pass",
"return",
"group"
] | Returns a possible group from the end of the call queue or None if no
other methods are on the stack. | [
"Returns",
"a",
"possible",
"group",
"from",
"the",
"end",
"of",
"the",
"call",
"queue",
"or",
"None",
"if",
"no",
"other",
"methods",
"are",
"on",
"the",
"stack",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L645-L662 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | MockMethod._CheckAndCreateNewGroup | def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self | python | def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self | [
"def",
"_CheckAndCreateNewGroup",
"(",
"self",
",",
"group_name",
",",
"group_class",
")",
":",
"group",
"=",
"self",
".",
"GetPossibleGroup",
"(",
")",
"# If this is a group, and it is the correct group, add the method.",
"if",
"isinstance",
"(",
"group",
",",
"group_class",
")",
"and",
"group",
".",
"group_name",
"(",
")",
"==",
"group_name",
":",
"group",
".",
"AddMethod",
"(",
"self",
")",
"return",
"self",
"# Create a new group and add the method.",
"new_group",
"=",
"group_class",
"(",
"group_name",
")",
"new_group",
".",
"AddMethod",
"(",
"self",
")",
"self",
".",
"_call_queue",
".",
"append",
"(",
"new_group",
")",
"return",
"self"
] | Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group | [
"Checks",
"if",
"the",
"last",
"method",
"(",
"a",
"possible",
"group",
")",
"is",
"an",
"instance",
"of",
"our",
"group_class",
".",
"Adds",
"the",
"current",
"method",
"to",
"this",
"group",
"or",
"creates",
"a",
"new",
"one",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L664-L684 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | IsA.equals | def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name) | python | def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name) | [
"def",
"equals",
"(",
"self",
",",
"rhs",
")",
":",
"try",
":",
"return",
"isinstance",
"(",
"rhs",
",",
"self",
".",
"_class_name",
")",
"except",
"TypeError",
":",
"# Check raw types if there was a type error. This is helpful for",
"# things like cStringIO.StringIO.",
"return",
"type",
"(",
"rhs",
")",
"==",
"type",
"(",
"self",
".",
"_class_name",
")"
] | Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool | [
"Check",
"to",
"see",
"if",
"the",
"RHS",
"is",
"an",
"instance",
"of",
"class_name",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L807-L823 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | IsAlmost.equals | def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False | python | def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False | [
"def",
"equals",
"(",
"self",
",",
"rhs",
")",
":",
"try",
":",
"return",
"round",
"(",
"rhs",
"-",
"self",
".",
"_float_value",
",",
"self",
".",
"_places",
")",
"==",
"0",
"except",
"TypeError",
":",
"# This is probably because either float_value or rhs is not a number.",
"return",
"False"
] | Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool | [
"Check",
"to",
"see",
"if",
"RHS",
"is",
"almost",
"equal",
"to",
"float_value"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L846-L860 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | ContainsKeyValue.equals | def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False | python | def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False | [
"def",
"equals",
"(",
"self",
",",
"rhs",
")",
":",
"try",
":",
"return",
"rhs",
"[",
"self",
".",
"_key",
"]",
"==",
"self",
".",
"_value",
"except",
"Exception",
":",
"return",
"False"
] | Check whether the given key/value pair is in the rhs dict.
Returns:
bool | [
"Check",
"whether",
"the",
"given",
"key",
"/",
"value",
"pair",
"is",
"in",
"the",
"rhs",
"dict",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L989-L999 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | SameElementsAs.equals | def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual | python | def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual | [
"def",
"equals",
"(",
"self",
",",
"actual_seq",
")",
":",
"try",
":",
"expected",
"=",
"dict",
"(",
"[",
"(",
"element",
",",
"None",
")",
"for",
"element",
"in",
"self",
".",
"_expected_seq",
"]",
")",
"actual",
"=",
"dict",
"(",
"[",
"(",
"element",
",",
"None",
")",
"for",
"element",
"in",
"actual_seq",
"]",
")",
"except",
"TypeError",
":",
"# Fall back to slower list-compare if any of the objects are unhashable.",
"expected",
"=",
"list",
"(",
"self",
".",
"_expected_seq",
")",
"actual",
"=",
"list",
"(",
"actual_seq",
")",
"expected",
".",
"sort",
"(",
")",
"actual",
".",
"sort",
"(",
")",
"return",
"expected",
"==",
"actual"
] | Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool | [
"Check",
"to",
"see",
"whether",
"actual_seq",
"has",
"same",
"elements",
"as",
"expected_seq",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1021-L1040 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | Or.equals | def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False | python | def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False | [
"def",
"equals",
"(",
"self",
",",
"rhs",
")",
":",
"for",
"comparator",
"in",
"self",
".",
"_comparators",
":",
"if",
"comparator",
".",
"equals",
"(",
"rhs",
")",
":",
"return",
"True",
"return",
"False"
] | Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool | [
"Checks",
"whether",
"any",
"Comparator",
"is",
"equal",
"to",
"rhs",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1092-L1106 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | UnorderedGroup.MethodCalled | def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self) | python | def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self) | [
"def",
"MethodCalled",
"(",
"self",
",",
"mock_method",
")",
":",
"# Check to see if this method exists, and if so, remove it from the set",
"# and return it.",
"for",
"method",
"in",
"self",
".",
"_methods",
":",
"if",
"method",
"==",
"mock_method",
":",
"# Remove the called mock_method instead of the method in the group.",
"# The called method will match any comparators when equality is checked",
"# during removal. The method in the group could pass a comparator to",
"# another comparator during the equality check.",
"self",
".",
"_methods",
".",
"remove",
"(",
"mock_method",
")",
"# If this group is not empty, put it back at the head of the queue.",
"if",
"not",
"self",
".",
"IsSatisfied",
"(",
")",
":",
"mock_method",
".",
"_call_queue",
".",
"appendleft",
"(",
"self",
")",
"return",
"self",
",",
"method",
"raise",
"UnexpectedMethodCallError",
"(",
"mock_method",
",",
"self",
")"
] | Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group. | [
"Remove",
"a",
"method",
"call",
"from",
"the",
"group",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1223-L1255 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | MultipleTimesGroup.MethodCalled | def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod();
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self) | python | def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod();
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self) | [
"def",
"MethodCalled",
"(",
"self",
",",
"mock_method",
")",
":",
"# Check to see if this method exists, and if so add it to the set of",
"# called methods.",
"for",
"method",
"in",
"self",
".",
"_methods",
":",
"if",
"method",
"==",
"mock_method",
":",
"self",
".",
"_methods_called",
".",
"add",
"(",
"mock_method",
")",
"# Always put this group back on top of the queue, because we don't know",
"# when we are done.",
"mock_method",
".",
"_call_queue",
".",
"appendleft",
"(",
"self",
")",
"return",
"self",
",",
"method",
"if",
"self",
".",
"IsSatisfied",
"(",
")",
":",
"next_method",
"=",
"mock_method",
".",
"_PopNextMethod",
"(",
")",
"return",
"next_method",
",",
"None",
"else",
":",
"raise",
"UnexpectedMethodCallError",
"(",
"mock_method",
",",
"self",
")"
] | Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group. | [
"Remove",
"a",
"method",
"call",
"from",
"the",
"group",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1285-L1316 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | MultipleTimesGroup.IsSatisfied | def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False | python | def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False | [
"def",
"IsSatisfied",
"(",
"self",
")",
":",
"# NOTE(psycho): We can't use the simple set difference here because we want",
"# to match different parameters which are considered the same e.g. IsA(str)",
"# and some string. This solution is O(n^2) but n should be small.",
"tmp",
"=",
"self",
".",
"_methods",
".",
"copy",
"(",
")",
"for",
"called",
"in",
"self",
".",
"_methods_called",
":",
"for",
"expected",
"in",
"tmp",
":",
"if",
"called",
"==",
"expected",
":",
"tmp",
".",
"remove",
"(",
"expected",
")",
"if",
"not",
"tmp",
":",
"return",
"True",
"break",
"return",
"False"
] | Return True if all methods in this group are called at least once. | [
"Return",
"True",
"if",
"all",
"methods",
"in",
"this",
"group",
"are",
"called",
"at",
"least",
"once",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L1318-L1331 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_dict_vectorizer.py | convert | def convert(model, input_features, output_features):
"""Convert a _imputer model to the protobuf spec.
Parameters
----------
model: Imputer
A trained Imputer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
_INTERMEDIATE_FEATURE_NAME = "__sparse_vector_features__"
n_dimensions = len(model.feature_names_)
input_features = process_or_validate_features(input_features)
# Ensure that the output_features are also solid.
output_features = process_or_validate_features(output_features, n_dimensions)
# The DictVectorizer in the framework outputs a sparse dictionary
# of index to value due to other considerations, but we are expecting
# the output of this to be a dense feature vector. To make that happen,
# put a feature_vectorizer immediately after the dict vectorizer.
pline = Pipeline(input_features, output_features)
# Set the basic model parameters of the dict vectorizer component.
dv_spec = _Model_pb2.Model()
dv_spec.specificationVersion = SPECIFICATION_VERSION
# Set up the dict vectorizer parameters
tr_spec = dv_spec.dictVectorizer
is_str = None
for feature_name in model.feature_names_:
if isinstance(feature_name, _six.string_types):
if is_str == False:
raise ValueError("Mapping of DictVectorizer mixes int and str types.")
tr_spec.stringToIndex.vector.append(feature_name)
is_str == True
if isinstance(feature_name, _six.integer_types):
if is_str == True:
raise ValueError("Mapping of DictVectorizer mixes int and str types.")
tr_spec.int64ToIndex.vector.append(feature_name)
is_str == False
intermediate_features = [(_INTERMEDIATE_FEATURE_NAME,
datatypes.Dictionary(key_type = int))]
# Set the interface for the dict vectorizer with the input and the
# intermediate output
set_transform_interface_params(
dv_spec, input_features, intermediate_features)
pline.add_model(dv_spec)
# Follow the dict vectorizer by a feature_vectorizer to change the sparse
# output layer into a dense vector as expected.
fvec, _num_out_dim = create_feature_vectorizer(intermediate_features,
output_features[0][0], {"__sparse_vector_features__" : n_dimensions})
pline.add_model(fvec)
return _MLModel(pline.spec) | python | def convert(model, input_features, output_features):
"""Convert a _imputer model to the protobuf spec.
Parameters
----------
model: Imputer
A trained Imputer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
_INTERMEDIATE_FEATURE_NAME = "__sparse_vector_features__"
n_dimensions = len(model.feature_names_)
input_features = process_or_validate_features(input_features)
# Ensure that the output_features are also solid.
output_features = process_or_validate_features(output_features, n_dimensions)
# The DictVectorizer in the framework outputs a sparse dictionary
# of index to value due to other considerations, but we are expecting
# the output of this to be a dense feature vector. To make that happen,
# put a feature_vectorizer immediately after the dict vectorizer.
pline = Pipeline(input_features, output_features)
# Set the basic model parameters of the dict vectorizer component.
dv_spec = _Model_pb2.Model()
dv_spec.specificationVersion = SPECIFICATION_VERSION
# Set up the dict vectorizer parameters
tr_spec = dv_spec.dictVectorizer
is_str = None
for feature_name in model.feature_names_:
if isinstance(feature_name, _six.string_types):
if is_str == False:
raise ValueError("Mapping of DictVectorizer mixes int and str types.")
tr_spec.stringToIndex.vector.append(feature_name)
is_str == True
if isinstance(feature_name, _six.integer_types):
if is_str == True:
raise ValueError("Mapping of DictVectorizer mixes int and str types.")
tr_spec.int64ToIndex.vector.append(feature_name)
is_str == False
intermediate_features = [(_INTERMEDIATE_FEATURE_NAME,
datatypes.Dictionary(key_type = int))]
# Set the interface for the dict vectorizer with the input and the
# intermediate output
set_transform_interface_params(
dv_spec, input_features, intermediate_features)
pline.add_model(dv_spec)
# Follow the dict vectorizer by a feature_vectorizer to change the sparse
# output layer into a dense vector as expected.
fvec, _num_out_dim = create_feature_vectorizer(intermediate_features,
output_features[0][0], {"__sparse_vector_features__" : n_dimensions})
pline.add_model(fvec)
return _MLModel(pline.spec) | [
"def",
"convert",
"(",
"model",
",",
"input_features",
",",
"output_features",
")",
":",
"_INTERMEDIATE_FEATURE_NAME",
"=",
"\"__sparse_vector_features__\"",
"n_dimensions",
"=",
"len",
"(",
"model",
".",
"feature_names_",
")",
"input_features",
"=",
"process_or_validate_features",
"(",
"input_features",
")",
"# Ensure that the output_features are also solid.",
"output_features",
"=",
"process_or_validate_features",
"(",
"output_features",
",",
"n_dimensions",
")",
"# The DictVectorizer in the framework outputs a sparse dictionary",
"# of index to value due to other considerations, but we are expecting",
"# the output of this to be a dense feature vector. To make that happen,",
"# put a feature_vectorizer immediately after the dict vectorizer.",
"pline",
"=",
"Pipeline",
"(",
"input_features",
",",
"output_features",
")",
"# Set the basic model parameters of the dict vectorizer component.",
"dv_spec",
"=",
"_Model_pb2",
".",
"Model",
"(",
")",
"dv_spec",
".",
"specificationVersion",
"=",
"SPECIFICATION_VERSION",
"# Set up the dict vectorizer parameters",
"tr_spec",
"=",
"dv_spec",
".",
"dictVectorizer",
"is_str",
"=",
"None",
"for",
"feature_name",
"in",
"model",
".",
"feature_names_",
":",
"if",
"isinstance",
"(",
"feature_name",
",",
"_six",
".",
"string_types",
")",
":",
"if",
"is_str",
"==",
"False",
":",
"raise",
"ValueError",
"(",
"\"Mapping of DictVectorizer mixes int and str types.\"",
")",
"tr_spec",
".",
"stringToIndex",
".",
"vector",
".",
"append",
"(",
"feature_name",
")",
"is_str",
"==",
"True",
"if",
"isinstance",
"(",
"feature_name",
",",
"_six",
".",
"integer_types",
")",
":",
"if",
"is_str",
"==",
"True",
":",
"raise",
"ValueError",
"(",
"\"Mapping of DictVectorizer mixes int and str types.\"",
")",
"tr_spec",
".",
"int64ToIndex",
".",
"vector",
".",
"append",
"(",
"feature_name",
")",
"is_str",
"==",
"False",
"intermediate_features",
"=",
"[",
"(",
"_INTERMEDIATE_FEATURE_NAME",
",",
"datatypes",
".",
"Dictionary",
"(",
"key_type",
"=",
"int",
")",
")",
"]",
"# Set the interface for the dict vectorizer with the input and the",
"# intermediate output",
"set_transform_interface_params",
"(",
"dv_spec",
",",
"input_features",
",",
"intermediate_features",
")",
"pline",
".",
"add_model",
"(",
"dv_spec",
")",
"# Follow the dict vectorizer by a feature_vectorizer to change the sparse",
"# output layer into a dense vector as expected.",
"fvec",
",",
"_num_out_dim",
"=",
"create_feature_vectorizer",
"(",
"intermediate_features",
",",
"output_features",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"{",
"\"__sparse_vector_features__\"",
":",
"n_dimensions",
"}",
")",
"pline",
".",
"add_model",
"(",
"fvec",
")",
"return",
"_MLModel",
"(",
"pline",
".",
"spec",
")"
] | Convert a _imputer model to the protobuf spec.
Parameters
----------
model: Imputer
A trained Imputer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"_imputer",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_dict_vectorizer.py#L29-L102 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py | set_classifier_interface_params | def set_classifier_interface_params(spec, features, class_labels,
model_accessor_for_class_labels, output_features = None):
"""
Common utilities to set the regression interface params.
"""
# Normalize the features list.
features = _fm.process_or_validate_features(features)
if class_labels is None:
raise ValueError("List of class labels must be provided.")
n_classes = len(class_labels)
output_features = _fm.process_or_validate_classifier_output_features(output_features, class_labels)
if len(output_features) == 1:
predicted_class_output, pred_cl_type = output_features[0]
score_output = None
elif len(output_features) == 2:
predicted_class_output, pred_cl_type = output_features[0]
score_output, score_output_type = output_features[1]
else:
raise ValueError("Provided output classes for a classifier must be "
"a list of features, predicted class and (optionally) class_score.")
spec.description.predictedFeatureName = predicted_class_output
# Are they out of order?
if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()):
raise ValueError("Provided predicted class output type not Int64 or String (%s)."
% repr(pred_cl_type))
if score_output is not None:
if not isinstance(score_output_type, datatypes.Dictionary):
raise ValueError("Provided class score output type not a Dictionary (%s)."
% repr(score_output_type))
if score_output_type.key_type != pred_cl_type:
raise ValueError(("Provided class score output (%s) key_type (%s) does not "
"match type of class prediction (%s).")
% (score_output, repr(score_output_type.key_type), repr(pred_cl_type)))
spec.description.predictedProbabilitiesName = score_output
# add input
for index, (cur_input_name, input_type) in enumerate(features):
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, input_type)
# add output
for index, (cur_output_name, output_type) in enumerate(output_features):
output_ = spec.description.output.add()
output_.name = cur_output_name
datatypes._set_datatype(output_.type, output_type)
# Worry about the class labels
if pred_cl_type == datatypes.String():
try:
for c in class_labels:
getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
pass
else:
for c in class_labels:
conv_error = False
try:
if not (int(c) == c):
conv_error = True
except:
conv_error = True
if conv_error:
raise TypeError(("Cannot cast '%s' class to an int type " % str(c))
+ "(class type determined by type of first class).")
try:
getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
break
# And we are done!
return spec | python | def set_classifier_interface_params(spec, features, class_labels,
model_accessor_for_class_labels, output_features = None):
"""
Common utilities to set the regression interface params.
"""
# Normalize the features list.
features = _fm.process_or_validate_features(features)
if class_labels is None:
raise ValueError("List of class labels must be provided.")
n_classes = len(class_labels)
output_features = _fm.process_or_validate_classifier_output_features(output_features, class_labels)
if len(output_features) == 1:
predicted_class_output, pred_cl_type = output_features[0]
score_output = None
elif len(output_features) == 2:
predicted_class_output, pred_cl_type = output_features[0]
score_output, score_output_type = output_features[1]
else:
raise ValueError("Provided output classes for a classifier must be "
"a list of features, predicted class and (optionally) class_score.")
spec.description.predictedFeatureName = predicted_class_output
# Are they out of order?
if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()):
raise ValueError("Provided predicted class output type not Int64 or String (%s)."
% repr(pred_cl_type))
if score_output is not None:
if not isinstance(score_output_type, datatypes.Dictionary):
raise ValueError("Provided class score output type not a Dictionary (%s)."
% repr(score_output_type))
if score_output_type.key_type != pred_cl_type:
raise ValueError(("Provided class score output (%s) key_type (%s) does not "
"match type of class prediction (%s).")
% (score_output, repr(score_output_type.key_type), repr(pred_cl_type)))
spec.description.predictedProbabilitiesName = score_output
# add input
for index, (cur_input_name, input_type) in enumerate(features):
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, input_type)
# add output
for index, (cur_output_name, output_type) in enumerate(output_features):
output_ = spec.description.output.add()
output_.name = cur_output_name
datatypes._set_datatype(output_.type, output_type)
# Worry about the class labels
if pred_cl_type == datatypes.String():
try:
for c in class_labels:
getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
pass
else:
for c in class_labels:
conv_error = False
try:
if not (int(c) == c):
conv_error = True
except:
conv_error = True
if conv_error:
raise TypeError(("Cannot cast '%s' class to an int type " % str(c))
+ "(class type determined by type of first class).")
try:
getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
break
# And we are done!
return spec | [
"def",
"set_classifier_interface_params",
"(",
"spec",
",",
"features",
",",
"class_labels",
",",
"model_accessor_for_class_labels",
",",
"output_features",
"=",
"None",
")",
":",
"# Normalize the features list.",
"features",
"=",
"_fm",
".",
"process_or_validate_features",
"(",
"features",
")",
"if",
"class_labels",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"List of class labels must be provided.\"",
")",
"n_classes",
"=",
"len",
"(",
"class_labels",
")",
"output_features",
"=",
"_fm",
".",
"process_or_validate_classifier_output_features",
"(",
"output_features",
",",
"class_labels",
")",
"if",
"len",
"(",
"output_features",
")",
"==",
"1",
":",
"predicted_class_output",
",",
"pred_cl_type",
"=",
"output_features",
"[",
"0",
"]",
"score_output",
"=",
"None",
"elif",
"len",
"(",
"output_features",
")",
"==",
"2",
":",
"predicted_class_output",
",",
"pred_cl_type",
"=",
"output_features",
"[",
"0",
"]",
"score_output",
",",
"score_output_type",
"=",
"output_features",
"[",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Provided output classes for a classifier must be \"",
"\"a list of features, predicted class and (optionally) class_score.\"",
")",
"spec",
".",
"description",
".",
"predictedFeatureName",
"=",
"predicted_class_output",
"# Are they out of order?",
"if",
"not",
"(",
"pred_cl_type",
"==",
"datatypes",
".",
"Int64",
"(",
")",
"or",
"pred_cl_type",
"==",
"datatypes",
".",
"String",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Provided predicted class output type not Int64 or String (%s).\"",
"%",
"repr",
"(",
"pred_cl_type",
")",
")",
"if",
"score_output",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"score_output_type",
",",
"datatypes",
".",
"Dictionary",
")",
":",
"raise",
"ValueError",
"(",
"\"Provided class score output type not a Dictionary (%s).\"",
"%",
"repr",
"(",
"score_output_type",
")",
")",
"if",
"score_output_type",
".",
"key_type",
"!=",
"pred_cl_type",
":",
"raise",
"ValueError",
"(",
"(",
"\"Provided class score output (%s) key_type (%s) does not \"",
"\"match type of class prediction (%s).\"",
")",
"%",
"(",
"score_output",
",",
"repr",
"(",
"score_output_type",
".",
"key_type",
")",
",",
"repr",
"(",
"pred_cl_type",
")",
")",
")",
"spec",
".",
"description",
".",
"predictedProbabilitiesName",
"=",
"score_output",
"# add input",
"for",
"index",
",",
"(",
"cur_input_name",
",",
"input_type",
")",
"in",
"enumerate",
"(",
"features",
")",
":",
"input_",
"=",
"spec",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"input_",
".",
"name",
"=",
"cur_input_name",
"datatypes",
".",
"_set_datatype",
"(",
"input_",
".",
"type",
",",
"input_type",
")",
"# add output",
"for",
"index",
",",
"(",
"cur_output_name",
",",
"output_type",
")",
"in",
"enumerate",
"(",
"output_features",
")",
":",
"output_",
"=",
"spec",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"output_",
".",
"name",
"=",
"cur_output_name",
"datatypes",
".",
"_set_datatype",
"(",
"output_",
".",
"type",
",",
"output_type",
")",
"# Worry about the class labels",
"if",
"pred_cl_type",
"==",
"datatypes",
".",
"String",
"(",
")",
":",
"try",
":",
"for",
"c",
"in",
"class_labels",
":",
"getattr",
"(",
"spec",
",",
"model_accessor_for_class_labels",
")",
".",
"stringClassLabels",
".",
"vector",
".",
"append",
"(",
"str",
"(",
"c",
")",
")",
"# Not all the classifiers have class labels; in particular the pipeline",
"# classifier. Thus it's not an error if we can't actually set them.",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"for",
"c",
"in",
"class_labels",
":",
"conv_error",
"=",
"False",
"try",
":",
"if",
"not",
"(",
"int",
"(",
"c",
")",
"==",
"c",
")",
":",
"conv_error",
"=",
"True",
"except",
":",
"conv_error",
"=",
"True",
"if",
"conv_error",
":",
"raise",
"TypeError",
"(",
"(",
"\"Cannot cast '%s' class to an int type \"",
"%",
"str",
"(",
"c",
")",
")",
"+",
"\"(class type determined by type of first class).\"",
")",
"try",
":",
"getattr",
"(",
"spec",
",",
"model_accessor_for_class_labels",
")",
".",
"int64ClassLabels",
".",
"vector",
".",
"append",
"(",
"int",
"(",
"c",
")",
")",
"# Not all the classifiers have class labels; in particular the pipeline",
"# classifier. Thus it's not an error if we can't actually set them.",
"except",
"AttributeError",
":",
"break",
"# And we are done!",
"return",
"spec"
] | Common utilities to set the regression interface params. | [
"Common",
"utilities",
"to",
"set",
"the",
"regression",
"interface",
"params",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py#L13-L100 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py | set_regressor_interface_params | def set_regressor_interface_params(spec, features, output_features):
""" Common utilities to set the regressor interface params.
"""
if output_features is None:
output_features = [("predicted_class", datatypes.Double())]
else:
output_features = _fm.process_or_validate_features(output_features, 1)
if len(output_features) != 1:
raise ValueError("Provided output features for a regressor must be "
"one Double feature.")
if output_features[0][1] != datatypes.Double():
raise ValueError("Output type of a regressor must be a Double.")
prediction_name = output_features[0][0]
spec.description.predictedFeatureName = prediction_name
# Normalize the features list.
features = _fm.process_or_validate_features(features)
# add input and output features
for cur_input_name, feature_type in features:
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, feature_type)
output_ = spec.description.output.add()
output_.name = prediction_name
datatypes._set_datatype(output_.type, 'Double')
return spec | python | def set_regressor_interface_params(spec, features, output_features):
""" Common utilities to set the regressor interface params.
"""
if output_features is None:
output_features = [("predicted_class", datatypes.Double())]
else:
output_features = _fm.process_or_validate_features(output_features, 1)
if len(output_features) != 1:
raise ValueError("Provided output features for a regressor must be "
"one Double feature.")
if output_features[0][1] != datatypes.Double():
raise ValueError("Output type of a regressor must be a Double.")
prediction_name = output_features[0][0]
spec.description.predictedFeatureName = prediction_name
# Normalize the features list.
features = _fm.process_or_validate_features(features)
# add input and output features
for cur_input_name, feature_type in features:
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, feature_type)
output_ = spec.description.output.add()
output_.name = prediction_name
datatypes._set_datatype(output_.type, 'Double')
return spec | [
"def",
"set_regressor_interface_params",
"(",
"spec",
",",
"features",
",",
"output_features",
")",
":",
"if",
"output_features",
"is",
"None",
":",
"output_features",
"=",
"[",
"(",
"\"predicted_class\"",
",",
"datatypes",
".",
"Double",
"(",
")",
")",
"]",
"else",
":",
"output_features",
"=",
"_fm",
".",
"process_or_validate_features",
"(",
"output_features",
",",
"1",
")",
"if",
"len",
"(",
"output_features",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Provided output features for a regressor must be \"",
"\"one Double feature.\"",
")",
"if",
"output_features",
"[",
"0",
"]",
"[",
"1",
"]",
"!=",
"datatypes",
".",
"Double",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Output type of a regressor must be a Double.\"",
")",
"prediction_name",
"=",
"output_features",
"[",
"0",
"]",
"[",
"0",
"]",
"spec",
".",
"description",
".",
"predictedFeatureName",
"=",
"prediction_name",
"# Normalize the features list.",
"features",
"=",
"_fm",
".",
"process_or_validate_features",
"(",
"features",
")",
"# add input and output features",
"for",
"cur_input_name",
",",
"feature_type",
"in",
"features",
":",
"input_",
"=",
"spec",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"input_",
".",
"name",
"=",
"cur_input_name",
"datatypes",
".",
"_set_datatype",
"(",
"input_",
".",
"type",
",",
"feature_type",
")",
"output_",
"=",
"spec",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"output_",
".",
"name",
"=",
"prediction_name",
"datatypes",
".",
"_set_datatype",
"(",
"output_",
".",
"type",
",",
"'Double'",
")",
"return",
"spec"
] | Common utilities to set the regressor interface params. | [
"Common",
"utilities",
"to",
"set",
"the",
"regressor",
"interface",
"params",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py#L102-L132 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py | set_transform_interface_params | def set_transform_interface_params(spec, input_features, output_features, are_optional = False):
""" Common utilities to set transform interface params.
"""
input_features = _fm.process_or_validate_features(input_features)
output_features = _fm.process_or_validate_features(output_features)
# Add input and output features
for (fname, ftype) in input_features:
input_ = spec.description.input.add()
input_.name = fname
datatypes._set_datatype(input_.type, ftype)
if are_optional:
input_.type.isOptional = are_optional
for (fname, ftype) in output_features:
output_ = spec.description.output.add()
output_.name = fname
datatypes._set_datatype(output_.type, ftype)
return spec | python | def set_transform_interface_params(spec, input_features, output_features, are_optional = False):
""" Common utilities to set transform interface params.
"""
input_features = _fm.process_or_validate_features(input_features)
output_features = _fm.process_or_validate_features(output_features)
# Add input and output features
for (fname, ftype) in input_features:
input_ = spec.description.input.add()
input_.name = fname
datatypes._set_datatype(input_.type, ftype)
if are_optional:
input_.type.isOptional = are_optional
for (fname, ftype) in output_features:
output_ = spec.description.output.add()
output_.name = fname
datatypes._set_datatype(output_.type, ftype)
return spec | [
"def",
"set_transform_interface_params",
"(",
"spec",
",",
"input_features",
",",
"output_features",
",",
"are_optional",
"=",
"False",
")",
":",
"input_features",
"=",
"_fm",
".",
"process_or_validate_features",
"(",
"input_features",
")",
"output_features",
"=",
"_fm",
".",
"process_or_validate_features",
"(",
"output_features",
")",
"# Add input and output features",
"for",
"(",
"fname",
",",
"ftype",
")",
"in",
"input_features",
":",
"input_",
"=",
"spec",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"input_",
".",
"name",
"=",
"fname",
"datatypes",
".",
"_set_datatype",
"(",
"input_",
".",
"type",
",",
"ftype",
")",
"if",
"are_optional",
":",
"input_",
".",
"type",
".",
"isOptional",
"=",
"are_optional",
"for",
"(",
"fname",
",",
"ftype",
")",
"in",
"output_features",
":",
"output_",
"=",
"spec",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"output_",
".",
"name",
"=",
"fname",
"datatypes",
".",
"_set_datatype",
"(",
"output_",
".",
"type",
",",
"ftype",
")",
"return",
"spec"
] | Common utilities to set transform interface params. | [
"Common",
"utilities",
"to",
"set",
"transform",
"interface",
"params",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/_interface_management.py#L134-L153 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_sframe_sequence_iterator.py | prep_data | def prep_data(data, features, session_id, prediction_window, predictions_in_chunk, target=None, verbose=True):
"""
Convert SFrame to batch form, where each row contains a sequence of length
predictions_in_chunk * prediction_window, and there is a single label per
prediction window.
"""
if target is None:
target = ""
if verbose:
result_dict = _extensions._activity_classifier_prepare_data_verbose(
data, features, session_id, prediction_window, predictions_in_chunk, target)
else:
result_dict = _extensions._activity_classifier_prepare_data(
data, features, session_id, prediction_window, predictions_in_chunk, target)
return result_dict["converted_data"] , result_dict["num_of_sessions"] | python | def prep_data(data, features, session_id, prediction_window, predictions_in_chunk, target=None, verbose=True):
"""
Convert SFrame to batch form, where each row contains a sequence of length
predictions_in_chunk * prediction_window, and there is a single label per
prediction window.
"""
if target is None:
target = ""
if verbose:
result_dict = _extensions._activity_classifier_prepare_data_verbose(
data, features, session_id, prediction_window, predictions_in_chunk, target)
else:
result_dict = _extensions._activity_classifier_prepare_data(
data, features, session_id, prediction_window, predictions_in_chunk, target)
return result_dict["converted_data"] , result_dict["num_of_sessions"] | [
"def",
"prep_data",
"(",
"data",
",",
"features",
",",
"session_id",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"target",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"target",
"is",
"None",
":",
"target",
"=",
"\"\"",
"if",
"verbose",
":",
"result_dict",
"=",
"_extensions",
".",
"_activity_classifier_prepare_data_verbose",
"(",
"data",
",",
"features",
",",
"session_id",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"target",
")",
"else",
":",
"result_dict",
"=",
"_extensions",
".",
"_activity_classifier_prepare_data",
"(",
"data",
",",
"features",
",",
"session_id",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"target",
")",
"return",
"result_dict",
"[",
"\"converted_data\"",
"]",
",",
"result_dict",
"[",
"\"num_of_sessions\"",
"]"
] | Convert SFrame to batch form, where each row contains a sequence of length
predictions_in_chunk * prediction_window, and there is a single label per
prediction window. | [
"Convert",
"SFrame",
"to",
"batch",
"form",
"where",
"each",
"row",
"contains",
"a",
"sequence",
"of",
"length",
"predictions_in_chunk",
"*",
"prediction_window",
"and",
"there",
"is",
"a",
"single",
"label",
"per",
"prediction",
"window",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_sframe_sequence_iterator.py#L26-L42 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_sframe_sequence_iterator.py | _load_into_numpy | def _load_into_numpy(sf, np_array, start, end, strides=None, shape=None):
"""Loads into numpy array from SFrame, assuming SFrame stores data flattened"""
np_array[:] = 0.0
np_array_2d = np_array.reshape((np_array.shape[0], np_array.shape[1] * np_array.shape[2]))
_extensions.sframe_load_to_numpy(sf, np_array.ctypes.data,
np_array_2d.strides, np_array_2d.shape,
start, end) | python | def _load_into_numpy(sf, np_array, start, end, strides=None, shape=None):
"""Loads into numpy array from SFrame, assuming SFrame stores data flattened"""
np_array[:] = 0.0
np_array_2d = np_array.reshape((np_array.shape[0], np_array.shape[1] * np_array.shape[2]))
_extensions.sframe_load_to_numpy(sf, np_array.ctypes.data,
np_array_2d.strides, np_array_2d.shape,
start, end) | [
"def",
"_load_into_numpy",
"(",
"sf",
",",
"np_array",
",",
"start",
",",
"end",
",",
"strides",
"=",
"None",
",",
"shape",
"=",
"None",
")",
":",
"np_array",
"[",
":",
"]",
"=",
"0.0",
"np_array_2d",
"=",
"np_array",
".",
"reshape",
"(",
"(",
"np_array",
".",
"shape",
"[",
"0",
"]",
",",
"np_array",
".",
"shape",
"[",
"1",
"]",
"*",
"np_array",
".",
"shape",
"[",
"2",
"]",
")",
")",
"_extensions",
".",
"sframe_load_to_numpy",
"(",
"sf",
",",
"np_array",
".",
"ctypes",
".",
"data",
",",
"np_array_2d",
".",
"strides",
",",
"np_array_2d",
".",
"shape",
",",
"start",
",",
"end",
")"
] | Loads into numpy array from SFrame, assuming SFrame stores data flattened | [
"Loads",
"into",
"numpy",
"array",
"from",
"SFrame",
"assuming",
"SFrame",
"stores",
"data",
"flattened"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_sframe_sequence_iterator.py#L49-L55 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.set_input | def set_input(self, input_names, input_dims):
"""
Set the inputs of the network spec.
Parameters
----------
input_names: [str]
List of input names of the network.
input_dims: [tuple]
List of input dimensions of the network. The ordering of input_dims
is the same as input_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec inputs to be 3 dimensional vector data1 and
# 4 dimensional vector data2.
>>> builder.set_input(input_names = ['data1', 'data2'], [(3,), (4,)])
See Also
--------
set_output, set_class_labels
"""
spec = self.spec
nn_spec = self.nn_spec
for idx, dim in enumerate(input_dims):
if len(dim) == 3:
input_shape = (dim[0], dim[1], dim[2])
elif len(dim) == 2:
input_shape = (dim[1], )
elif len(dim) == 1:
input_shape = tuple(dim)
else:
raise RuntimeError("Attempting to add a neural network " +
"input with rank " + str(len(dim)) +
". All networks should take inputs of rank 1 or 3.")
spec.description.input[idx].type.multiArrayType.ClearField("shape")
spec.description.input[idx].type.multiArrayType.shape.extend(input_shape)
# TODO: if it's an embedding, this should be integer
spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE | python | def set_input(self, input_names, input_dims):
"""
Set the inputs of the network spec.
Parameters
----------
input_names: [str]
List of input names of the network.
input_dims: [tuple]
List of input dimensions of the network. The ordering of input_dims
is the same as input_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec inputs to be 3 dimensional vector data1 and
# 4 dimensional vector data2.
>>> builder.set_input(input_names = ['data1', 'data2'], [(3,), (4,)])
See Also
--------
set_output, set_class_labels
"""
spec = self.spec
nn_spec = self.nn_spec
for idx, dim in enumerate(input_dims):
if len(dim) == 3:
input_shape = (dim[0], dim[1], dim[2])
elif len(dim) == 2:
input_shape = (dim[1], )
elif len(dim) == 1:
input_shape = tuple(dim)
else:
raise RuntimeError("Attempting to add a neural network " +
"input with rank " + str(len(dim)) +
". All networks should take inputs of rank 1 or 3.")
spec.description.input[idx].type.multiArrayType.ClearField("shape")
spec.description.input[idx].type.multiArrayType.shape.extend(input_shape)
# TODO: if it's an embedding, this should be integer
spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE | [
"def",
"set_input",
"(",
"self",
",",
"input_names",
",",
"input_dims",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"for",
"idx",
",",
"dim",
"in",
"enumerate",
"(",
"input_dims",
")",
":",
"if",
"len",
"(",
"dim",
")",
"==",
"3",
":",
"input_shape",
"=",
"(",
"dim",
"[",
"0",
"]",
",",
"dim",
"[",
"1",
"]",
",",
"dim",
"[",
"2",
"]",
")",
"elif",
"len",
"(",
"dim",
")",
"==",
"2",
":",
"input_shape",
"=",
"(",
"dim",
"[",
"1",
"]",
",",
")",
"elif",
"len",
"(",
"dim",
")",
"==",
"1",
":",
"input_shape",
"=",
"tuple",
"(",
"dim",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Attempting to add a neural network \"",
"+",
"\"input with rank \"",
"+",
"str",
"(",
"len",
"(",
"dim",
")",
")",
"+",
"\". All networks should take inputs of rank 1 or 3.\"",
")",
"spec",
".",
"description",
".",
"input",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"ClearField",
"(",
"\"shape\"",
")",
"spec",
".",
"description",
".",
"input",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"shape",
".",
"extend",
"(",
"input_shape",
")",
"# TODO: if it's an embedding, this should be integer",
"spec",
".",
"description",
".",
"input",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"dataType",
"=",
"_Model_pb2",
".",
"ArrayFeatureType",
".",
"DOUBLE"
] | Set the inputs of the network spec.
Parameters
----------
input_names: [str]
List of input names of the network.
input_dims: [tuple]
List of input dimensions of the network. The ordering of input_dims
is the same as input_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec inputs to be 3 dimensional vector data1 and
# 4 dimensional vector data2.
>>> builder.set_input(input_names = ['data1', 'data2'], [(3,), (4,)])
See Also
--------
set_output, set_class_labels | [
"Set",
"the",
"inputs",
"of",
"the",
"network",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L159-L202 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.set_output | def set_output(self, output_names, output_dims):
"""
Set the outputs of the network spec.
Parameters
----------
output_names: [str]
List of output names of the network.
output_dims: [tuple]
List of output dimensions of the network. The ordering of output_dims is the same
as output_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec outputs to be 3 dimensional vector feature1 and
# 4 dimensional vector feature2.
>>> builder.set_output(output_names = ['feature1', 'feature2'], [(3,), (4,)])
See Also
--------
set_input, set_class_labels
"""
spec = self.spec
nn_spec = self.nn_spec
for idx, dim in enumerate(output_dims):
spec.description.output[idx].type.multiArrayType.ClearField("shape")
spec.description.output[idx].type.multiArrayType.shape.extend(dim)
spec.description.output[idx].type.multiArrayType.dataType = \
_Model_pb2.ArrayFeatureType.DOUBLE | python | def set_output(self, output_names, output_dims):
"""
Set the outputs of the network spec.
Parameters
----------
output_names: [str]
List of output names of the network.
output_dims: [tuple]
List of output dimensions of the network. The ordering of output_dims is the same
as output_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec outputs to be 3 dimensional vector feature1 and
# 4 dimensional vector feature2.
>>> builder.set_output(output_names = ['feature1', 'feature2'], [(3,), (4,)])
See Also
--------
set_input, set_class_labels
"""
spec = self.spec
nn_spec = self.nn_spec
for idx, dim in enumerate(output_dims):
spec.description.output[idx].type.multiArrayType.ClearField("shape")
spec.description.output[idx].type.multiArrayType.shape.extend(dim)
spec.description.output[idx].type.multiArrayType.dataType = \
_Model_pb2.ArrayFeatureType.DOUBLE | [
"def",
"set_output",
"(",
"self",
",",
"output_names",
",",
"output_dims",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"for",
"idx",
",",
"dim",
"in",
"enumerate",
"(",
"output_dims",
")",
":",
"spec",
".",
"description",
".",
"output",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"ClearField",
"(",
"\"shape\"",
")",
"spec",
".",
"description",
".",
"output",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"shape",
".",
"extend",
"(",
"dim",
")",
"spec",
".",
"description",
".",
"output",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"dataType",
"=",
"_Model_pb2",
".",
"ArrayFeatureType",
".",
"DOUBLE"
] | Set the outputs of the network spec.
Parameters
----------
output_names: [str]
List of output names of the network.
output_dims: [tuple]
List of output dimensions of the network. The ordering of output_dims is the same
as output_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec outputs to be 3 dimensional vector feature1 and
# 4 dimensional vector feature2.
>>> builder.set_output(output_names = ['feature1', 'feature2'], [(3,), (4,)])
See Also
--------
set_input, set_class_labels | [
"Set",
"the",
"outputs",
"of",
"the",
"network",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L204-L235 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.set_class_labels | def set_class_labels(self, class_labels, predicted_feature_name = 'classLabel', prediction_blob = ''):
"""
Set class labels to the model spec to make it a neural network classifier.
Parameters
----------
class_labels: list[int or str]
A list of integers or strings that map the index of the output of a
neural network to labels in a classifier.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the
Core ML neural network classifier. Defaults to 'class_output'.
prediction_blob: str
If provided, then this is the name of the neural network blob which
generates the probabilities for each class label (typically the output
of a softmax layer). If not provided, then the last output layer is
assumed.
See Also
--------
set_input, set_output, set_pre_processing_parameters
"""
spec = self.spec
nn_spec = self.nn_spec
if len(spec.description.output) == 0:
raise ValueError(
"Model should have at least one output (the probabilities) to automatically make it a classifier.")
probOutput = spec.description.output[0]
probOutput.type.dictionaryType.MergeFromString(b'')
if len(class_labels) == 0:
return
class_type = type(class_labels[0])
if class_type not in [int, str]:
raise TypeError("Class labels must be of type Integer or String. (not %s)" % class_type)
spec.description.predictedProbabilitiesName = probOutput.name
spec.description.predictedFeatureName = predicted_feature_name
classLabel = spec.description.output.add()
classLabel.name = predicted_feature_name
if class_type == int:
nn_spec.ClearField('int64ClassLabels')
probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')
classLabel.type.int64Type.MergeFromString(b'')
for c in class_labels:
nn_spec.int64ClassLabels.vector.append(c)
else:
nn_spec.ClearField('stringClassLabels')
probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')
classLabel.type.stringType.MergeFromString(b'')
for c in class_labels:
nn_spec.stringClassLabels.vector.append(c)
if prediction_blob != '':
# correctness here will be checked in the validator -- i.e. to
# make sure this string corresponds to a real blob
nn_spec.labelProbabilityLayerName = prediction_blob
else: #not provided
# assume it's the last blob produced in the network
nn_spec.labelProbabilityLayerName = nn_spec.layers[-1].output[0] | python | def set_class_labels(self, class_labels, predicted_feature_name = 'classLabel', prediction_blob = ''):
"""
Set class labels to the model spec to make it a neural network classifier.
Parameters
----------
class_labels: list[int or str]
A list of integers or strings that map the index of the output of a
neural network to labels in a classifier.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the
Core ML neural network classifier. Defaults to 'class_output'.
prediction_blob: str
If provided, then this is the name of the neural network blob which
generates the probabilities for each class label (typically the output
of a softmax layer). If not provided, then the last output layer is
assumed.
See Also
--------
set_input, set_output, set_pre_processing_parameters
"""
spec = self.spec
nn_spec = self.nn_spec
if len(spec.description.output) == 0:
raise ValueError(
"Model should have at least one output (the probabilities) to automatically make it a classifier.")
probOutput = spec.description.output[0]
probOutput.type.dictionaryType.MergeFromString(b'')
if len(class_labels) == 0:
return
class_type = type(class_labels[0])
if class_type not in [int, str]:
raise TypeError("Class labels must be of type Integer or String. (not %s)" % class_type)
spec.description.predictedProbabilitiesName = probOutput.name
spec.description.predictedFeatureName = predicted_feature_name
classLabel = spec.description.output.add()
classLabel.name = predicted_feature_name
if class_type == int:
nn_spec.ClearField('int64ClassLabels')
probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')
classLabel.type.int64Type.MergeFromString(b'')
for c in class_labels:
nn_spec.int64ClassLabels.vector.append(c)
else:
nn_spec.ClearField('stringClassLabels')
probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')
classLabel.type.stringType.MergeFromString(b'')
for c in class_labels:
nn_spec.stringClassLabels.vector.append(c)
if prediction_blob != '':
# correctness here will be checked in the validator -- i.e. to
# make sure this string corresponds to a real blob
nn_spec.labelProbabilityLayerName = prediction_blob
else: #not provided
# assume it's the last blob produced in the network
nn_spec.labelProbabilityLayerName = nn_spec.layers[-1].output[0] | [
"def",
"set_class_labels",
"(",
"self",
",",
"class_labels",
",",
"predicted_feature_name",
"=",
"'classLabel'",
",",
"prediction_blob",
"=",
"''",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"if",
"len",
"(",
"spec",
".",
"description",
".",
"output",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Model should have at least one output (the probabilities) to automatically make it a classifier.\"",
")",
"probOutput",
"=",
"spec",
".",
"description",
".",
"output",
"[",
"0",
"]",
"probOutput",
".",
"type",
".",
"dictionaryType",
".",
"MergeFromString",
"(",
"b''",
")",
"if",
"len",
"(",
"class_labels",
")",
"==",
"0",
":",
"return",
"class_type",
"=",
"type",
"(",
"class_labels",
"[",
"0",
"]",
")",
"if",
"class_type",
"not",
"in",
"[",
"int",
",",
"str",
"]",
":",
"raise",
"TypeError",
"(",
"\"Class labels must be of type Integer or String. (not %s)\"",
"%",
"class_type",
")",
"spec",
".",
"description",
".",
"predictedProbabilitiesName",
"=",
"probOutput",
".",
"name",
"spec",
".",
"description",
".",
"predictedFeatureName",
"=",
"predicted_feature_name",
"classLabel",
"=",
"spec",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"classLabel",
".",
"name",
"=",
"predicted_feature_name",
"if",
"class_type",
"==",
"int",
":",
"nn_spec",
".",
"ClearField",
"(",
"'int64ClassLabels'",
")",
"probOutput",
".",
"type",
".",
"dictionaryType",
".",
"int64KeyType",
".",
"MergeFromString",
"(",
"b''",
")",
"classLabel",
".",
"type",
".",
"int64Type",
".",
"MergeFromString",
"(",
"b''",
")",
"for",
"c",
"in",
"class_labels",
":",
"nn_spec",
".",
"int64ClassLabels",
".",
"vector",
".",
"append",
"(",
"c",
")",
"else",
":",
"nn_spec",
".",
"ClearField",
"(",
"'stringClassLabels'",
")",
"probOutput",
".",
"type",
".",
"dictionaryType",
".",
"stringKeyType",
".",
"MergeFromString",
"(",
"b''",
")",
"classLabel",
".",
"type",
".",
"stringType",
".",
"MergeFromString",
"(",
"b''",
")",
"for",
"c",
"in",
"class_labels",
":",
"nn_spec",
".",
"stringClassLabels",
".",
"vector",
".",
"append",
"(",
"c",
")",
"if",
"prediction_blob",
"!=",
"''",
":",
"# correctness here will be checked in the validator -- i.e. to",
"# make sure this string corresponds to a real blob",
"nn_spec",
".",
"labelProbabilityLayerName",
"=",
"prediction_blob",
"else",
":",
"#not provided",
"# assume it's the last blob produced in the network",
"nn_spec",
".",
"labelProbabilityLayerName",
"=",
"nn_spec",
".",
"layers",
"[",
"-",
"1",
"]",
".",
"output",
"[",
"0",
"]"
] | Set class labels to the model spec to make it a neural network classifier.
Parameters
----------
class_labels: list[int or str]
A list of integers or strings that map the index of the output of a
neural network to labels in a classifier.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the
Core ML neural network classifier. Defaults to 'class_output'.
prediction_blob: str
If provided, then this is the name of the neural network blob which
generates the probabilities for each class label (typically the output
of a softmax layer). If not provided, then the last output layer is
assumed.
See Also
--------
set_input, set_output, set_pre_processing_parameters | [
"Set",
"class",
"labels",
"to",
"the",
"model",
"spec",
"to",
"make",
"it",
"a",
"neural",
"network",
"classifier",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L237-L299 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_optionals | def add_optionals(self, optionals_in, optionals_out):
"""
Add optional inputs and outputs to the model spec.
Parameters
----------
optionals_in: [str]
List of inputs that are optionals.
optionals_out: [str]
List of outputs that are optionals.
See Also
--------
set_input, set_output
"""
spec = self.spec
if (not optionals_in) and (not optionals_out):
return
# assuming single sizes here
input_types = [datatypes.Array(dim) for (name, dim) in optionals_in]
output_types = [datatypes.Array(dim) for (name, dim) in optionals_out]
input_names = [str(name) for (name, dim) in optionals_in]
output_names = [str(name) for (name, dim) in optionals_out]
input_features = list(zip(input_names, input_types))
output_features = list(zip(output_names, output_types))
len_before_in = len(spec.description.input)
len_before_out = len(spec.description.output)
# this appends to the existing model interface
set_transform_interface_params(spec, input_features, output_features, True)
# add types for any extra hidden inputs
for idx in range(len_before_in, len(spec.description.input)):
spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
for idx in range(len_before_out, len(spec.description.output)):
spec.description.output[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE | python | def add_optionals(self, optionals_in, optionals_out):
"""
Add optional inputs and outputs to the model spec.
Parameters
----------
optionals_in: [str]
List of inputs that are optionals.
optionals_out: [str]
List of outputs that are optionals.
See Also
--------
set_input, set_output
"""
spec = self.spec
if (not optionals_in) and (not optionals_out):
return
# assuming single sizes here
input_types = [datatypes.Array(dim) for (name, dim) in optionals_in]
output_types = [datatypes.Array(dim) for (name, dim) in optionals_out]
input_names = [str(name) for (name, dim) in optionals_in]
output_names = [str(name) for (name, dim) in optionals_out]
input_features = list(zip(input_names, input_types))
output_features = list(zip(output_names, output_types))
len_before_in = len(spec.description.input)
len_before_out = len(spec.description.output)
# this appends to the existing model interface
set_transform_interface_params(spec, input_features, output_features, True)
# add types for any extra hidden inputs
for idx in range(len_before_in, len(spec.description.input)):
spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
for idx in range(len_before_out, len(spec.description.output)):
spec.description.output[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE | [
"def",
"add_optionals",
"(",
"self",
",",
"optionals_in",
",",
"optionals_out",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"if",
"(",
"not",
"optionals_in",
")",
"and",
"(",
"not",
"optionals_out",
")",
":",
"return",
"# assuming single sizes here",
"input_types",
"=",
"[",
"datatypes",
".",
"Array",
"(",
"dim",
")",
"for",
"(",
"name",
",",
"dim",
")",
"in",
"optionals_in",
"]",
"output_types",
"=",
"[",
"datatypes",
".",
"Array",
"(",
"dim",
")",
"for",
"(",
"name",
",",
"dim",
")",
"in",
"optionals_out",
"]",
"input_names",
"=",
"[",
"str",
"(",
"name",
")",
"for",
"(",
"name",
",",
"dim",
")",
"in",
"optionals_in",
"]",
"output_names",
"=",
"[",
"str",
"(",
"name",
")",
"for",
"(",
"name",
",",
"dim",
")",
"in",
"optionals_out",
"]",
"input_features",
"=",
"list",
"(",
"zip",
"(",
"input_names",
",",
"input_types",
")",
")",
"output_features",
"=",
"list",
"(",
"zip",
"(",
"output_names",
",",
"output_types",
")",
")",
"len_before_in",
"=",
"len",
"(",
"spec",
".",
"description",
".",
"input",
")",
"len_before_out",
"=",
"len",
"(",
"spec",
".",
"description",
".",
"output",
")",
"# this appends to the existing model interface",
"set_transform_interface_params",
"(",
"spec",
",",
"input_features",
",",
"output_features",
",",
"True",
")",
"# add types for any extra hidden inputs",
"for",
"idx",
"in",
"range",
"(",
"len_before_in",
",",
"len",
"(",
"spec",
".",
"description",
".",
"input",
")",
")",
":",
"spec",
".",
"description",
".",
"input",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"dataType",
"=",
"_Model_pb2",
".",
"ArrayFeatureType",
".",
"DOUBLE",
"for",
"idx",
"in",
"range",
"(",
"len_before_out",
",",
"len",
"(",
"spec",
".",
"description",
".",
"output",
")",
")",
":",
"spec",
".",
"description",
".",
"output",
"[",
"idx",
"]",
".",
"type",
".",
"multiArrayType",
".",
"dataType",
"=",
"_Model_pb2",
".",
"ArrayFeatureType",
".",
"DOUBLE"
] | Add optional inputs and outputs to the model spec.
Parameters
----------
optionals_in: [str]
List of inputs that are optionals.
optionals_out: [str]
List of outputs that are optionals.
See Also
--------
set_input, set_output | [
"Add",
"optional",
"inputs",
"and",
"outputs",
"to",
"the",
"model",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L302-L343 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_embedding | def add_embedding(self, name, W, b, input_dim, output_channels, has_bias,
input_name, output_name):
"""
Add an embedding layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array
Weight matrix of shape (output_channels, input_dim).
b: numpy.array
Bias vector of shape (output_channels, ).
input_dim: int
Size of the vocabulary (1 + maximum integer index of the words).
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_inner_product
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
# Fill in the parameters
spec_layer_params = spec_layer.embedding
spec_layer_params.inputDim = input_dim
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
weights.floatValue.extend(map(float, W.flatten()))
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten())) | python | def add_embedding(self, name, W, b, input_dim, output_channels, has_bias,
input_name, output_name):
"""
Add an embedding layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array
Weight matrix of shape (output_channels, input_dim).
b: numpy.array
Bias vector of shape (output_channels, ).
input_dim: int
Size of the vocabulary (1 + maximum integer index of the words).
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_inner_product
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
# Fill in the parameters
spec_layer_params = spec_layer.embedding
spec_layer_params.inputDim = input_dim
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
weights.floatValue.extend(map(float, W.flatten()))
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten())) | [
"def",
"add_embedding",
"(",
"self",
",",
"name",
",",
"W",
",",
"b",
",",
"input_dim",
",",
"output_channels",
",",
"has_bias",
",",
"input_name",
",",
"output_name",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"# Fill in the parameters",
"spec_layer_params",
"=",
"spec_layer",
".",
"embedding",
"spec_layer_params",
".",
"inputDim",
"=",
"input_dim",
"spec_layer_params",
".",
"outputChannels",
"=",
"output_channels",
"spec_layer_params",
".",
"hasBias",
"=",
"has_bias",
"weights",
"=",
"spec_layer_params",
".",
"weights",
"weights",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W",
".",
"flatten",
"(",
")",
")",
")",
"if",
"has_bias",
":",
"bias",
"=",
"spec_layer_params",
".",
"bias",
"bias",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b",
".",
"flatten",
"(",
")",
")",
")"
] | Add an embedding layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array
Weight matrix of shape (output_channels, input_dim).
b: numpy.array
Bias vector of shape (output_channels, ).
input_dim: int
Size of the vocabulary (1 + maximum integer index of the words).
output_channels: int
Number of output channels.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
- If True, the bias vector of this layer is not ignored.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_inner_product | [
"Add",
"an",
"embedding",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L400-L453 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_softmax | def add_softmax(self, name, input_name, output_name):
"""
Add a softmax layer to the model.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_activation, add_inner_product, add_convolution
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.softmax.MergeFromString(b'') | python | def add_softmax(self, name, input_name, output_name):
"""
Add a softmax layer to the model.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_activation, add_inner_product, add_convolution
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.softmax.MergeFromString(b'') | [
"def",
"add_softmax",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"softmax",
".",
"MergeFromString",
"(",
"b''",
")"
] | Add a softmax layer to the model.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_activation, add_inner_product, add_convolution | [
"Add",
"a",
"softmax",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L456-L482 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_activation | def add_activation(self, name, non_linearity, input_name, output_name,
params=None):
"""
Add an activation layer to the model.
Parameters
----------
name: str
The name of this layer
non_linearity: str
The non_linearity (activation) function of this layer.
It can be one of the following:
- 'RELU': Rectified Linear Unit (ReLU) function.
- 'SIGMOID': sigmoid function.
- 'TANH': tanh function.
- 'SCALED_TANH': scaled tanh function, defined as:
`f(x) = alpha * tanh(beta * x)`
where alpha and beta are constant scalars.
- 'SOFTPLUS': softplus function.
- 'SOFTSIGN': softsign function.
- 'SIGMOID_HARD': hard sigmoid function, defined as:
`f(x) = min(max(alpha * x + beta, -1), 1)`
where alpha and beta are constant scalars.
- 'LEAKYRELU': leaky relu function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a constant scalar.
- 'PRELU': Parametric ReLU function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a multi-dimensional array of same size as x.
- 'ELU': Exponential linear unit function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)`
where alpha is a constant scalar.
- 'PARAMETRICSOFTPLUS': Parametric softplus function, defined as:
`f(x) = alpha * log(1 + exp(beta * x))`
where alpha and beta are two multi-dimensional arrays of same size as x.
- 'THRESHOLDEDRELU': Thresholded ReLU function, defined as:
`f(x) = (x >= alpha) * x`
where alpha is a constant scalar.
- 'LINEAR': linear function.
`f(x) = alpha * x + beta`
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
params: [float] | [numpy.array]
Parameters for the activation, depending on non_linearity. Kindly refer to NeuralNetwork.proto for details.
- When non_linearity is one of ['RELU', 'SIGMOID', 'TANH', 'SCALED_TANH', 'SOFTPLUS', 'SOFTSIGN'], params is ignored.
- When non_linearity is one of ['SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'], param is a list of 2 floats
[alpha, beta].
- When non_linearity is one of ['LEAKYRELU', 'ELU', 'THRESHOLDEDRELU'], param is a list of 1 float
[alpha].
- When non_linearity is 'PRELU', param is a list of 1 numpy array [alpha]. The shape of
alpha is (C,), where C is either the number of input channels or
1. When C = 1, same alpha is applied to all channels.
- When non_linearity is 'PARAMETRICSOFTPLUS', param is a list of 2 numpy arrays [alpha,
beta]. The shape of alpha and beta is (C, ), where C is either
the number of input channels or 1. When C = 1, same alpha and
beta are applied to all channels.
See Also
--------
add_convolution, add_softmax
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.activation
# Fill in the parameters
if non_linearity == 'RELU':
spec_layer_params.ReLU.MergeFromString(b'')
elif non_linearity == 'SIGMOID':
spec_layer_params.sigmoid.MergeFromString(b'')
elif non_linearity == 'TANH':
spec_layer_params.tanh.MergeFromString(b'')
elif non_linearity == 'SCALED_TANH':
spec_layer_params.scaledTanh.MergeFromString(b'')
if params is None:
alpha, beta = (0.0, 0.0)
else:
alpha, beta = params[0], params[1]
spec_layer_params.scaledTanh.alpha = alpha
spec_layer_params.scaledTanh.beta = beta
elif non_linearity == 'SOFTPLUS':
spec_layer_params.softplus.MergeFromString(b'')
elif non_linearity == 'SOFTSIGN':
spec_layer_params.softsign.MergeFromString(b'')
elif non_linearity == 'SIGMOID_HARD':
if params is None:
alpha, beta = (0.2, 0.5)
else:
alpha, beta = params[0], params[1]
spec_layer_params.sigmoidHard.alpha = alpha
spec_layer_params.sigmoidHard.beta = beta
elif non_linearity == 'LEAKYRELU':
if params is None:
alpha = 0.3
else:
alpha = params[0]
spec_layer_params.leakyReLU.alpha = float(alpha)
elif non_linearity == 'PRELU':
# PReLU must provide an np array in params[0]
spec_layer_params.PReLU.alpha.floatValue.extend(map(float, params.flatten()))
elif non_linearity == 'ELU':
# ELU must provide an alpha in params[0]
spec_layer_params.ELU.alpha = float(params)
elif non_linearity == 'PARAMETRICSOFTPLUS':
# Parametric softplus must provide two np arrays for alpha and beta
alphas, betas = (params[0], params[1])
# Weight alignment: Keras [H,W,C,F], Espresso [
spec_layer_params.parametricSoftplus.alpha.floatValue.extend(map(float, alphas.flatten()))
spec_layer_params.parametricSoftplus.beta.floatValue.extend(map(float, betas.flatten()))
elif non_linearity == 'THRESHOLDEDRELU':
if params is None:
theta = 1.0
else:
theta = params
spec_layer_params.thresholdedReLU.alpha = float(theta)
elif non_linearity == 'LINEAR':
if params is None:
alpha, beta = (1.0, 0.0)
else:
alpha, beta = params[0], params[1]
spec_layer_params.linear.alpha = alpha
spec_layer_params.linear.beta = beta
else:
raise TypeError("Unknown activation type %s." %(non_linearity)) | python | def add_activation(self, name, non_linearity, input_name, output_name,
params=None):
"""
Add an activation layer to the model.
Parameters
----------
name: str
The name of this layer
non_linearity: str
The non_linearity (activation) function of this layer.
It can be one of the following:
- 'RELU': Rectified Linear Unit (ReLU) function.
- 'SIGMOID': sigmoid function.
- 'TANH': tanh function.
- 'SCALED_TANH': scaled tanh function, defined as:
`f(x) = alpha * tanh(beta * x)`
where alpha and beta are constant scalars.
- 'SOFTPLUS': softplus function.
- 'SOFTSIGN': softsign function.
- 'SIGMOID_HARD': hard sigmoid function, defined as:
`f(x) = min(max(alpha * x + beta, -1), 1)`
where alpha and beta are constant scalars.
- 'LEAKYRELU': leaky relu function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a constant scalar.
- 'PRELU': Parametric ReLU function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a multi-dimensional array of same size as x.
- 'ELU': Exponential linear unit function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)`
where alpha is a constant scalar.
- 'PARAMETRICSOFTPLUS': Parametric softplus function, defined as:
`f(x) = alpha * log(1 + exp(beta * x))`
where alpha and beta are two multi-dimensional arrays of same size as x.
- 'THRESHOLDEDRELU': Thresholded ReLU function, defined as:
`f(x) = (x >= alpha) * x`
where alpha is a constant scalar.
- 'LINEAR': linear function.
`f(x) = alpha * x + beta`
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
params: [float] | [numpy.array]
Parameters for the activation, depending on non_linearity. Kindly refer to NeuralNetwork.proto for details.
- When non_linearity is one of ['RELU', 'SIGMOID', 'TANH', 'SCALED_TANH', 'SOFTPLUS', 'SOFTSIGN'], params is ignored.
- When non_linearity is one of ['SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'], param is a list of 2 floats
[alpha, beta].
- When non_linearity is one of ['LEAKYRELU', 'ELU', 'THRESHOLDEDRELU'], param is a list of 1 float
[alpha].
- When non_linearity is 'PRELU', param is a list of 1 numpy array [alpha]. The shape of
alpha is (C,), where C is either the number of input channels or
1. When C = 1, same alpha is applied to all channels.
- When non_linearity is 'PARAMETRICSOFTPLUS', param is a list of 2 numpy arrays [alpha,
beta]. The shape of alpha and beta is (C, ), where C is either
the number of input channels or 1. When C = 1, same alpha and
beta are applied to all channels.
See Also
--------
add_convolution, add_softmax
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.activation
# Fill in the parameters
if non_linearity == 'RELU':
spec_layer_params.ReLU.MergeFromString(b'')
elif non_linearity == 'SIGMOID':
spec_layer_params.sigmoid.MergeFromString(b'')
elif non_linearity == 'TANH':
spec_layer_params.tanh.MergeFromString(b'')
elif non_linearity == 'SCALED_TANH':
spec_layer_params.scaledTanh.MergeFromString(b'')
if params is None:
alpha, beta = (0.0, 0.0)
else:
alpha, beta = params[0], params[1]
spec_layer_params.scaledTanh.alpha = alpha
spec_layer_params.scaledTanh.beta = beta
elif non_linearity == 'SOFTPLUS':
spec_layer_params.softplus.MergeFromString(b'')
elif non_linearity == 'SOFTSIGN':
spec_layer_params.softsign.MergeFromString(b'')
elif non_linearity == 'SIGMOID_HARD':
if params is None:
alpha, beta = (0.2, 0.5)
else:
alpha, beta = params[0], params[1]
spec_layer_params.sigmoidHard.alpha = alpha
spec_layer_params.sigmoidHard.beta = beta
elif non_linearity == 'LEAKYRELU':
if params is None:
alpha = 0.3
else:
alpha = params[0]
spec_layer_params.leakyReLU.alpha = float(alpha)
elif non_linearity == 'PRELU':
# PReLU must provide an np array in params[0]
spec_layer_params.PReLU.alpha.floatValue.extend(map(float, params.flatten()))
elif non_linearity == 'ELU':
# ELU must provide an alpha in params[0]
spec_layer_params.ELU.alpha = float(params)
elif non_linearity == 'PARAMETRICSOFTPLUS':
# Parametric softplus must provide two np arrays for alpha and beta
alphas, betas = (params[0], params[1])
# Weight alignment: Keras [H,W,C,F], Espresso [
spec_layer_params.parametricSoftplus.alpha.floatValue.extend(map(float, alphas.flatten()))
spec_layer_params.parametricSoftplus.beta.floatValue.extend(map(float, betas.flatten()))
elif non_linearity == 'THRESHOLDEDRELU':
if params is None:
theta = 1.0
else:
theta = params
spec_layer_params.thresholdedReLU.alpha = float(theta)
elif non_linearity == 'LINEAR':
if params is None:
alpha, beta = (1.0, 0.0)
else:
alpha, beta = params[0], params[1]
spec_layer_params.linear.alpha = alpha
spec_layer_params.linear.beta = beta
else:
raise TypeError("Unknown activation type %s." %(non_linearity)) | [
"def",
"add_activation",
"(",
"self",
",",
"name",
",",
"non_linearity",
",",
"input_name",
",",
"output_name",
",",
"params",
"=",
"None",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"activation",
"# Fill in the parameters",
"if",
"non_linearity",
"==",
"'RELU'",
":",
"spec_layer_params",
".",
"ReLU",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"non_linearity",
"==",
"'SIGMOID'",
":",
"spec_layer_params",
".",
"sigmoid",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"non_linearity",
"==",
"'TANH'",
":",
"spec_layer_params",
".",
"tanh",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"non_linearity",
"==",
"'SCALED_TANH'",
":",
"spec_layer_params",
".",
"scaledTanh",
".",
"MergeFromString",
"(",
"b''",
")",
"if",
"params",
"is",
"None",
":",
"alpha",
",",
"beta",
"=",
"(",
"0.0",
",",
"0.0",
")",
"else",
":",
"alpha",
",",
"beta",
"=",
"params",
"[",
"0",
"]",
",",
"params",
"[",
"1",
"]",
"spec_layer_params",
".",
"scaledTanh",
".",
"alpha",
"=",
"alpha",
"spec_layer_params",
".",
"scaledTanh",
".",
"beta",
"=",
"beta",
"elif",
"non_linearity",
"==",
"'SOFTPLUS'",
":",
"spec_layer_params",
".",
"softplus",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"non_linearity",
"==",
"'SOFTSIGN'",
":",
"spec_layer_params",
".",
"softsign",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"non_linearity",
"==",
"'SIGMOID_HARD'",
":",
"if",
"params",
"is",
"None",
":",
"alpha",
",",
"beta",
"=",
"(",
"0.2",
",",
"0.5",
")",
"else",
":",
"alpha",
",",
"beta",
"=",
"params",
"[",
"0",
"]",
",",
"params",
"[",
"1",
"]",
"spec_layer_params",
".",
"sigmoidHard",
".",
"alpha",
"=",
"alpha",
"spec_layer_params",
".",
"sigmoidHard",
".",
"beta",
"=",
"beta",
"elif",
"non_linearity",
"==",
"'LEAKYRELU'",
":",
"if",
"params",
"is",
"None",
":",
"alpha",
"=",
"0.3",
"else",
":",
"alpha",
"=",
"params",
"[",
"0",
"]",
"spec_layer_params",
".",
"leakyReLU",
".",
"alpha",
"=",
"float",
"(",
"alpha",
")",
"elif",
"non_linearity",
"==",
"'PRELU'",
":",
"# PReLU must provide an np array in params[0]",
"spec_layer_params",
".",
"PReLU",
".",
"alpha",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"params",
".",
"flatten",
"(",
")",
")",
")",
"elif",
"non_linearity",
"==",
"'ELU'",
":",
"# ELU must provide an alpha in params[0]",
"spec_layer_params",
".",
"ELU",
".",
"alpha",
"=",
"float",
"(",
"params",
")",
"elif",
"non_linearity",
"==",
"'PARAMETRICSOFTPLUS'",
":",
"# Parametric softplus must provide two np arrays for alpha and beta",
"alphas",
",",
"betas",
"=",
"(",
"params",
"[",
"0",
"]",
",",
"params",
"[",
"1",
"]",
")",
"# Weight alignment: Keras [H,W,C,F], Espresso [",
"spec_layer_params",
".",
"parametricSoftplus",
".",
"alpha",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"alphas",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"parametricSoftplus",
".",
"beta",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"betas",
".",
"flatten",
"(",
")",
")",
")",
"elif",
"non_linearity",
"==",
"'THRESHOLDEDRELU'",
":",
"if",
"params",
"is",
"None",
":",
"theta",
"=",
"1.0",
"else",
":",
"theta",
"=",
"params",
"spec_layer_params",
".",
"thresholdedReLU",
".",
"alpha",
"=",
"float",
"(",
"theta",
")",
"elif",
"non_linearity",
"==",
"'LINEAR'",
":",
"if",
"params",
"is",
"None",
":",
"alpha",
",",
"beta",
"=",
"(",
"1.0",
",",
"0.0",
")",
"else",
":",
"alpha",
",",
"beta",
"=",
"params",
"[",
"0",
"]",
",",
"params",
"[",
"1",
"]",
"spec_layer_params",
".",
"linear",
".",
"alpha",
"=",
"alpha",
"spec_layer_params",
".",
"linear",
".",
"beta",
"=",
"beta",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unknown activation type %s.\"",
"%",
"(",
"non_linearity",
")",
")"
] | Add an activation layer to the model.
Parameters
----------
name: str
The name of this layer
non_linearity: str
The non_linearity (activation) function of this layer.
It can be one of the following:
- 'RELU': Rectified Linear Unit (ReLU) function.
- 'SIGMOID': sigmoid function.
- 'TANH': tanh function.
- 'SCALED_TANH': scaled tanh function, defined as:
`f(x) = alpha * tanh(beta * x)`
where alpha and beta are constant scalars.
- 'SOFTPLUS': softplus function.
- 'SOFTSIGN': softsign function.
- 'SIGMOID_HARD': hard sigmoid function, defined as:
`f(x) = min(max(alpha * x + beta, -1), 1)`
where alpha and beta are constant scalars.
- 'LEAKYRELU': leaky relu function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a constant scalar.
- 'PRELU': Parametric ReLU function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a multi-dimensional array of same size as x.
- 'ELU': Exponential linear unit function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)`
where alpha is a constant scalar.
- 'PARAMETRICSOFTPLUS': Parametric softplus function, defined as:
`f(x) = alpha * log(1 + exp(beta * x))`
where alpha and beta are two multi-dimensional arrays of same size as x.
- 'THRESHOLDEDRELU': Thresholded ReLU function, defined as:
`f(x) = (x >= alpha) * x`
where alpha is a constant scalar.
- 'LINEAR': linear function.
`f(x) = alpha * x + beta`
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
params: [float] | [numpy.array]
Parameters for the activation, depending on non_linearity. Kindly refer to NeuralNetwork.proto for details.
- When non_linearity is one of ['RELU', 'SIGMOID', 'TANH', 'SCALED_TANH', 'SOFTPLUS', 'SOFTSIGN'], params is ignored.
- When non_linearity is one of ['SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'], param is a list of 2 floats
[alpha, beta].
- When non_linearity is one of ['LEAKYRELU', 'ELU', 'THRESHOLDEDRELU'], param is a list of 1 float
[alpha].
- When non_linearity is 'PRELU', param is a list of 1 numpy array [alpha]. The shape of
alpha is (C,), where C is either the number of input channels or
1. When C = 1, same alpha is applied to all channels.
- When non_linearity is 'PARAMETRICSOFTPLUS', param is a list of 2 numpy arrays [alpha,
beta]. The shape of alpha and beta is (C, ), where C is either
the number of input channels or 1. When C = 1, same alpha and
beta are applied to all channels.
See Also
--------
add_convolution, add_softmax | [
"Add",
"an",
"activation",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L484-L648 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_elementwise | def add_elementwise(self, name, input_names, output_name, mode, alpha = None):
"""
Add an element-wise operation layer to the model.
Parameters
----------
The name of this layer
name: str
input_names: [str]
A list of input blob names of this layer. The input blobs should have the same shape.
output_name: str
The output blob name of this layer.
mode: str
A string specifying the mode of the elementwise layer. It can be one of the following:
- 'CONCAT': concatenate input blobs along the channel axis.
- 'SEQUENCE_CONCAT': concatenate input blobs along the sequence axis.
- 'ADD': perform an element-wise summation over the input blobs.
- 'MULTIPLY': perform an element-wise multiplication over the input blobs.
- 'DOT': compute the dot product of the two input blobs. In this mode, the length of input_names should be 2.
- 'COS': compute the cosine similarity of the two input blobs. In this mode, the length of input_names should be 2.
- 'MAX': compute the element-wise maximum over the input blobs.
- 'MIN': compute the element-wise minimum over the input blobs.
- 'AVE': compute the element-wise average over the input blobs.
alpha: float
if mode == 'ADD' and there is only one input_name, alpha is added to the input
if mode == 'MULTIPLY' and there is only one input_name, alpha is multiplied to the input
See Also
--------
add_upsample, add_sequence_repeat
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
if isinstance(input_names, list):
for input_name in input_names:
spec_layer.input.append(input_name)
else:
spec_layer.input.append(input_names)
spec_layer.output.append(output_name)
## Add the following layers.
if mode == 'CONCAT':
spec_layer.concat.sequenceConcat = False
elif mode == 'SEQUENCE_CONCAT':
spec_layer.concat.sequenceConcat = True
elif mode == 'ADD':
spec_layer.add.MergeFromString(b'')
if alpha:
spec_layer.add.alpha = alpha
elif mode == 'MULTIPLY':
spec_layer.multiply.MergeFromString(b'')
if alpha:
spec_layer.multiply.alpha = alpha
elif mode == 'COS':
spec_layer.dot.cosineSimilarity = True
elif mode == 'DOT':
spec_layer.dot.cosineSimilarity = False
elif mode == 'MAX':
spec_layer.max.MergeFromString(b'')
elif mode == 'MIN':
spec_layer.min.MergeFromString(b'')
elif mode == 'AVE':
spec_layer.average.MergeFromString(b'')
else:
raise ValueError("Unsupported elementwise mode %s" % mode) | python | def add_elementwise(self, name, input_names, output_name, mode, alpha = None):
"""
Add an element-wise operation layer to the model.
Parameters
----------
The name of this layer
name: str
input_names: [str]
A list of input blob names of this layer. The input blobs should have the same shape.
output_name: str
The output blob name of this layer.
mode: str
A string specifying the mode of the elementwise layer. It can be one of the following:
- 'CONCAT': concatenate input blobs along the channel axis.
- 'SEQUENCE_CONCAT': concatenate input blobs along the sequence axis.
- 'ADD': perform an element-wise summation over the input blobs.
- 'MULTIPLY': perform an element-wise multiplication over the input blobs.
- 'DOT': compute the dot product of the two input blobs. In this mode, the length of input_names should be 2.
- 'COS': compute the cosine similarity of the two input blobs. In this mode, the length of input_names should be 2.
- 'MAX': compute the element-wise maximum over the input blobs.
- 'MIN': compute the element-wise minimum over the input blobs.
- 'AVE': compute the element-wise average over the input blobs.
alpha: float
if mode == 'ADD' and there is only one input_name, alpha is added to the input
if mode == 'MULTIPLY' and there is only one input_name, alpha is multiplied to the input
See Also
--------
add_upsample, add_sequence_repeat
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
if isinstance(input_names, list):
for input_name in input_names:
spec_layer.input.append(input_name)
else:
spec_layer.input.append(input_names)
spec_layer.output.append(output_name)
## Add the following layers.
if mode == 'CONCAT':
spec_layer.concat.sequenceConcat = False
elif mode == 'SEQUENCE_CONCAT':
spec_layer.concat.sequenceConcat = True
elif mode == 'ADD':
spec_layer.add.MergeFromString(b'')
if alpha:
spec_layer.add.alpha = alpha
elif mode == 'MULTIPLY':
spec_layer.multiply.MergeFromString(b'')
if alpha:
spec_layer.multiply.alpha = alpha
elif mode == 'COS':
spec_layer.dot.cosineSimilarity = True
elif mode == 'DOT':
spec_layer.dot.cosineSimilarity = False
elif mode == 'MAX':
spec_layer.max.MergeFromString(b'')
elif mode == 'MIN':
spec_layer.min.MergeFromString(b'')
elif mode == 'AVE':
spec_layer.average.MergeFromString(b'')
else:
raise ValueError("Unsupported elementwise mode %s" % mode) | [
"def",
"add_elementwise",
"(",
"self",
",",
"name",
",",
"input_names",
",",
"output_name",
",",
"mode",
",",
"alpha",
"=",
"None",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"if",
"isinstance",
"(",
"input_names",
",",
"list",
")",
":",
"for",
"input_name",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"else",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_names",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"## Add the following layers.",
"if",
"mode",
"==",
"'CONCAT'",
":",
"spec_layer",
".",
"concat",
".",
"sequenceConcat",
"=",
"False",
"elif",
"mode",
"==",
"'SEQUENCE_CONCAT'",
":",
"spec_layer",
".",
"concat",
".",
"sequenceConcat",
"=",
"True",
"elif",
"mode",
"==",
"'ADD'",
":",
"spec_layer",
".",
"add",
".",
"MergeFromString",
"(",
"b''",
")",
"if",
"alpha",
":",
"spec_layer",
".",
"add",
".",
"alpha",
"=",
"alpha",
"elif",
"mode",
"==",
"'MULTIPLY'",
":",
"spec_layer",
".",
"multiply",
".",
"MergeFromString",
"(",
"b''",
")",
"if",
"alpha",
":",
"spec_layer",
".",
"multiply",
".",
"alpha",
"=",
"alpha",
"elif",
"mode",
"==",
"'COS'",
":",
"spec_layer",
".",
"dot",
".",
"cosineSimilarity",
"=",
"True",
"elif",
"mode",
"==",
"'DOT'",
":",
"spec_layer",
".",
"dot",
".",
"cosineSimilarity",
"=",
"False",
"elif",
"mode",
"==",
"'MAX'",
":",
"spec_layer",
".",
"max",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"mode",
"==",
"'MIN'",
":",
"spec_layer",
".",
"min",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"mode",
"==",
"'AVE'",
":",
"spec_layer",
".",
"average",
".",
"MergeFromString",
"(",
"b''",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported elementwise mode %s\"",
"%",
"mode",
")"
] | Add an element-wise operation layer to the model.
Parameters
----------
The name of this layer
name: str
input_names: [str]
A list of input blob names of this layer. The input blobs should have the same shape.
output_name: str
The output blob name of this layer.
mode: str
A string specifying the mode of the elementwise layer. It can be one of the following:
- 'CONCAT': concatenate input blobs along the channel axis.
- 'SEQUENCE_CONCAT': concatenate input blobs along the sequence axis.
- 'ADD': perform an element-wise summation over the input blobs.
- 'MULTIPLY': perform an element-wise multiplication over the input blobs.
- 'DOT': compute the dot product of the two input blobs. In this mode, the length of input_names should be 2.
- 'COS': compute the cosine similarity of the two input blobs. In this mode, the length of input_names should be 2.
- 'MAX': compute the element-wise maximum over the input blobs.
- 'MIN': compute the element-wise minimum over the input blobs.
- 'AVE': compute the element-wise average over the input blobs.
alpha: float
if mode == 'ADD' and there is only one input_name, alpha is added to the input
if mode == 'MULTIPLY' and there is only one input_name, alpha is multiplied to the input
See Also
--------
add_upsample, add_sequence_repeat | [
"Add",
"an",
"element",
"-",
"wise",
"operation",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L650-L721 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_upsample | def add_upsample(self, name, scaling_factor_h, scaling_factor_w, input_name, output_name, mode = 'NN'):
"""
Add upsample layer to the model.
Parameters
----------
name: str
The name of this layer.
scaling_factor_h: int
Scaling factor on the vertical direction.
scaling_factor_w: int
Scaling factor on the horizontal direction.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Following values are supported:
'NN': nearest neighbour
'BILINEAR' : bilinear interpolation
See Also
--------
add_sequence_repeat, add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.upsample
spec_layer_params.scalingFactor.append(scaling_factor_h)
spec_layer_params.scalingFactor.append(scaling_factor_w)
if mode == 'NN':
spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('NN')
elif mode == 'BILINEAR':
spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('BILINEAR')
else:
raise ValueError("Unsupported upsampling mode %s" % mode) | python | def add_upsample(self, name, scaling_factor_h, scaling_factor_w, input_name, output_name, mode = 'NN'):
"""
Add upsample layer to the model.
Parameters
----------
name: str
The name of this layer.
scaling_factor_h: int
Scaling factor on the vertical direction.
scaling_factor_w: int
Scaling factor on the horizontal direction.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Following values are supported:
'NN': nearest neighbour
'BILINEAR' : bilinear interpolation
See Also
--------
add_sequence_repeat, add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.upsample
spec_layer_params.scalingFactor.append(scaling_factor_h)
spec_layer_params.scalingFactor.append(scaling_factor_w)
if mode == 'NN':
spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('NN')
elif mode == 'BILINEAR':
spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('BILINEAR')
else:
raise ValueError("Unsupported upsampling mode %s" % mode) | [
"def",
"add_upsample",
"(",
"self",
",",
"name",
",",
"scaling_factor_h",
",",
"scaling_factor_w",
",",
"input_name",
",",
"output_name",
",",
"mode",
"=",
"'NN'",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new inner-product layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"upsample",
"spec_layer_params",
".",
"scalingFactor",
".",
"append",
"(",
"scaling_factor_h",
")",
"spec_layer_params",
".",
"scalingFactor",
".",
"append",
"(",
"scaling_factor_w",
")",
"if",
"mode",
"==",
"'NN'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"UpsampleLayerParams",
".",
"InterpolationMode",
".",
"Value",
"(",
"'NN'",
")",
"elif",
"mode",
"==",
"'BILINEAR'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"UpsampleLayerParams",
".",
"InterpolationMode",
".",
"Value",
"(",
"'BILINEAR'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported upsampling mode %s\"",
"%",
"mode",
")"
] | Add upsample layer to the model.
Parameters
----------
name: str
The name of this layer.
scaling_factor_h: int
Scaling factor on the vertical direction.
scaling_factor_w: int
Scaling factor on the horizontal direction.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Following values are supported:
'NN': nearest neighbour
'BILINEAR' : bilinear interpolation
See Also
--------
add_sequence_repeat, add_elementwise | [
"Add",
"upsample",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L723-L764 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_scale | def add_scale(self, name, W, b, has_bias, input_name, output_name, shape_scale = [1], shape_bias = [1]):
"""
Add scale layer to the model.
Parameters
----------
name: str
The name of this layer.
W: int | numpy.array
Scale of the input.
b: int | numpy.array
Bias to add to the input.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_scale: [int]
List of ints that specifies the shape of the scale parameter. Can be [1] or [C] or [1,H,W] or [C,H,W].
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_bias
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.scale
spec_layer_params.hasBias = has_bias
#add scale and its shape
scale = spec_layer_params.scale
spec_layer_params.shapeScale.extend(shape_scale)
if isinstance(W, int):
scale.floatValue.append(float(W))
else:
scale.floatValue.extend(map(float, W.flatten()))
if len(scale.floatValue) != np.prod(shape_scale):
raise ValueError("Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter")
#add bias and its shape
if has_bias:
bias = spec_layer_params.bias
spec_layer_params.shapeBias.extend(shape_bias)
if isinstance(b, int):
bias.floatValue.append(float(b))
else:
bias.floatValue.extend(map(float, b.flatten()))
if len(bias.floatValue) != np.prod(shape_bias):
raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter") | python | def add_scale(self, name, W, b, has_bias, input_name, output_name, shape_scale = [1], shape_bias = [1]):
"""
Add scale layer to the model.
Parameters
----------
name: str
The name of this layer.
W: int | numpy.array
Scale of the input.
b: int | numpy.array
Bias to add to the input.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_scale: [int]
List of ints that specifies the shape of the scale parameter. Can be [1] or [C] or [1,H,W] or [C,H,W].
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_bias
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.scale
spec_layer_params.hasBias = has_bias
#add scale and its shape
scale = spec_layer_params.scale
spec_layer_params.shapeScale.extend(shape_scale)
if isinstance(W, int):
scale.floatValue.append(float(W))
else:
scale.floatValue.extend(map(float, W.flatten()))
if len(scale.floatValue) != np.prod(shape_scale):
raise ValueError("Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter")
#add bias and its shape
if has_bias:
bias = spec_layer_params.bias
spec_layer_params.shapeBias.extend(shape_bias)
if isinstance(b, int):
bias.floatValue.append(float(b))
else:
bias.floatValue.extend(map(float, b.flatten()))
if len(bias.floatValue) != np.prod(shape_bias):
raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter") | [
"def",
"add_scale",
"(",
"self",
",",
"name",
",",
"W",
",",
"b",
",",
"has_bias",
",",
"input_name",
",",
"output_name",
",",
"shape_scale",
"=",
"[",
"1",
"]",
",",
"shape_bias",
"=",
"[",
"1",
"]",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"scale",
"spec_layer_params",
".",
"hasBias",
"=",
"has_bias",
"#add scale and its shape",
"scale",
"=",
"spec_layer_params",
".",
"scale",
"spec_layer_params",
".",
"shapeScale",
".",
"extend",
"(",
"shape_scale",
")",
"if",
"isinstance",
"(",
"W",
",",
"int",
")",
":",
"scale",
".",
"floatValue",
".",
"append",
"(",
"float",
"(",
"W",
")",
")",
"else",
":",
"scale",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W",
".",
"flatten",
"(",
")",
")",
")",
"if",
"len",
"(",
"scale",
".",
"floatValue",
")",
"!=",
"np",
".",
"prod",
"(",
"shape_scale",
")",
":",
"raise",
"ValueError",
"(",
"\"Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter\"",
")",
"#add bias and its shape",
"if",
"has_bias",
":",
"bias",
"=",
"spec_layer_params",
".",
"bias",
"spec_layer_params",
".",
"shapeBias",
".",
"extend",
"(",
"shape_bias",
")",
"if",
"isinstance",
"(",
"b",
",",
"int",
")",
":",
"bias",
".",
"floatValue",
".",
"append",
"(",
"float",
"(",
"b",
")",
")",
"else",
":",
"bias",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b",
".",
"flatten",
"(",
")",
")",
")",
"if",
"len",
"(",
"bias",
".",
"floatValue",
")",
"!=",
"np",
".",
"prod",
"(",
"shape_bias",
")",
":",
"raise",
"ValueError",
"(",
"\"Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter\"",
")"
] | Add scale layer to the model.
Parameters
----------
name: str
The name of this layer.
W: int | numpy.array
Scale of the input.
b: int | numpy.array
Bias to add to the input.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_scale: [int]
List of ints that specifies the shape of the scale parameter. Can be [1] or [C] or [1,H,W] or [C,H,W].
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_bias | [
"Add",
"scale",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L766-L824 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_bias | def add_bias(self, name, b, input_name, output_name, shape_bias = [1]):
"""
Add bias layer to the model.
Parameters
----------
name: str
The name of this layer.
b: int | numpy.array
Bias to add to the input.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_scale
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.bias
#add bias and its shape
bias = spec_layer_params.bias
spec_layer_params.shape.extend(shape_bias)
if isinstance(b, int):
bias.floatValue.append(float(b))
else:
bias.floatValue.extend(map(float, b.flatten()))
if len(bias.floatValue) != np.prod(shape_bias):
raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter") | python | def add_bias(self, name, b, input_name, output_name, shape_bias = [1]):
"""
Add bias layer to the model.
Parameters
----------
name: str
The name of this layer.
b: int | numpy.array
Bias to add to the input.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_scale
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.bias
#add bias and its shape
bias = spec_layer_params.bias
spec_layer_params.shape.extend(shape_bias)
if isinstance(b, int):
bias.floatValue.append(float(b))
else:
bias.floatValue.extend(map(float, b.flatten()))
if len(bias.floatValue) != np.prod(shape_bias):
raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter") | [
"def",
"add_bias",
"(",
"self",
",",
"name",
",",
"b",
",",
"input_name",
",",
"output_name",
",",
"shape_bias",
"=",
"[",
"1",
"]",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"bias",
"#add bias and its shape",
"bias",
"=",
"spec_layer_params",
".",
"bias",
"spec_layer_params",
".",
"shape",
".",
"extend",
"(",
"shape_bias",
")",
"if",
"isinstance",
"(",
"b",
",",
"int",
")",
":",
"bias",
".",
"floatValue",
".",
"append",
"(",
"float",
"(",
"b",
")",
")",
"else",
":",
"bias",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b",
".",
"flatten",
"(",
")",
")",
")",
"if",
"len",
"(",
"bias",
".",
"floatValue",
")",
"!=",
"np",
".",
"prod",
"(",
"shape_bias",
")",
":",
"raise",
"ValueError",
"(",
"\"Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter\"",
")"
] | Add bias layer to the model.
Parameters
----------
name: str
The name of this layer.
b: int | numpy.array
Bias to add to the input.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_scale | [
"Add",
"bias",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L826-L864 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_sequence_repeat | def add_sequence_repeat(self, name, nrep, input_name, output_name):
"""
Add sequence repeat layer to the model.
Parameters
----------
name: str
The name of this layer.
nrep: int
Number of repetitions of the input blob along the sequence axis.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_upsample, add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.sequenceRepeat
spec_layer_params.nRepetitions = nrep | python | def add_sequence_repeat(self, name, nrep, input_name, output_name):
"""
Add sequence repeat layer to the model.
Parameters
----------
name: str
The name of this layer.
nrep: int
Number of repetitions of the input blob along the sequence axis.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_upsample, add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.sequenceRepeat
spec_layer_params.nRepetitions = nrep | [
"def",
"add_sequence_repeat",
"(",
"self",
",",
"name",
",",
"nrep",
",",
"input_name",
",",
"output_name",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"sequenceRepeat",
"spec_layer_params",
".",
"nRepetitions",
"=",
"nrep"
] | Add sequence repeat layer to the model.
Parameters
----------
name: str
The name of this layer.
nrep: int
Number of repetitions of the input blob along the sequence axis.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_upsample, add_elementwise | [
"Add",
"sequence",
"repeat",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L866-L892 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_convolution | def add_convolution(self, name, kernel_channels, output_channels, height,
width, stride_height, stride_width, border_mode, groups, W, b, has_bias,
is_deconv = False, output_shape = None,
input_name = 'data', output_name = 'out',
dilation_factors = [1,1],
padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0,
same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY'):
"""
Add a convolution layer to the network.
Please see the ConvolutionLayerParams in Core ML neural network
protobuf message for more information about input and output blob dimensions.
Parameters
----------
name: str
The name of this layer.
kernel_channels: int
Number of channels for the convolution kernels.
output_channels: int
Number of filter kernels. This is equal to the number of channels in the output blob.
height: int
Height of each kernel.
width: int
Width of each kernel.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the height direction.
border_mode: str
Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
Kindly refer to NeuralNetwork.proto for details.
groups: int
Number of kernel groups. Input is divided into groups along the channel axis. Each kernel group share the same weights.
W: numpy.array
Weights of the convolution kernels.
- If is_deconv is False, W should have shape (height, width, kernel_channels, output_channels), where kernel_channel = input_channels / groups
- If is_deconv is True, W should have shape (height, width, kernel_channels, output_channels / groups), where kernel_channel = input_channels
b: numpy.array
Biases of the convolution kernels. b should have shape (outputChannels, ).
has_bias: boolean
Whether bias is ignored.
- If True, bias is not ignored.
- If False, bias is ignored.
is_deconv: boolean
Whether the convolution layer is performing a convolution or a transposed convolution (deconvolution).
- If True, the convolution layer is performing transposed convolution.
- If False, the convolution layer is performing regular convolution.
output_shape: tuple | None
Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True.
When is_deconv == False, this parameter is ignored.
If it is None, the output shape is calculated automatically using the border_mode.
Kindly refer to NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
dilation_factors: [int]
Dilation factors across height and width directions. Must be a list of two positive integers.
Defaults to [1,1]
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if border_more is "valid".
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when border_mode is 'same'.
Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
Kindly refer to NeuralNetwork.proto for details.
Depthwise convolution is a special case of convolution, where we have:
kernel_channels = 1 (== input_channels / groups)
output_channels = channel_multiplier * input_channels
groups = input_channels
W : [Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]
See Also
--------
add_pooling, add_activation, add_batchnorm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer.convolution.MergeFromString(b'') # hack to set empty message
# Set the layer params
spec_layer_params = spec_layer.convolution
spec_layer_params.isDeconvolution = is_deconv
if is_deconv and output_shape:
spec_layer_params.outputShape.append(output_shape[0])
spec_layer_params.outputShape.append(output_shape[1])
spec_layer_params.outputChannels = output_channels
spec_layer_params.kernelChannels = kernel_channels
spec_layer_params.kernelSize.append(height)
spec_layer_params.kernelSize.append(width)
spec_layer_params.stride.append(stride_height)
spec_layer_params.stride.append(stride_width)
if border_mode == 'valid':
height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = padding_top
height_border.endEdgeSize = padding_bottom
width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = padding_left
width_border.endEdgeSize = padding_right
elif border_mode == 'same':
if not (same_padding_asymmetry_mode == 'BOTTOM_RIGHT_HEAVY' or same_padding_asymmetry_mode == 'TOP_LEFT_HEAVY'):
raise ValueError("Invalid value %d of same_padding_asymmetry_mode parameter" % same_padding_asymmetry_mode)
spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode)
else:
raise NotImplementedError(
'Border mode %s is not implemented.' % border_mode)
spec_layer_params.nGroups = groups
spec_layer_params.hasBias = has_bias
# Assign weights
weights = spec_layer_params.weights
# Weight alignment: MLModel Spec requires following weight arrangement:
# is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups
# is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels
if not is_deconv:
Wt = W.transpose((3,2,0,1))
Wt = Wt.flatten()
else:
Wt = W.transpose((2,3,0,1)).flatten()
for idx in range(Wt.size):
weights.floatValue.append(float(Wt[idx]))
# Assign biases
if has_bias:
bias = spec_layer_params.bias
for f in range(output_channels):
bias.floatValue.append(float(b[f]))
# add dilation factors
spec_layer_params.dilationFactor.append(dilation_factors[0])
spec_layer_params.dilationFactor.append(dilation_factors[1]) | python | def add_convolution(self, name, kernel_channels, output_channels, height,
width, stride_height, stride_width, border_mode, groups, W, b, has_bias,
is_deconv = False, output_shape = None,
input_name = 'data', output_name = 'out',
dilation_factors = [1,1],
padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0,
same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY'):
"""
Add a convolution layer to the network.
Please see the ConvolutionLayerParams in Core ML neural network
protobuf message for more information about input and output blob dimensions.
Parameters
----------
name: str
The name of this layer.
kernel_channels: int
Number of channels for the convolution kernels.
output_channels: int
Number of filter kernels. This is equal to the number of channels in the output blob.
height: int
Height of each kernel.
width: int
Width of each kernel.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the height direction.
border_mode: str
Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
Kindly refer to NeuralNetwork.proto for details.
groups: int
Number of kernel groups. Input is divided into groups along the channel axis. Each kernel group share the same weights.
W: numpy.array
Weights of the convolution kernels.
- If is_deconv is False, W should have shape (height, width, kernel_channels, output_channels), where kernel_channel = input_channels / groups
- If is_deconv is True, W should have shape (height, width, kernel_channels, output_channels / groups), where kernel_channel = input_channels
b: numpy.array
Biases of the convolution kernels. b should have shape (outputChannels, ).
has_bias: boolean
Whether bias is ignored.
- If True, bias is not ignored.
- If False, bias is ignored.
is_deconv: boolean
Whether the convolution layer is performing a convolution or a transposed convolution (deconvolution).
- If True, the convolution layer is performing transposed convolution.
- If False, the convolution layer is performing regular convolution.
output_shape: tuple | None
Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True.
When is_deconv == False, this parameter is ignored.
If it is None, the output shape is calculated automatically using the border_mode.
Kindly refer to NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
dilation_factors: [int]
Dilation factors across height and width directions. Must be a list of two positive integers.
Defaults to [1,1]
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if border_more is "valid".
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when border_mode is 'same'.
Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
Kindly refer to NeuralNetwork.proto for details.
Depthwise convolution is a special case of convolution, where we have:
kernel_channels = 1 (== input_channels / groups)
output_channels = channel_multiplier * input_channels
groups = input_channels
W : [Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]
See Also
--------
add_pooling, add_activation, add_batchnorm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer.convolution.MergeFromString(b'') # hack to set empty message
# Set the layer params
spec_layer_params = spec_layer.convolution
spec_layer_params.isDeconvolution = is_deconv
if is_deconv and output_shape:
spec_layer_params.outputShape.append(output_shape[0])
spec_layer_params.outputShape.append(output_shape[1])
spec_layer_params.outputChannels = output_channels
spec_layer_params.kernelChannels = kernel_channels
spec_layer_params.kernelSize.append(height)
spec_layer_params.kernelSize.append(width)
spec_layer_params.stride.append(stride_height)
spec_layer_params.stride.append(stride_width)
if border_mode == 'valid':
height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = padding_top
height_border.endEdgeSize = padding_bottom
width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = padding_left
width_border.endEdgeSize = padding_right
elif border_mode == 'same':
if not (same_padding_asymmetry_mode == 'BOTTOM_RIGHT_HEAVY' or same_padding_asymmetry_mode == 'TOP_LEFT_HEAVY'):
raise ValueError("Invalid value %d of same_padding_asymmetry_mode parameter" % same_padding_asymmetry_mode)
spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode)
else:
raise NotImplementedError(
'Border mode %s is not implemented.' % border_mode)
spec_layer_params.nGroups = groups
spec_layer_params.hasBias = has_bias
# Assign weights
weights = spec_layer_params.weights
# Weight alignment: MLModel Spec requires following weight arrangement:
# is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups
# is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels
if not is_deconv:
Wt = W.transpose((3,2,0,1))
Wt = Wt.flatten()
else:
Wt = W.transpose((2,3,0,1)).flatten()
for idx in range(Wt.size):
weights.floatValue.append(float(Wt[idx]))
# Assign biases
if has_bias:
bias = spec_layer_params.bias
for f in range(output_channels):
bias.floatValue.append(float(b[f]))
# add dilation factors
spec_layer_params.dilationFactor.append(dilation_factors[0])
spec_layer_params.dilationFactor.append(dilation_factors[1]) | [
"def",
"add_convolution",
"(",
"self",
",",
"name",
",",
"kernel_channels",
",",
"output_channels",
",",
"height",
",",
"width",
",",
"stride_height",
",",
"stride_width",
",",
"border_mode",
",",
"groups",
",",
"W",
",",
"b",
",",
"has_bias",
",",
"is_deconv",
"=",
"False",
",",
"output_shape",
"=",
"None",
",",
"input_name",
"=",
"'data'",
",",
"output_name",
"=",
"'out'",
",",
"dilation_factors",
"=",
"[",
"1",
",",
"1",
"]",
",",
"padding_top",
"=",
"0",
",",
"padding_bottom",
"=",
"0",
",",
"padding_left",
"=",
"0",
",",
"padding_right",
"=",
"0",
",",
"same_padding_asymmetry_mode",
"=",
"'BOTTOM_RIGHT_HEAVY'",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer",
".",
"convolution",
".",
"MergeFromString",
"(",
"b''",
")",
"# hack to set empty message",
"# Set the layer params",
"spec_layer_params",
"=",
"spec_layer",
".",
"convolution",
"spec_layer_params",
".",
"isDeconvolution",
"=",
"is_deconv",
"if",
"is_deconv",
"and",
"output_shape",
":",
"spec_layer_params",
".",
"outputShape",
".",
"append",
"(",
"output_shape",
"[",
"0",
"]",
")",
"spec_layer_params",
".",
"outputShape",
".",
"append",
"(",
"output_shape",
"[",
"1",
"]",
")",
"spec_layer_params",
".",
"outputChannels",
"=",
"output_channels",
"spec_layer_params",
".",
"kernelChannels",
"=",
"kernel_channels",
"spec_layer_params",
".",
"kernelSize",
".",
"append",
"(",
"height",
")",
"spec_layer_params",
".",
"kernelSize",
".",
"append",
"(",
"width",
")",
"spec_layer_params",
".",
"stride",
".",
"append",
"(",
"stride_height",
")",
"spec_layer_params",
".",
"stride",
".",
"append",
"(",
"stride_width",
")",
"if",
"border_mode",
"==",
"'valid'",
":",
"height_border",
"=",
"spec_layer_params",
".",
"valid",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"height_border",
".",
"startEdgeSize",
"=",
"padding_top",
"height_border",
".",
"endEdgeSize",
"=",
"padding_bottom",
"width_border",
"=",
"spec_layer_params",
".",
"valid",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"width_border",
".",
"startEdgeSize",
"=",
"padding_left",
"width_border",
".",
"endEdgeSize",
"=",
"padding_right",
"elif",
"border_mode",
"==",
"'same'",
":",
"if",
"not",
"(",
"same_padding_asymmetry_mode",
"==",
"'BOTTOM_RIGHT_HEAVY'",
"or",
"same_padding_asymmetry_mode",
"==",
"'TOP_LEFT_HEAVY'",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value %d of same_padding_asymmetry_mode parameter\"",
"%",
"same_padding_asymmetry_mode",
")",
"spec_layer_params",
".",
"same",
".",
"asymmetryMode",
"=",
"_NeuralNetwork_pb2",
".",
"SamePadding",
".",
"SamePaddingMode",
".",
"Value",
"(",
"same_padding_asymmetry_mode",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Border mode %s is not implemented.'",
"%",
"border_mode",
")",
"spec_layer_params",
".",
"nGroups",
"=",
"groups",
"spec_layer_params",
".",
"hasBias",
"=",
"has_bias",
"# Assign weights",
"weights",
"=",
"spec_layer_params",
".",
"weights",
"# Weight alignment: MLModel Spec requires following weight arrangement:",
"# is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channel = input_channels / groups",
"# is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels",
"if",
"not",
"is_deconv",
":",
"Wt",
"=",
"W",
".",
"transpose",
"(",
"(",
"3",
",",
"2",
",",
"0",
",",
"1",
")",
")",
"Wt",
"=",
"Wt",
".",
"flatten",
"(",
")",
"else",
":",
"Wt",
"=",
"W",
".",
"transpose",
"(",
"(",
"2",
",",
"3",
",",
"0",
",",
"1",
")",
")",
".",
"flatten",
"(",
")",
"for",
"idx",
"in",
"range",
"(",
"Wt",
".",
"size",
")",
":",
"weights",
".",
"floatValue",
".",
"append",
"(",
"float",
"(",
"Wt",
"[",
"idx",
"]",
")",
")",
"# Assign biases",
"if",
"has_bias",
":",
"bias",
"=",
"spec_layer_params",
".",
"bias",
"for",
"f",
"in",
"range",
"(",
"output_channels",
")",
":",
"bias",
".",
"floatValue",
".",
"append",
"(",
"float",
"(",
"b",
"[",
"f",
"]",
")",
")",
"# add dilation factors",
"spec_layer_params",
".",
"dilationFactor",
".",
"append",
"(",
"dilation_factors",
"[",
"0",
"]",
")",
"spec_layer_params",
".",
"dilationFactor",
".",
"append",
"(",
"dilation_factors",
"[",
"1",
"]",
")"
] | Add a convolution layer to the network.
Please see the ConvolutionLayerParams in Core ML neural network
protobuf message for more information about input and output blob dimensions.
Parameters
----------
name: str
The name of this layer.
kernel_channels: int
Number of channels for the convolution kernels.
output_channels: int
Number of filter kernels. This is equal to the number of channels in the output blob.
height: int
Height of each kernel.
width: int
Width of each kernel.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the height direction.
border_mode: str
Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
Kindly refer to NeuralNetwork.proto for details.
groups: int
Number of kernel groups. Input is divided into groups along the channel axis. Each kernel group share the same weights.
W: numpy.array
Weights of the convolution kernels.
- If is_deconv is False, W should have shape (height, width, kernel_channels, output_channels), where kernel_channel = input_channels / groups
- If is_deconv is True, W should have shape (height, width, kernel_channels, output_channels / groups), where kernel_channel = input_channels
b: numpy.array
Biases of the convolution kernels. b should have shape (outputChannels, ).
has_bias: boolean
Whether bias is ignored.
- If True, bias is not ignored.
- If False, bias is ignored.
is_deconv: boolean
Whether the convolution layer is performing a convolution or a transposed convolution (deconvolution).
- If True, the convolution layer is performing transposed convolution.
- If False, the convolution layer is performing regular convolution.
output_shape: tuple | None
Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True.
When is_deconv == False, this parameter is ignored.
If it is None, the output shape is calculated automatically using the border_mode.
Kindly refer to NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
dilation_factors: [int]
Dilation factors across height and width directions. Must be a list of two positive integers.
Defaults to [1,1]
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if border_more is "valid".
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when border_mode is 'same'.
Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
Kindly refer to NeuralNetwork.proto for details.
Depthwise convolution is a special case of convolution, where we have:
kernel_channels = 1 (== input_channels / groups)
output_channels = channel_multiplier * input_channels
groups = input_channels
W : [Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]
See Also
--------
add_pooling, add_activation, add_batchnorm | [
"Add",
"a",
"convolution",
"layer",
"to",
"the",
"network",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L894-L1049 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_padding | def add_padding(self, name,
left = 0, right = 0, top = 0, bottom = 0,
value = 0,
input_name = 'data', output_name = 'out',
padding_type = 'constant'):
"""
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling
"""
# Currently only constant padding is supported.
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.padding
# Set the parameters
if padding_type == 'constant':
spec_layer_params.constant.value = value
elif padding_type == 'reflection':
spec_layer_params.reflection.MergeFromString(b'')
elif padding_type == 'replication':
spec_layer_params.replication.MergeFromString(b'')
else:
raise ValueError("Unknown padding_type %s" %(padding_type))
height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = top
height_border.endEdgeSize = bottom
width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = left
width_border.endEdgeSize = right | python | def add_padding(self, name,
left = 0, right = 0, top = 0, bottom = 0,
value = 0,
input_name = 'data', output_name = 'out',
padding_type = 'constant'):
"""
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling
"""
# Currently only constant padding is supported.
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.padding
# Set the parameters
if padding_type == 'constant':
spec_layer_params.constant.value = value
elif padding_type == 'reflection':
spec_layer_params.reflection.MergeFromString(b'')
elif padding_type == 'replication':
spec_layer_params.replication.MergeFromString(b'')
else:
raise ValueError("Unknown padding_type %s" %(padding_type))
height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = top
height_border.endEdgeSize = bottom
width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = left
width_border.endEdgeSize = right | [
"def",
"add_padding",
"(",
"self",
",",
"name",
",",
"left",
"=",
"0",
",",
"right",
"=",
"0",
",",
"top",
"=",
"0",
",",
"bottom",
"=",
"0",
",",
"value",
"=",
"0",
",",
"input_name",
"=",
"'data'",
",",
"output_name",
"=",
"'out'",
",",
"padding_type",
"=",
"'constant'",
")",
":",
"# Currently only constant padding is supported.",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"padding",
"# Set the parameters",
"if",
"padding_type",
"==",
"'constant'",
":",
"spec_layer_params",
".",
"constant",
".",
"value",
"=",
"value",
"elif",
"padding_type",
"==",
"'reflection'",
":",
"spec_layer_params",
".",
"reflection",
".",
"MergeFromString",
"(",
"b''",
")",
"elif",
"padding_type",
"==",
"'replication'",
":",
"spec_layer_params",
".",
"replication",
".",
"MergeFromString",
"(",
"b''",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown padding_type %s\"",
"%",
"(",
"padding_type",
")",
")",
"height_border",
"=",
"spec_layer_params",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"height_border",
".",
"startEdgeSize",
"=",
"top",
"height_border",
".",
"endEdgeSize",
"=",
"bottom",
"width_border",
"=",
"spec_layer_params",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"width_border",
".",
"startEdgeSize",
"=",
"left",
"width_border",
".",
"endEdgeSize",
"=",
"right"
] | Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling | [
"Add",
"a",
"padding",
"layer",
"to",
"the",
"model",
".",
"Kindly",
"refer",
"to",
"NeuralNetwork",
".",
"proto",
"for",
"details",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1148-L1208 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_crop | def add_crop(self, name, left, right, top, bottom, offset, input_names,
output_name):
"""
Add a cropping layer to the model.
The cropping layer have two functional modes:
- When it has 1 input blob, it crops the input blob based
on the 4 parameters [left, right, top, bottom].
- When it has 2 input blobs, it crops the first input blob based
on the dimension of the second blob with an offset.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be cropped on the left side of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
right: int
Number of elements to be cropped on the right side of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
top: int
Number of elements to be cropped on the top of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
bottom: int
Number of elements to be cropped on the bottom of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
offset: [int]
Offset along the height and width directions when the crop layer takes 2 inputs. Must be a list of length 2.
When the crop layer takes 1 input, this parameter is ignored.
input_names: [str]
The input blob name(s) of this layer. Must be either a list of 1 string (1 input crop layer),
or a list of 2 strings (2-input crop layer).
output_name: str
The output blob name of this layer.
See Also
--------
add_padding, add_convolution, add_pooling
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for input_name in input_names:
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.crop
# Set the parameters
offset = [0,0] if len(input_name) == 1 else offset
spec_layer_params.offset.extend(offset)
height_border = spec_layer_params.cropAmounts.borderAmounts.add()
height_border.startEdgeSize = top
height_border.endEdgeSize = bottom
width_border = spec_layer_params.cropAmounts.borderAmounts.add()
width_border.startEdgeSize = left
width_border.endEdgeSize = right | python | def add_crop(self, name, left, right, top, bottom, offset, input_names,
output_name):
"""
Add a cropping layer to the model.
The cropping layer have two functional modes:
- When it has 1 input blob, it crops the input blob based
on the 4 parameters [left, right, top, bottom].
- When it has 2 input blobs, it crops the first input blob based
on the dimension of the second blob with an offset.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be cropped on the left side of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
right: int
Number of elements to be cropped on the right side of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
top: int
Number of elements to be cropped on the top of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
bottom: int
Number of elements to be cropped on the bottom of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
offset: [int]
Offset along the height and width directions when the crop layer takes 2 inputs. Must be a list of length 2.
When the crop layer takes 1 input, this parameter is ignored.
input_names: [str]
The input blob name(s) of this layer. Must be either a list of 1 string (1 input crop layer),
or a list of 2 strings (2-input crop layer).
output_name: str
The output blob name of this layer.
See Also
--------
add_padding, add_convolution, add_pooling
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for input_name in input_names:
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.crop
# Set the parameters
offset = [0,0] if len(input_name) == 1 else offset
spec_layer_params.offset.extend(offset)
height_border = spec_layer_params.cropAmounts.borderAmounts.add()
height_border.startEdgeSize = top
height_border.endEdgeSize = bottom
width_border = spec_layer_params.cropAmounts.borderAmounts.add()
width_border.startEdgeSize = left
width_border.endEdgeSize = right | [
"def",
"add_crop",
"(",
"self",
",",
"name",
",",
"left",
",",
"right",
",",
"top",
",",
"bottom",
",",
"offset",
",",
"input_names",
",",
"output_name",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"for",
"input_name",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"crop",
"# Set the parameters",
"offset",
"=",
"[",
"0",
",",
"0",
"]",
"if",
"len",
"(",
"input_name",
")",
"==",
"1",
"else",
"offset",
"spec_layer_params",
".",
"offset",
".",
"extend",
"(",
"offset",
")",
"height_border",
"=",
"spec_layer_params",
".",
"cropAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"height_border",
".",
"startEdgeSize",
"=",
"top",
"height_border",
".",
"endEdgeSize",
"=",
"bottom",
"width_border",
"=",
"spec_layer_params",
".",
"cropAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"width_border",
".",
"startEdgeSize",
"=",
"left",
"width_border",
".",
"endEdgeSize",
"=",
"right"
] | Add a cropping layer to the model.
The cropping layer have two functional modes:
- When it has 1 input blob, it crops the input blob based
on the 4 parameters [left, right, top, bottom].
- When it has 2 input blobs, it crops the first input blob based
on the dimension of the second blob with an offset.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be cropped on the left side of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
right: int
Number of elements to be cropped on the right side of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
top: int
Number of elements to be cropped on the top of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
bottom: int
Number of elements to be cropped on the bottom of the input blob.
When the crop layer takes 2 inputs, this parameter is ignored.
offset: [int]
Offset along the height and width directions when the crop layer takes 2 inputs. Must be a list of length 2.
When the crop layer takes 1 input, this parameter is ignored.
input_names: [str]
The input blob name(s) of this layer. Must be either a list of 1 string (1 input crop layer),
or a list of 2 strings (2-input crop layer).
output_name: str
The output blob name of this layer.
See Also
--------
add_padding, add_convolution, add_pooling | [
"Add",
"a",
"cropping",
"layer",
"to",
"the",
"model",
".",
"The",
"cropping",
"layer",
"have",
"two",
"functional",
"modes",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1210-L1269 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_simple_rnn | def add_simple_rnn(self,name, W_h, W_x, b, hidden_size, input_size, activation, input_names, output_names, output_all = False, reverse_input = False):
"""
Add a simple recurrent layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: numpy.array
Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size).
W_x: numpy.array
Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size).
b: numpy.array | None
Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
activation: str
Activation function name. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
See add_activation for more detailed description.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input].
output_name: [str]
The output blob name list of this layer, in the order of [y, h_output].
output_all: boolean
Whether the recurrent layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
reverse_input: boolean
Whether the recurrent layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_gru, add_unilstm, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.simpleRecurrent
spec_layer_params.reverseInput = reverse_input
#set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
spec_layer_params.hasBiasVector = True
spec_layer_params.sequenceOutput = output_all
activation_f = spec_layer_params.activation
_set_recurrent_activation(activation_f, activation)
# Write the weights
spec_layer_params.weightMatrix.floatValue.extend(map(float, W_x.flatten()))
spec_layer_params.recursionMatrix.floatValue.extend(map(float, W_h.flatten()))
if b is not None:
spec_layer_params.biasVector.floatValue.extend(map(float, b.flatten())) | python | def add_simple_rnn(self,name, W_h, W_x, b, hidden_size, input_size, activation, input_names, output_names, output_all = False, reverse_input = False):
"""
Add a simple recurrent layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: numpy.array
Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size).
W_x: numpy.array
Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size).
b: numpy.array | None
Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
activation: str
Activation function name. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
See add_activation for more detailed description.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input].
output_name: [str]
The output blob name list of this layer, in the order of [y, h_output].
output_all: boolean
Whether the recurrent layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
reverse_input: boolean
Whether the recurrent layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_gru, add_unilstm, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.simpleRecurrent
spec_layer_params.reverseInput = reverse_input
#set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
spec_layer_params.hasBiasVector = True
spec_layer_params.sequenceOutput = output_all
activation_f = spec_layer_params.activation
_set_recurrent_activation(activation_f, activation)
# Write the weights
spec_layer_params.weightMatrix.floatValue.extend(map(float, W_x.flatten()))
spec_layer_params.recursionMatrix.floatValue.extend(map(float, W_h.flatten()))
if b is not None:
spec_layer_params.biasVector.floatValue.extend(map(float, b.flatten())) | [
"def",
"add_simple_rnn",
"(",
"self",
",",
"name",
",",
"W_h",
",",
"W_x",
",",
"b",
",",
"hidden_size",
",",
"input_size",
",",
"activation",
",",
"input_names",
",",
"output_names",
",",
"output_all",
"=",
"False",
",",
"reverse_input",
"=",
"False",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new Layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"for",
"name",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"name",
")",
"for",
"name",
"in",
"output_names",
":",
"spec_layer",
".",
"output",
".",
"append",
"(",
"name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"simpleRecurrent",
"spec_layer_params",
".",
"reverseInput",
"=",
"reverse_input",
"#set the parameters",
"spec_layer_params",
".",
"inputVectorSize",
"=",
"input_size",
"spec_layer_params",
".",
"outputVectorSize",
"=",
"hidden_size",
"if",
"b",
"is",
"not",
"None",
":",
"spec_layer_params",
".",
"hasBiasVector",
"=",
"True",
"spec_layer_params",
".",
"sequenceOutput",
"=",
"output_all",
"activation_f",
"=",
"spec_layer_params",
".",
"activation",
"_set_recurrent_activation",
"(",
"activation_f",
",",
"activation",
")",
"# Write the weights",
"spec_layer_params",
".",
"weightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_x",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"recursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_h",
".",
"flatten",
"(",
")",
")",
")",
"if",
"b",
"is",
"not",
"None",
":",
"spec_layer_params",
".",
"biasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b",
".",
"flatten",
"(",
")",
")",
")"
] | Add a simple recurrent layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: numpy.array
Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size).
W_x: numpy.array
Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size).
b: numpy.array | None
Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
activation: str
Activation function name. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
See add_activation for more detailed description.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input].
output_name: [str]
The output blob name list of this layer, in the order of [y, h_output].
output_all: boolean
Whether the recurrent layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
reverse_input: boolean
Whether the recurrent layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_gru, add_unilstm, add_bidirlstm | [
"Add",
"a",
"simple",
"recurrent",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1271-L1341 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_gru | def add_gru(self, name, W_h, W_x, b, hidden_size, input_size,
input_names, output_names, activation = 'TANH', inner_activation = 'SIGMOID_HARD',
output_all = False, reverse_input = False):
"""
Add a Gated-Recurrent Unit (GRU) layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices. The ordering is [R_z, R_r, R_o],
where R_z, R_r and R_o are weight matrices at update gate, reset gate and output gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices. The ordering is [W_z, W_r, W_o],
where W_z, W_r, and W_o are weight matrices at update gate, reset gate and output gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array] | None
List of biases of the GRU layer. The ordering is [b_z, b_r, b_o],
where b_z, b_r, b_o are biases at update gate, reset gate and output gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
See add_activation for more detailed description.
inner_activation: str
Inner activation function used at update and reset gates. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'SIGMOID_HARD'.
See add_activation for more detailed description.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output].
output_all: boolean
Whether the recurrent layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
reverse_input: boolean
Whether the recurrent layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_simple_rnn, add_unilstm, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.gru
# set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
spec_layer_params.hasBiasVectors = True
spec_layer_params.sequenceOutput = output_all
spec_layer_params.reverseInput = reverse_input
activation_f = spec_layer_params.activations.add()
activation_g = spec_layer_params.activations.add()
_set_recurrent_activation(activation_f, inner_activation)
_set_recurrent_activation(activation_g, activation)
# Write the weights
R_z, R_r, R_o = W_h
W_z, W_r, W_o = W_x
spec_layer_params.updateGateWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
spec_layer_params.resetGateWeightMatrix.floatValue.extend(map(float, W_r.flatten()))
spec_layer_params.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
spec_layer_params.updateGateRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
spec_layer_params.resetGateRecursionMatrix.floatValue.extend(map(float, R_r.flatten()))
spec_layer_params.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
if b is not None:
b_z, b_r, b_o = b
spec_layer_params.updateGateBiasVector.floatValue.extend(map(float, b_z.flatten()))
spec_layer_params.resetGateBiasVector.floatValue.extend(map(float, b_r.flatten()))
spec_layer_params.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten())) | python | def add_gru(self, name, W_h, W_x, b, hidden_size, input_size,
input_names, output_names, activation = 'TANH', inner_activation = 'SIGMOID_HARD',
output_all = False, reverse_input = False):
"""
Add a Gated-Recurrent Unit (GRU) layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices. The ordering is [R_z, R_r, R_o],
where R_z, R_r and R_o are weight matrices at update gate, reset gate and output gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices. The ordering is [W_z, W_r, W_o],
where W_z, W_r, and W_o are weight matrices at update gate, reset gate and output gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array] | None
List of biases of the GRU layer. The ordering is [b_z, b_r, b_o],
where b_z, b_r, b_o are biases at update gate, reset gate and output gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
See add_activation for more detailed description.
inner_activation: str
Inner activation function used at update and reset gates. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'SIGMOID_HARD'.
See add_activation for more detailed description.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output].
output_all: boolean
Whether the recurrent layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
reverse_input: boolean
Whether the recurrent layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_simple_rnn, add_unilstm, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.gru
# set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
spec_layer_params.hasBiasVectors = True
spec_layer_params.sequenceOutput = output_all
spec_layer_params.reverseInput = reverse_input
activation_f = spec_layer_params.activations.add()
activation_g = spec_layer_params.activations.add()
_set_recurrent_activation(activation_f, inner_activation)
_set_recurrent_activation(activation_g, activation)
# Write the weights
R_z, R_r, R_o = W_h
W_z, W_r, W_o = W_x
spec_layer_params.updateGateWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
spec_layer_params.resetGateWeightMatrix.floatValue.extend(map(float, W_r.flatten()))
spec_layer_params.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
spec_layer_params.updateGateRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
spec_layer_params.resetGateRecursionMatrix.floatValue.extend(map(float, R_r.flatten()))
spec_layer_params.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
if b is not None:
b_z, b_r, b_o = b
spec_layer_params.updateGateBiasVector.floatValue.extend(map(float, b_z.flatten()))
spec_layer_params.resetGateBiasVector.floatValue.extend(map(float, b_r.flatten()))
spec_layer_params.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten())) | [
"def",
"add_gru",
"(",
"self",
",",
"name",
",",
"W_h",
",",
"W_x",
",",
"b",
",",
"hidden_size",
",",
"input_size",
",",
"input_names",
",",
"output_names",
",",
"activation",
"=",
"'TANH'",
",",
"inner_activation",
"=",
"'SIGMOID_HARD'",
",",
"output_all",
"=",
"False",
",",
"reverse_input",
"=",
"False",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new Layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"for",
"name",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"name",
")",
"for",
"name",
"in",
"output_names",
":",
"spec_layer",
".",
"output",
".",
"append",
"(",
"name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"gru",
"# set the parameters",
"spec_layer_params",
".",
"inputVectorSize",
"=",
"input_size",
"spec_layer_params",
".",
"outputVectorSize",
"=",
"hidden_size",
"if",
"b",
"is",
"not",
"None",
":",
"spec_layer_params",
".",
"hasBiasVectors",
"=",
"True",
"spec_layer_params",
".",
"sequenceOutput",
"=",
"output_all",
"spec_layer_params",
".",
"reverseInput",
"=",
"reverse_input",
"activation_f",
"=",
"spec_layer_params",
".",
"activations",
".",
"add",
"(",
")",
"activation_g",
"=",
"spec_layer_params",
".",
"activations",
".",
"add",
"(",
")",
"_set_recurrent_activation",
"(",
"activation_f",
",",
"inner_activation",
")",
"_set_recurrent_activation",
"(",
"activation_g",
",",
"activation",
")",
"# Write the weights",
"R_z",
",",
"R_r",
",",
"R_o",
"=",
"W_h",
"W_z",
",",
"W_r",
",",
"W_o",
"=",
"W_x",
"spec_layer_params",
".",
"updateGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_z",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"resetGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_r",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"outputGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_o",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"updateGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_z",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"resetGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_r",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"outputGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_o",
".",
"flatten",
"(",
")",
")",
")",
"if",
"b",
"is",
"not",
"None",
":",
"b_z",
",",
"b_r",
",",
"b_o",
"=",
"b",
"spec_layer_params",
".",
"updateGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_z",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"resetGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_r",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"outputGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_o",
".",
"flatten",
"(",
")",
")",
")"
] | Add a Gated-Recurrent Unit (GRU) layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices. The ordering is [R_z, R_r, R_o],
where R_z, R_r and R_o are weight matrices at update gate, reset gate and output gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices. The ordering is [W_z, W_r, W_o],
where W_z, W_r, and W_o are weight matrices at update gate, reset gate and output gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array] | None
List of biases of the GRU layer. The ordering is [b_z, b_r, b_o],
where b_z, b_r, b_o are biases at update gate, reset gate and output gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
See add_activation for more detailed description.
inner_activation: str
Inner activation function used at update and reset gates. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'SIGMOID_HARD'.
See add_activation for more detailed description.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output].
output_all: boolean
Whether the recurrent layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
reverse_input: boolean
Whether the recurrent layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_simple_rnn, add_unilstm, add_bidirlstm | [
"Add",
"a",
"Gated",
"-",
"Recurrent",
"Unit",
"(",
"GRU",
")",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1343-L1440 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_unilstm | def add_unilstm(self, name, W_h, W_x, b, hidden_size, input_size, input_names, output_names,
inner_activation = 'SIGMOID',
cell_state_update_activation = 'TANH',
output_activation = 'TANH',
peep = None,
output_all = False,
forget_bias = False, coupled_input_forget_gate = False,
cell_clip_threshold = 50000.0, reverse_input = False):
"""
Add a Uni-directional LSTM layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array] | None
List of biases. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input, c_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output, c_output].
inner_activation: str
Inner activation function used at input and forget gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
cell_state_update_activation: str
Cell state update activation function used at the cell state update gate.
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
output_activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
peep: [numpy.array] | None
List of peephole vectors. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,).
output_all: boolean
Whether the LSTM layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
forget_bias: boolean
If True, a vector of 1s is added to forget gate bias.
coupled_input_forget_gate: boolean
If True, the input gate and forget gate is coupled. i.e. forget gate is not used.
cell_clip_threshold: float
The limit on the maximum and minimum values on the cell state.
If not provided, it is defaulted to 50.0.
reverse_input: boolean
Whether the LSTM layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_simple_rnn, add_gru, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.uniDirectionalLSTM
params = spec_layer_params.params
weight_params = spec_layer_params.weightParams
# set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
params.sequenceOutput = output_all
params.forgetBias = False
if b is not None:
params.hasBiasVectors = True
if peep is not None:
params.hasPeepholeVectors = True
params.coupledInputAndForgetGate = coupled_input_forget_gate
params.cellClipThreshold = cell_clip_threshold
params.forgetBias = forget_bias
spec_layer_params.reverseInput = reverse_input
activation_f = spec_layer_params.activations.add()
activation_g = spec_layer_params.activations.add()
activation_h = spec_layer_params.activations.add()
_set_recurrent_activation(activation_f, inner_activation)
_set_recurrent_activation(activation_g, cell_state_update_activation)
_set_recurrent_activation(activation_h, output_activation)
# Write the weights
R_i, R_f, R_o, R_z = W_h
W_i, W_f, W_o, W_z = W_x
weight_params.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b is not None:
b_i, b_f, b_o, b_z = b
weight_params.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep is not None:
p_i, p_f, p_o = peep
weight_params.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten())) | python | def add_unilstm(self, name, W_h, W_x, b, hidden_size, input_size, input_names, output_names,
inner_activation = 'SIGMOID',
cell_state_update_activation = 'TANH',
output_activation = 'TANH',
peep = None,
output_all = False,
forget_bias = False, coupled_input_forget_gate = False,
cell_clip_threshold = 50000.0, reverse_input = False):
"""
Add a Uni-directional LSTM layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array] | None
List of biases. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input, c_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output, c_output].
inner_activation: str
Inner activation function used at input and forget gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
cell_state_update_activation: str
Cell state update activation function used at the cell state update gate.
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
output_activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
peep: [numpy.array] | None
List of peephole vectors. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,).
output_all: boolean
Whether the LSTM layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
forget_bias: boolean
If True, a vector of 1s is added to forget gate bias.
coupled_input_forget_gate: boolean
If True, the input gate and forget gate is coupled. i.e. forget gate is not used.
cell_clip_threshold: float
The limit on the maximum and minimum values on the cell state.
If not provided, it is defaulted to 50.0.
reverse_input: boolean
Whether the LSTM layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_simple_rnn, add_gru, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.uniDirectionalLSTM
params = spec_layer_params.params
weight_params = spec_layer_params.weightParams
# set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
params.sequenceOutput = output_all
params.forgetBias = False
if b is not None:
params.hasBiasVectors = True
if peep is not None:
params.hasPeepholeVectors = True
params.coupledInputAndForgetGate = coupled_input_forget_gate
params.cellClipThreshold = cell_clip_threshold
params.forgetBias = forget_bias
spec_layer_params.reverseInput = reverse_input
activation_f = spec_layer_params.activations.add()
activation_g = spec_layer_params.activations.add()
activation_h = spec_layer_params.activations.add()
_set_recurrent_activation(activation_f, inner_activation)
_set_recurrent_activation(activation_g, cell_state_update_activation)
_set_recurrent_activation(activation_h, output_activation)
# Write the weights
R_i, R_f, R_o, R_z = W_h
W_i, W_f, W_o, W_z = W_x
weight_params.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b is not None:
b_i, b_f, b_o, b_z = b
weight_params.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep is not None:
p_i, p_f, p_o = peep
weight_params.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten())) | [
"def",
"add_unilstm",
"(",
"self",
",",
"name",
",",
"W_h",
",",
"W_x",
",",
"b",
",",
"hidden_size",
",",
"input_size",
",",
"input_names",
",",
"output_names",
",",
"inner_activation",
"=",
"'SIGMOID'",
",",
"cell_state_update_activation",
"=",
"'TANH'",
",",
"output_activation",
"=",
"'TANH'",
",",
"peep",
"=",
"None",
",",
"output_all",
"=",
"False",
",",
"forget_bias",
"=",
"False",
",",
"coupled_input_forget_gate",
"=",
"False",
",",
"cell_clip_threshold",
"=",
"50000.0",
",",
"reverse_input",
"=",
"False",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new Layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"for",
"name",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"name",
")",
"for",
"name",
"in",
"output_names",
":",
"spec_layer",
".",
"output",
".",
"append",
"(",
"name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"uniDirectionalLSTM",
"params",
"=",
"spec_layer_params",
".",
"params",
"weight_params",
"=",
"spec_layer_params",
".",
"weightParams",
"# set the parameters",
"spec_layer_params",
".",
"inputVectorSize",
"=",
"input_size",
"spec_layer_params",
".",
"outputVectorSize",
"=",
"hidden_size",
"params",
".",
"sequenceOutput",
"=",
"output_all",
"params",
".",
"forgetBias",
"=",
"False",
"if",
"b",
"is",
"not",
"None",
":",
"params",
".",
"hasBiasVectors",
"=",
"True",
"if",
"peep",
"is",
"not",
"None",
":",
"params",
".",
"hasPeepholeVectors",
"=",
"True",
"params",
".",
"coupledInputAndForgetGate",
"=",
"coupled_input_forget_gate",
"params",
".",
"cellClipThreshold",
"=",
"cell_clip_threshold",
"params",
".",
"forgetBias",
"=",
"forget_bias",
"spec_layer_params",
".",
"reverseInput",
"=",
"reverse_input",
"activation_f",
"=",
"spec_layer_params",
".",
"activations",
".",
"add",
"(",
")",
"activation_g",
"=",
"spec_layer_params",
".",
"activations",
".",
"add",
"(",
")",
"activation_h",
"=",
"spec_layer_params",
".",
"activations",
".",
"add",
"(",
")",
"_set_recurrent_activation",
"(",
"activation_f",
",",
"inner_activation",
")",
"_set_recurrent_activation",
"(",
"activation_g",
",",
"cell_state_update_activation",
")",
"_set_recurrent_activation",
"(",
"activation_h",
",",
"output_activation",
")",
"# Write the weights",
"R_i",
",",
"R_f",
",",
"R_o",
",",
"R_z",
"=",
"W_h",
"W_i",
",",
"W_f",
",",
"W_o",
",",
"W_z",
"=",
"W_x",
"weight_params",
".",
"inputGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"blockInputWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_z",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"inputGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"blockInputRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_z",
".",
"flatten",
"(",
")",
")",
")",
"if",
"b",
"is",
"not",
"None",
":",
"b_i",
",",
"b_f",
",",
"b_o",
",",
"b_z",
"=",
"b",
"weight_params",
".",
"inputGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"blockInputBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_z",
".",
"flatten",
"(",
")",
")",
")",
"if",
"peep",
"is",
"not",
"None",
":",
"p_i",
",",
"p_f",
",",
"p_o",
"=",
"peep",
"weight_params",
".",
"inputGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_o",
".",
"flatten",
"(",
")",
")",
")"
] | Add a Uni-directional LSTM layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array] | None
List of biases. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input, c_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output, c_output].
inner_activation: str
Inner activation function used at input and forget gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
cell_state_update_activation: str
Cell state update activation function used at the cell state update gate.
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
output_activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
peep: [numpy.array] | None
List of peephole vectors. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,).
output_all: boolean
Whether the LSTM layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
forget_bias: boolean
If True, a vector of 1s is added to forget gate bias.
coupled_input_forget_gate: boolean
If True, the input gate and forget gate is coupled. i.e. forget gate is not used.
cell_clip_threshold: float
The limit on the maximum and minimum values on the cell state.
If not provided, it is defaulted to 50.0.
reverse_input: boolean
Whether the LSTM layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_simple_rnn, add_gru, add_bidirlstm | [
"Add",
"a",
"Uni",
"-",
"directional",
"LSTM",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1442-L1574 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_bidirlstm | def add_bidirlstm(self, name, W_h, W_x, b, W_h_back, W_x_back, b_back, hidden_size, input_size,
input_names, output_names,
inner_activation = 'SIGMOID',
cell_state_update_activation = 'TANH',
output_activation = 'TANH',
peep = None, peep_back = None,
output_all = False,
forget_bias = False, coupled_input_forget_gate= False, cell_clip_threshold = 50000.0):
"""
Add a Bi-directional LSTM layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices for the forward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices for the forward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array]
List of biases for the forward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
W_h_back: [numpy.array]
List of recursion weight matrices for the backward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x_back: [numpy.array]
List of input weight matrices for the backward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b_back: [numpy.array]
List of biases for the backward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
The shapes of the biases (hidden_size).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input, c_input, h_reverse_input, c_reverse_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output, c_output, h_reverse_output, c_reverse_output].
inner_activation: str
Inner activation function used at input and forget gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'SIGMOID'.
cell_state_update_activation: str
Cell state update activation function used at the cell state update gate.
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
output_activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
peep: [numpy.array] | None
List of peephole vectors for the forward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
peep_back: [numpy.array] | None
List of peephole vectors for the backward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
output_all: boolean
Whether the LSTM layer should output at every time step. Defaults to False.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
forget_bias: boolean
If True, a vector of 1s is added to forget gate bias. Defaults to False.
coupled_input_forget_gate : boolean
If True, the input gate and forget gate is coupled. i.e. forget gate is not used.
Defaults to False.
cell_clip_threshold : float
The limit on the maximum and minimum values on the cell state.
Defaults to 50.0.
See Also
--------
add_activation, add_simple_rnn, add_unilstm, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.biDirectionalLSTM
params = spec_layer_params.params
weight_params = spec_layer_params.weightParams.add()
weight_params_back = spec_layer_params.weightParams.add()
# set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
params.hasBiasVectors = True
params.sequenceOutput = output_all
params.forgetBias = forget_bias
if peep is not None:
params.hasPeepholeVectors = True
params.coupledInputAndForgetGate = coupled_input_forget_gate
params.cellClipThreshold = cell_clip_threshold
#set activations
activation_f = spec_layer_params.activationsForwardLSTM.add()
activation_g = spec_layer_params.activationsForwardLSTM.add()
activation_h = spec_layer_params.activationsForwardLSTM.add()
_set_recurrent_activation(activation_f, inner_activation)
_set_recurrent_activation(activation_g, cell_state_update_activation)
_set_recurrent_activation(activation_h, output_activation)
activation_f_back = spec_layer_params.activationsBackwardLSTM.add()
activation_g_back = spec_layer_params.activationsBackwardLSTM.add()
activation_h_back = spec_layer_params.activationsBackwardLSTM.add()
_set_recurrent_activation(activation_f_back, inner_activation)
_set_recurrent_activation(activation_g_back, cell_state_update_activation)
_set_recurrent_activation(activation_h_back, output_activation)
# Write the forward lstm weights
R_i, R_f, R_o, R_z = W_h
W_i, W_f, W_o, W_z = W_x
weight_params.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b is not None:
b_i, b_f, b_o, b_z = b
weight_params.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep is not None:
p_i, p_f, p_o = peep
weight_params.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten()))
# Write the backward lstm weights
R_i, R_f, R_o, R_z = W_h_back
W_i, W_f, W_o, W_z = W_x_back
weight_params_back.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params_back.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params_back.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params_back.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params_back.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params_back.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params_back.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params_back.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b_back is not None:
b_i, b_f, b_o, b_z = b_back
weight_params_back.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params_back.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params_back.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params_back.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep_back is not None:
p_i, p_f, p_o = peep_back
weight_params_back.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params_back.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params_back.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten())) | python | def add_bidirlstm(self, name, W_h, W_x, b, W_h_back, W_x_back, b_back, hidden_size, input_size,
input_names, output_names,
inner_activation = 'SIGMOID',
cell_state_update_activation = 'TANH',
output_activation = 'TANH',
peep = None, peep_back = None,
output_all = False,
forget_bias = False, coupled_input_forget_gate= False, cell_clip_threshold = 50000.0):
"""
Add a Bi-directional LSTM layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices for the forward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices for the forward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array]
List of biases for the forward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
W_h_back: [numpy.array]
List of recursion weight matrices for the backward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x_back: [numpy.array]
List of input weight matrices for the backward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b_back: [numpy.array]
List of biases for the backward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
The shapes of the biases (hidden_size).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input, c_input, h_reverse_input, c_reverse_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output, c_output, h_reverse_output, c_reverse_output].
inner_activation: str
Inner activation function used at input and forget gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'SIGMOID'.
cell_state_update_activation: str
Cell state update activation function used at the cell state update gate.
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
output_activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
peep: [numpy.array] | None
List of peephole vectors for the forward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
peep_back: [numpy.array] | None
List of peephole vectors for the backward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
output_all: boolean
Whether the LSTM layer should output at every time step. Defaults to False.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
forget_bias: boolean
If True, a vector of 1s is added to forget gate bias. Defaults to False.
coupled_input_forget_gate : boolean
If True, the input gate and forget gate is coupled. i.e. forget gate is not used.
Defaults to False.
cell_clip_threshold : float
The limit on the maximum and minimum values on the cell state.
Defaults to 50.0.
See Also
--------
add_activation, add_simple_rnn, add_unilstm, add_bidirlstm
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.biDirectionalLSTM
params = spec_layer_params.params
weight_params = spec_layer_params.weightParams.add()
weight_params_back = spec_layer_params.weightParams.add()
# set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
params.hasBiasVectors = True
params.sequenceOutput = output_all
params.forgetBias = forget_bias
if peep is not None:
params.hasPeepholeVectors = True
params.coupledInputAndForgetGate = coupled_input_forget_gate
params.cellClipThreshold = cell_clip_threshold
#set activations
activation_f = spec_layer_params.activationsForwardLSTM.add()
activation_g = spec_layer_params.activationsForwardLSTM.add()
activation_h = spec_layer_params.activationsForwardLSTM.add()
_set_recurrent_activation(activation_f, inner_activation)
_set_recurrent_activation(activation_g, cell_state_update_activation)
_set_recurrent_activation(activation_h, output_activation)
activation_f_back = spec_layer_params.activationsBackwardLSTM.add()
activation_g_back = spec_layer_params.activationsBackwardLSTM.add()
activation_h_back = spec_layer_params.activationsBackwardLSTM.add()
_set_recurrent_activation(activation_f_back, inner_activation)
_set_recurrent_activation(activation_g_back, cell_state_update_activation)
_set_recurrent_activation(activation_h_back, output_activation)
# Write the forward lstm weights
R_i, R_f, R_o, R_z = W_h
W_i, W_f, W_o, W_z = W_x
weight_params.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b is not None:
b_i, b_f, b_o, b_z = b
weight_params.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep is not None:
p_i, p_f, p_o = peep
weight_params.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten()))
# Write the backward lstm weights
R_i, R_f, R_o, R_z = W_h_back
W_i, W_f, W_o, W_z = W_x_back
weight_params_back.inputGateWeightMatrix.floatValue.extend(map(float, W_i.flatten()))
weight_params_back.forgetGateWeightMatrix.floatValue.extend(map(float, W_f.flatten()))
weight_params_back.outputGateWeightMatrix.floatValue.extend(map(float, W_o.flatten()))
weight_params_back.blockInputWeightMatrix.floatValue.extend(map(float, W_z.flatten()))
weight_params_back.inputGateRecursionMatrix.floatValue.extend(map(float, R_i.flatten()))
weight_params_back.forgetGateRecursionMatrix.floatValue.extend(map(float, R_f.flatten()))
weight_params_back.outputGateRecursionMatrix.floatValue.extend(map(float, R_o.flatten()))
weight_params_back.blockInputRecursionMatrix.floatValue.extend(map(float, R_z.flatten()))
if b_back is not None:
b_i, b_f, b_o, b_z = b_back
weight_params_back.inputGateBiasVector.floatValue.extend(map(float, b_i.flatten()))
weight_params_back.forgetGateBiasVector.floatValue.extend(map(float, b_f.flatten()))
weight_params_back.outputGateBiasVector.floatValue.extend(map(float, b_o.flatten()))
weight_params_back.blockInputBiasVector.floatValue.extend(map(float, b_z.flatten()))
if peep_back is not None:
p_i, p_f, p_o = peep_back
weight_params_back.inputGatePeepholeVector.floatValue.extend(map(float, p_i.flatten()))
weight_params_back.forgetGatePeepholeVector.floatValue.extend(map(float, p_f.flatten()))
weight_params_back.outputGatePeepholeVector.floatValue.extend(map(float, p_o.flatten())) | [
"def",
"add_bidirlstm",
"(",
"self",
",",
"name",
",",
"W_h",
",",
"W_x",
",",
"b",
",",
"W_h_back",
",",
"W_x_back",
",",
"b_back",
",",
"hidden_size",
",",
"input_size",
",",
"input_names",
",",
"output_names",
",",
"inner_activation",
"=",
"'SIGMOID'",
",",
"cell_state_update_activation",
"=",
"'TANH'",
",",
"output_activation",
"=",
"'TANH'",
",",
"peep",
"=",
"None",
",",
"peep_back",
"=",
"None",
",",
"output_all",
"=",
"False",
",",
"forget_bias",
"=",
"False",
",",
"coupled_input_forget_gate",
"=",
"False",
",",
"cell_clip_threshold",
"=",
"50000.0",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new Layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"for",
"name",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"name",
")",
"for",
"name",
"in",
"output_names",
":",
"spec_layer",
".",
"output",
".",
"append",
"(",
"name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"biDirectionalLSTM",
"params",
"=",
"spec_layer_params",
".",
"params",
"weight_params",
"=",
"spec_layer_params",
".",
"weightParams",
".",
"add",
"(",
")",
"weight_params_back",
"=",
"spec_layer_params",
".",
"weightParams",
".",
"add",
"(",
")",
"# set the parameters",
"spec_layer_params",
".",
"inputVectorSize",
"=",
"input_size",
"spec_layer_params",
".",
"outputVectorSize",
"=",
"hidden_size",
"if",
"b",
"is",
"not",
"None",
":",
"params",
".",
"hasBiasVectors",
"=",
"True",
"params",
".",
"sequenceOutput",
"=",
"output_all",
"params",
".",
"forgetBias",
"=",
"forget_bias",
"if",
"peep",
"is",
"not",
"None",
":",
"params",
".",
"hasPeepholeVectors",
"=",
"True",
"params",
".",
"coupledInputAndForgetGate",
"=",
"coupled_input_forget_gate",
"params",
".",
"cellClipThreshold",
"=",
"cell_clip_threshold",
"#set activations",
"activation_f",
"=",
"spec_layer_params",
".",
"activationsForwardLSTM",
".",
"add",
"(",
")",
"activation_g",
"=",
"spec_layer_params",
".",
"activationsForwardLSTM",
".",
"add",
"(",
")",
"activation_h",
"=",
"spec_layer_params",
".",
"activationsForwardLSTM",
".",
"add",
"(",
")",
"_set_recurrent_activation",
"(",
"activation_f",
",",
"inner_activation",
")",
"_set_recurrent_activation",
"(",
"activation_g",
",",
"cell_state_update_activation",
")",
"_set_recurrent_activation",
"(",
"activation_h",
",",
"output_activation",
")",
"activation_f_back",
"=",
"spec_layer_params",
".",
"activationsBackwardLSTM",
".",
"add",
"(",
")",
"activation_g_back",
"=",
"spec_layer_params",
".",
"activationsBackwardLSTM",
".",
"add",
"(",
")",
"activation_h_back",
"=",
"spec_layer_params",
".",
"activationsBackwardLSTM",
".",
"add",
"(",
")",
"_set_recurrent_activation",
"(",
"activation_f_back",
",",
"inner_activation",
")",
"_set_recurrent_activation",
"(",
"activation_g_back",
",",
"cell_state_update_activation",
")",
"_set_recurrent_activation",
"(",
"activation_h_back",
",",
"output_activation",
")",
"# Write the forward lstm weights",
"R_i",
",",
"R_f",
",",
"R_o",
",",
"R_z",
"=",
"W_h",
"W_i",
",",
"W_f",
",",
"W_o",
",",
"W_z",
"=",
"W_x",
"weight_params",
".",
"inputGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"blockInputWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_z",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"inputGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"blockInputRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_z",
".",
"flatten",
"(",
")",
")",
")",
"if",
"b",
"is",
"not",
"None",
":",
"b_i",
",",
"b_f",
",",
"b_o",
",",
"b_z",
"=",
"b",
"weight_params",
".",
"inputGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"blockInputBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_z",
".",
"flatten",
"(",
")",
")",
")",
"if",
"peep",
"is",
"not",
"None",
":",
"p_i",
",",
"p_f",
",",
"p_o",
"=",
"peep",
"weight_params",
".",
"inputGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"forgetGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params",
".",
"outputGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_o",
".",
"flatten",
"(",
")",
")",
")",
"# Write the backward lstm weights",
"R_i",
",",
"R_f",
",",
"R_o",
",",
"R_z",
"=",
"W_h_back",
"W_i",
",",
"W_f",
",",
"W_o",
",",
"W_z",
"=",
"W_x_back",
"weight_params_back",
".",
"inputGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"forgetGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"outputGateWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"blockInputWeightMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"W_z",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"inputGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"forgetGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"outputGateRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"blockInputRecursionMatrix",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"R_z",
".",
"flatten",
"(",
")",
")",
")",
"if",
"b_back",
"is",
"not",
"None",
":",
"b_i",
",",
"b_f",
",",
"b_o",
",",
"b_z",
"=",
"b_back",
"weight_params_back",
".",
"inputGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"forgetGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"outputGateBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_o",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"blockInputBiasVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"b_z",
".",
"flatten",
"(",
")",
")",
")",
"if",
"peep_back",
"is",
"not",
"None",
":",
"p_i",
",",
"p_f",
",",
"p_o",
"=",
"peep_back",
"weight_params_back",
".",
"inputGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_i",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"forgetGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_f",
".",
"flatten",
"(",
")",
")",
")",
"weight_params_back",
".",
"outputGatePeepholeVector",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"p_o",
".",
"flatten",
"(",
")",
")",
")"
] | Add a Bi-directional LSTM layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: [numpy.array]
List of recursion weight matrices for the forward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x: [numpy.array]
List of input weight matrices for the forward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b: [numpy.array]
List of biases for the forward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
If None, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ).
W_h_back: [numpy.array]
List of recursion weight matrices for the backward layer. The ordering is [R_i, R_f, R_o, R_z],
where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, hidden_size).
W_x_back: [numpy.array]
List of input weight matrices for the backward layer. The ordering is [W_i, W_f, W_o, W_z],
where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate.
The shapes of these matrices are (hidden_size, input_size).
b_back: [numpy.array]
List of biases for the backward layer. The ordering is [b_i, b_f, b_o, b_z],
where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate.
The shapes of the biases (hidden_size).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of the number of channels of input shape.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input, c_input, h_reverse_input, c_reverse_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output, c_output, h_reverse_output, c_reverse_output].
inner_activation: str
Inner activation function used at input and forget gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'SIGMOID'.
cell_state_update_activation: str
Cell state update activation function used at the cell state update gate.
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
output_activation: str
Activation function used at the output gate. Can be one of the following option:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
Defaults to 'TANH'.
peep: [numpy.array] | None
List of peephole vectors for the forward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
peep_back: [numpy.array] | None
List of peephole vectors for the backward layer. The ordering is [p_i, p_f, p_o],
where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, output gate.
The shapes of the peephole vectors are (hidden_size,). Defaults to None.
output_all: boolean
Whether the LSTM layer should output at every time step. Defaults to False.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
forget_bias: boolean
If True, a vector of 1s is added to forget gate bias. Defaults to False.
coupled_input_forget_gate : boolean
If True, the input gate and forget gate is coupled. i.e. forget gate is not used.
Defaults to False.
cell_clip_threshold : float
The limit on the maximum and minimum values on the cell state.
Defaults to 50.0.
See Also
--------
add_activation, add_simple_rnn, add_unilstm, add_bidirlstm | [
"Add",
"a",
"Bi",
"-",
"directional",
"LSTM",
"layer",
"to",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1576-L1759 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_flatten | def add_flatten(self, name, mode, input_name, output_name):
"""
Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is.
Parameters
----------
name: str
The name of this layer.
mode: int
- If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
- If mode == 1, the flatten layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_permute, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.flatten
# Set the parameters
if mode == 0:
spec_layer_params.mode = \
_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_FIRST')
elif mode == 1:
spec_layer_params.mode = \
_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_LAST')
else:
raise NotImplementedError(
'Unknown flatten mode %d ' % mode) | python | def add_flatten(self, name, mode, input_name, output_name):
"""
Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is.
Parameters
----------
name: str
The name of this layer.
mode: int
- If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
- If mode == 1, the flatten layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_permute, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.flatten
# Set the parameters
if mode == 0:
spec_layer_params.mode = \
_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_FIRST')
elif mode == 1:
spec_layer_params.mode = \
_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_LAST')
else:
raise NotImplementedError(
'Unknown flatten mode %d ' % mode) | [
"def",
"add_flatten",
"(",
"self",
",",
"name",
",",
"mode",
",",
"input_name",
",",
"output_name",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"flatten",
"# Set the parameters",
"if",
"mode",
"==",
"0",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"FlattenLayerParams",
".",
"FlattenOrder",
".",
"Value",
"(",
"'CHANNEL_FIRST'",
")",
"elif",
"mode",
"==",
"1",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"FlattenLayerParams",
".",
"FlattenOrder",
".",
"Value",
"(",
"'CHANNEL_LAST'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown flatten mode %d '",
"%",
"mode",
")"
] | Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is.
Parameters
----------
name: str
The name of this layer.
mode: int
- If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
- If mode == 1, the flatten layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_permute, add_reshape | [
"Add",
"a",
"flatten",
"layer",
".",
"Only",
"flattens",
"the",
"channel",
"height",
"and",
"width",
"axis",
".",
"Leaves",
"the",
"sequence",
"axis",
"as",
"is",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1761-L1802 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_slice | def add_slice(self, name, input_name, output_name, axis, start_index = 0, end_index = -1, stride = 1):
"""
Add a slice layer. Equivalent to to numpy slice [start_index:end_index:stride],
start_index is included, while end_index is exclusive.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
axis: str
axis along which input is sliced.
allowed values: 'channel', 'height', 'width'
start_index: int
must be non-negative.
end_index: int
negative indexing is supported.
stride: int
must be positive.
See Also
--------
add_permute, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.slice
# Set the parameters
if start_index < 0:
raise ValueError("Invalid start_index value %d. Must be non-negative." % start_index)
if stride < 1:
raise ValueError("Invalid stride value %d. Must be positive." % stride)
spec_layer_params.startIndex = start_index
spec_layer_params.endIndex = end_index
spec_layer_params.stride = stride
if axis == 'channel':
spec_layer_params.axis = \
_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('CHANNEL_AXIS')
elif axis == 'height':
spec_layer_params.axis = \
_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('HEIGHT_AXIS')
elif axis == 'width':
spec_layer_params.axis = \
_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('WIDTH_AXIS')
else:
raise NotImplementedError(
'Unsupported Slice axis %s ' % axis) | python | def add_slice(self, name, input_name, output_name, axis, start_index = 0, end_index = -1, stride = 1):
"""
Add a slice layer. Equivalent to to numpy slice [start_index:end_index:stride],
start_index is included, while end_index is exclusive.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
axis: str
axis along which input is sliced.
allowed values: 'channel', 'height', 'width'
start_index: int
must be non-negative.
end_index: int
negative indexing is supported.
stride: int
must be positive.
See Also
--------
add_permute, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.slice
# Set the parameters
if start_index < 0:
raise ValueError("Invalid start_index value %d. Must be non-negative." % start_index)
if stride < 1:
raise ValueError("Invalid stride value %d. Must be positive." % stride)
spec_layer_params.startIndex = start_index
spec_layer_params.endIndex = end_index
spec_layer_params.stride = stride
if axis == 'channel':
spec_layer_params.axis = \
_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('CHANNEL_AXIS')
elif axis == 'height':
spec_layer_params.axis = \
_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('HEIGHT_AXIS')
elif axis == 'width':
spec_layer_params.axis = \
_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value('WIDTH_AXIS')
else:
raise NotImplementedError(
'Unsupported Slice axis %s ' % axis) | [
"def",
"add_slice",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"axis",
",",
"start_index",
"=",
"0",
",",
"end_index",
"=",
"-",
"1",
",",
"stride",
"=",
"1",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"slice",
"# Set the parameters",
"if",
"start_index",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid start_index value %d. Must be non-negative.\"",
"%",
"start_index",
")",
"if",
"stride",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Invalid stride value %d. Must be positive.\"",
"%",
"stride",
")",
"spec_layer_params",
".",
"startIndex",
"=",
"start_index",
"spec_layer_params",
".",
"endIndex",
"=",
"end_index",
"spec_layer_params",
".",
"stride",
"=",
"stride",
"if",
"axis",
"==",
"'channel'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"SliceLayerParams",
".",
"SliceAxis",
".",
"Value",
"(",
"'CHANNEL_AXIS'",
")",
"elif",
"axis",
"==",
"'height'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"SliceLayerParams",
".",
"SliceAxis",
".",
"Value",
"(",
"'HEIGHT_AXIS'",
")",
"elif",
"axis",
"==",
"'width'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"SliceLayerParams",
".",
"SliceAxis",
".",
"Value",
"(",
"'WIDTH_AXIS'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unsupported Slice axis %s '",
"%",
"axis",
")"
] | Add a slice layer. Equivalent to numpy slice [start_index:end_index:stride],
start_index is included, while end_index is exclusive.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
axis: str
axis along which input is sliced.
allowed values: 'channel', 'height', 'width'
start_index: int
must be non-negative.
end_index: int
negative indexing is supported.
stride: int
must be positive.
See Also
--------
add_permute, add_reshape | [
"Add",
"a",
"slice",
"layer",
".",
"Equivalent",
"to",
"numpy",
"slice",
"[",
"start_index",
":",
"end_index",
":",
"stride",
"]",
"start_index",
"is",
"included",
"while",
"end_index",
"is",
"exclusive",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1804-L1864 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_reorganize_data | def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2):
"""
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE".
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
- If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension.
Input is spatially divided into non-overlapping blocks of size block_size X block_size
and data from each block is moved to the channel dimension.
Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size].
- If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension.
Reverse of the operation 'SPACE_TO_DEPTH'.
Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
block_size: int
Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size)
must divide C when mode is 'DEPTH_TO_SPACE'.
See Also
--------
add_flatten, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reorganizeData
# Set the parameters
if block_size < 2:
raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size)
spec_layer_params.blockSize = block_size
if mode == 'SPACE_TO_DEPTH':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH')
elif mode == 'DEPTH_TO_SPACE':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE')
else:
raise NotImplementedError(
'Unknown reorganization mode %s ' % mode) | python | def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2):
"""
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE".
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
- If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension.
Input is spatially divided into non-overlapping blocks of size block_size X block_size
and data from each block is moved to the channel dimension.
Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size].
- If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension.
Reverse of the operation 'SPACE_TO_DEPTH'.
Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
block_size: int
Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size)
must divide C when mode is 'DEPTH_TO_SPACE'.
See Also
--------
add_flatten, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reorganizeData
# Set the parameters
if block_size < 2:
raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size)
spec_layer_params.blockSize = block_size
if mode == 'SPACE_TO_DEPTH':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH')
elif mode == 'DEPTH_TO_SPACE':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE')
else:
raise NotImplementedError(
'Unknown reorganization mode %s ' % mode) | [
"def",
"add_reorganize_data",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"mode",
"=",
"'SPACE_TO_DEPTH'",
",",
"block_size",
"=",
"2",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"reorganizeData",
"# Set the parameters",
"if",
"block_size",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Invalid block_size value %d. Must be greater than 1.\"",
"%",
"block_size",
")",
"spec_layer_params",
".",
"blockSize",
"=",
"block_size",
"if",
"mode",
"==",
"'SPACE_TO_DEPTH'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReorganizeDataLayerParams",
".",
"ReorganizationType",
".",
"Value",
"(",
"'SPACE_TO_DEPTH'",
")",
"elif",
"mode",
"==",
"'DEPTH_TO_SPACE'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReorganizeDataLayerParams",
".",
"ReorganizationType",
".",
"Value",
"(",
"'DEPTH_TO_SPACE'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown reorganization mode %s '",
"%",
"mode",
")"
] | Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE".
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
- If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension.
Input is spatially divided into non-overlapping blocks of size block_size X block_size
and data from each block is moved to the channel dimension.
Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size].
- If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension.
Reverse of the operation 'SPACE_TO_DEPTH'.
Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
block_size: int
Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size)
must divide C when mode is 'DEPTH_TO_SPACE'.
See Also
--------
add_flatten, add_reshape | [
"Add",
"a",
"data",
"reorganization",
"layer",
"of",
"type",
"SPACE_TO_DEPTH",
"or",
"DEPTH_TO_SPACE",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1867-L1922 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_batchnorm | def add_batchnorm(self, name, channels, gamma, beta,
mean = None, variance = None,
input_name = 'data', output_name = 'out',
compute_mean_var = False,
instance_normalization = False, epsilon = 1e-5):
"""
Add a Batch Normalization layer. Batch Normalization operation is
defined as:
`y = gamma * (x - mean) / sqrt(variance + epsilon) + beta`
Parameters
----------
name: str
The name of this layer.
channels: int
Number of channels of the input blob.
gamma: numpy.array
Values of gamma. Must be numpy array of shape (channels, ).
beta: numpy.array
Values of beta. Must be numpy array of shape (channels, ).
mean: numpy.array
Means of the input blob on each channel. Must be numpy array of shape (channels, ).
variance:
Variances of the input blob on each channel. Must be numpy array of shape (channels, ).
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
compute_mean_var: bool
Set to True if mean and variance is to be computed from the input data.
instance_normalization: bool
Set compute_mean_var and this to True to perform
instance normalization i.e., mean and variance are computed from the single input instance.
epsilon: float
Value of epsilon. Defaults to 1e-5 if not specified.
See Also
--------
add_convolution, add_pooling, add_inner_product
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.batchnorm
# Set the parameters
spec_layer_params.channels = channels
spec_layer_params.gamma.floatValue.extend(map(float, gamma.flatten()))
spec_layer_params.beta.floatValue.extend(map(float, beta.flatten()))
spec_layer_params.epsilon = epsilon
spec_layer_params.computeMeanVar = compute_mean_var
spec_layer_params.instanceNormalization = instance_normalization
if compute_mean_var:
if not instance_normalization:
raise NotImplementedError('Batch-instance norm is currently not supported')
if not compute_mean_var:
spec_layer_params.mean.floatValue.extend(map(float, mean.flatten()))
spec_layer_params.variance.floatValue.extend(map(float, variance.flatten())) | python | def add_batchnorm(self, name, channels, gamma, beta,
mean = None, variance = None,
input_name = 'data', output_name = 'out',
compute_mean_var = False,
instance_normalization = False, epsilon = 1e-5):
"""
Add a Batch Normalization layer. Batch Normalization operation is
defined as:
`y = gamma * (x - mean) / sqrt(variance + epsilon) + beta`
Parameters
----------
name: str
The name of this layer.
channels: int
Number of channels of the input blob.
gamma: numpy.array
Values of gamma. Must be numpy array of shape (channels, ).
beta: numpy.array
Values of beta. Must be numpy array of shape (channels, ).
mean: numpy.array
Means of the input blob on each channel. Must be numpy array of shape (channels, ).
variance:
Variances of the input blob on each channel. Must be numpy array of shape (channels, ).
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
compute_mean_var: bool
Set to True if mean and variance is to be computed from the input data.
instance_normalization: bool
Set compute_mean_var and this to True to perform
instance normalization i.e., mean and variance are computed from the single input instance.
epsilon: float
Value of epsilon. Defaults to 1e-5 if not specified.
See Also
--------
add_convolution, add_pooling, add_inner_product
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.batchnorm
# Set the parameters
spec_layer_params.channels = channels
spec_layer_params.gamma.floatValue.extend(map(float, gamma.flatten()))
spec_layer_params.beta.floatValue.extend(map(float, beta.flatten()))
spec_layer_params.epsilon = epsilon
spec_layer_params.computeMeanVar = compute_mean_var
spec_layer_params.instanceNormalization = instance_normalization
if compute_mean_var:
if not instance_normalization:
raise NotImplementedError('Batch-instance norm is currently not supported')
if not compute_mean_var:
spec_layer_params.mean.floatValue.extend(map(float, mean.flatten()))
spec_layer_params.variance.floatValue.extend(map(float, variance.flatten())) | [
"def",
"add_batchnorm",
"(",
"self",
",",
"name",
",",
"channels",
",",
"gamma",
",",
"beta",
",",
"mean",
"=",
"None",
",",
"variance",
"=",
"None",
",",
"input_name",
"=",
"'data'",
",",
"output_name",
"=",
"'out'",
",",
"compute_mean_var",
"=",
"False",
",",
"instance_normalization",
"=",
"False",
",",
"epsilon",
"=",
"1e-5",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"batchnorm",
"# Set the parameters",
"spec_layer_params",
".",
"channels",
"=",
"channels",
"spec_layer_params",
".",
"gamma",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"gamma",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"beta",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"beta",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"epsilon",
"=",
"epsilon",
"spec_layer_params",
".",
"computeMeanVar",
"=",
"compute_mean_var",
"spec_layer_params",
".",
"instanceNormalization",
"=",
"instance_normalization",
"if",
"compute_mean_var",
":",
"if",
"not",
"instance_normalization",
":",
"raise",
"NotImplementedError",
"(",
"'Batch-instance norm is currently not supported'",
")",
"if",
"not",
"compute_mean_var",
":",
"spec_layer_params",
".",
"mean",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"mean",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"variance",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"variance",
".",
"flatten",
"(",
")",
")",
")"
] | Add a Batch Normalization layer. Batch Normalization operation is
defined as:
`y = gamma * (x - mean) / sqrt(variance + epsilon) + beta`
Parameters
----------
name: str
The name of this layer.
channels: int
Number of channels of the input blob.
gamma: numpy.array
Values of gamma. Must be numpy array of shape (channels, ).
beta: numpy.array
Values of beta. Must be numpy array of shape (channels, ).
mean: numpy.array
Means of the input blob on each channel. Must be numpy array of shape (channels, ).
variance:
Variances of the input blob on each channel. Must be numpy array of shape (channels, ).
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
compute_mean_var: bool
Set to True if mean and variance is to be computed from the input data.
instance_normalization: bool
Set compute_mean_var and this to True to perform
instance normalization i.e., mean and variance are computed from the single input instance.
epsilon: float
Value of epsilon. Defaults to 1e-5 if not specified.
See Also
--------
add_convolution, add_pooling, add_inner_product | [
"Add",
"a",
"Batch",
"Normalization",
"layer",
".",
"Batch",
"Normalization",
"operation",
"is",
"defined",
"as",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1924-L1991 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_permute | def add_permute(self, name, dim, input_name, output_name):
"""
Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W]
Parameters
----------
name: str
The name of this layer.
dim: tuple
The order in which to permute the input dimensions = [seq,C,H,W].
Must have length 4 and a permutation of ``[0, 1, 2, 3]``.
examples:
Lets say input has shape: [seq, C, H, W].
If ``dim`` is set to ``[0, 3, 1, 2]``,
then the output has shape ``[W,C,H]``
and has the same sequence length as that of the input.
If ``dim`` is set to ``[3, 1, 2, 0]``,
and the input is a sequence of data
with length ``Seq`` and shape ``[C, 1, 1]``,
then the output is a unit sequence of data with shape ``[C, 1, Seq]``.
If ``dim`` is set to ``[0, 3, 2, 1]``,
the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``.
If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``,
the output is the same as the input.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_flatten, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.permute
spec_layer_params.axis.extend(list(dim))
if len(dim) != 4:
raise ValueError("Length of the 'dim' parameter must be equal to 4") | python | def add_permute(self, name, dim, input_name, output_name):
"""
Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W]
Parameters
----------
name: str
The name of this layer.
dim: tuple
The order in which to permute the input dimensions = [seq,C,H,W].
Must have length 4 and a permutation of ``[0, 1, 2, 3]``.
examples:
Lets say input has shape: [seq, C, H, W].
If ``dim`` is set to ``[0, 3, 1, 2]``,
then the output has shape ``[W,C,H]``
and has the same sequence length as that of the input.
If ``dim`` is set to ``[3, 1, 2, 0]``,
and the input is a sequence of data
with length ``Seq`` and shape ``[C, 1, 1]``,
then the output is a unit sequence of data with shape ``[C, 1, Seq]``.
If ``dim`` is set to ``[0, 3, 2, 1]``,
the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``.
If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``,
the output is the same as the input.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_flatten, add_reshape
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.permute
spec_layer_params.axis.extend(list(dim))
if len(dim) != 4:
raise ValueError("Length of the 'dim' parameter must be equal to 4") | [
"def",
"add_permute",
"(",
"self",
",",
"name",
",",
"dim",
",",
"input_name",
",",
"output_name",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"permute",
"spec_layer_params",
".",
"axis",
".",
"extend",
"(",
"list",
"(",
"dim",
")",
")",
"if",
"len",
"(",
"dim",
")",
"!=",
"4",
":",
"raise",
"ValueError",
"(",
"\"Length of the 'dim' parameter must be equal to 4\"",
")"
] | Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W]
Parameters
----------
name: str
The name of this layer.
dim: tuple
The order in which to permute the input dimensions = [seq,C,H,W].
Must have length 4 and a permutation of ``[0, 1, 2, 3]``.
examples:
Lets say input has shape: [seq, C, H, W].
If ``dim`` is set to ``[0, 3, 1, 2]``,
then the output has shape ``[W,C,H]``
and has the same sequence length as that of the input.
If ``dim`` is set to ``[3, 1, 2, 0]``,
and the input is a sequence of data
with length ``Seq`` and shape ``[C, 1, 1]``,
then the output is a unit sequence of data with shape ``[C, 1, Seq]``.
If ``dim`` is set to ``[0, 3, 2, 1]``,
the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``.
If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``,
the output is the same as the input.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_flatten, add_reshape | [
"Add",
"a",
"permute",
"layer",
".",
"Assumes",
"that",
"the",
"input",
"has",
"dimensions",
"in",
"the",
"order",
"[",
"Seq",
"C",
"H",
"W",
"]"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L1994-L2046 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_reshape | def add_reshape(self, name, input_name, output_name, target_shape, mode):
"""
Add a reshape layer. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
target_shape: tuple
Shape of the output blob. The product of target_shape must be equal
to the product of the dimensions of the input blob.
Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W).
mode: int
- If mode == 0, the reshape layer is in CHANNEL_FIRST mode.
- If mode == 1, the reshape layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_flatten, add_permute
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reshape
spec_layer_params.targetShape.extend(target_shape)
if mode == 0:
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_FIRST')
else:
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_LAST')
if len(target_shape) != 4 and len(target_shape) != 3:
raise ValueError("Length of the 'target-shape' parameter must be equal to 3 or 4") | python | def add_reshape(self, name, input_name, output_name, target_shape, mode):
"""
Add a reshape layer. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
target_shape: tuple
Shape of the output blob. The product of target_shape must be equal
to the product of the dimensions of the input blob.
Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W).
mode: int
- If mode == 0, the reshape layer is in CHANNEL_FIRST mode.
- If mode == 1, the reshape layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_flatten, add_permute
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reshape
spec_layer_params.targetShape.extend(target_shape)
if mode == 0:
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_FIRST')
else:
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_LAST')
if len(target_shape) != 4 and len(target_shape) != 3:
raise ValueError("Length of the 'target-shape' parameter must be equal to 3 or 4") | [
"def",
"add_reshape",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"target_shape",
",",
"mode",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"reshape",
"spec_layer_params",
".",
"targetShape",
".",
"extend",
"(",
"target_shape",
")",
"if",
"mode",
"==",
"0",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReshapeLayerParams",
".",
"ReshapeOrder",
".",
"Value",
"(",
"'CHANNEL_FIRST'",
")",
"else",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReshapeLayerParams",
".",
"ReshapeOrder",
".",
"Value",
"(",
"'CHANNEL_LAST'",
")",
"if",
"len",
"(",
"target_shape",
")",
"!=",
"4",
"and",
"len",
"(",
"target_shape",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"Length of the 'target-shape' parameter must be equal to 3 or 4\"",
")"
] | Add a reshape layer. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
target_shape: tuple
Shape of the output blob. The product of target_shape must be equal
to the product of the dimensions of the input blob.
Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W).
mode: int
- If mode == 0, the reshape layer is in CHANNEL_FIRST mode.
- If mode == 1, the reshape layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_flatten, add_permute | [
"Add",
"a",
"reshape",
"layer",
".",
"Kindly",
"refer",
"to",
"NeuralNetwork",
".",
"proto",
"for",
"details",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2048-L2094 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_reduce | def add_reduce(self, name, input_name, output_name, axis, mode, epsilon = 1e-6):
"""
Add a reduce layer. Applies the function specified by the parameter mode,
along dimension(s) specified by the parameter axis.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
axis: str
dimensions along which the reduction operation is applied.
Allowed values: 'CHW', 'HW', 'C', 'H', 'W'
mode: str
Reduction operation to be applied.
Allowed values:
'sum', 'avg', 'prod', 'logsum', 'sumsquare', 'L1', 'L2', 'max', 'min', 'argmax'.
'argmax' is only supported with axis values 'C', 'H' and 'W'.
epsilon: float
number that is added to the input when 'logsum' function is applied.
See Also
--------
add_activation
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reduce
spec_layer_params.epsilon = epsilon
if mode == 'sum':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('SUM')
elif mode == 'avg':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('AVG')
elif mode == 'prod':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('PROD')
elif mode == 'logsum':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('LOGSUM')
elif mode == 'sumsquare':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('SUMSQUARE')
elif mode == 'L1':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('L1')
elif mode == 'L2':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('L2')
elif mode == 'max':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('MAX')
elif mode == 'min':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('MIN')
elif mode == 'argmax':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('ARGMAX')
else:
raise NotImplementedError('Unknown reduction operation %s ' % mode)
if axis == 'CHW':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('CHW')
elif axis == 'HW':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('HW')
elif axis == 'C':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('C')
elif axis == 'H':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('H')
elif axis == 'W':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('W')
else:
raise NotImplementedError('Unknown reduction axis %s ' % axis) | python | def add_reduce(self, name, input_name, output_name, axis, mode, epsilon = 1e-6):
"""
Add a reduce layer. Applies the function specified by the parameter mode,
along dimension(s) specified by the parameter axis.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
axis: str
dimensions along which the reduction operation is applied.
Allowed values: 'CHW', 'HW', 'C', 'H', 'W'
mode: str
Reduction operation to be applied.
Allowed values:
'sum', 'avg', 'prod', 'logsum', 'sumsquare', 'L1', 'L2', 'max', 'min', 'argmax'.
'argmax' is only supported with axis values 'C', 'H' and 'W'.
epsilon: float
number that is added to the input when 'logsum' function is applied.
See Also
--------
add_activation
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reduce
spec_layer_params.epsilon = epsilon
if mode == 'sum':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('SUM')
elif mode == 'avg':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('AVG')
elif mode == 'prod':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('PROD')
elif mode == 'logsum':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('LOGSUM')
elif mode == 'sumsquare':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('SUMSQUARE')
elif mode == 'L1':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('L1')
elif mode == 'L2':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('L2')
elif mode == 'max':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('MAX')
elif mode == 'min':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('MIN')
elif mode == 'argmax':
spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value('ARGMAX')
else:
raise NotImplementedError('Unknown reduction operation %s ' % mode)
if axis == 'CHW':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('CHW')
elif axis == 'HW':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('HW')
elif axis == 'C':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('C')
elif axis == 'H':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('H')
elif axis == 'W':
spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value('W')
else:
raise NotImplementedError('Unknown reduction axis %s ' % axis) | [
"def",
"add_reduce",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"axis",
",",
"mode",
",",
"epsilon",
"=",
"1e-6",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"reduce",
"spec_layer_params",
".",
"epsilon",
"=",
"epsilon",
"if",
"mode",
"==",
"'sum'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'SUM'",
")",
"elif",
"mode",
"==",
"'avg'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'AVG'",
")",
"elif",
"mode",
"==",
"'prod'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'PROD'",
")",
"elif",
"mode",
"==",
"'logsum'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'LOGSUM'",
")",
"elif",
"mode",
"==",
"'sumsquare'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'SUMSQUARE'",
")",
"elif",
"mode",
"==",
"'L1'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'L1'",
")",
"elif",
"mode",
"==",
"'L2'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'L2'",
")",
"elif",
"mode",
"==",
"'max'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'MAX'",
")",
"elif",
"mode",
"==",
"'min'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'MIN'",
")",
"elif",
"mode",
"==",
"'argmax'",
":",
"spec_layer_params",
".",
"mode",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceOperation",
".",
"Value",
"(",
"'ARGMAX'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown reduction operation %s '",
"%",
"mode",
")",
"if",
"axis",
"==",
"'CHW'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceAxis",
".",
"Value",
"(",
"'CHW'",
")",
"elif",
"axis",
"==",
"'HW'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceAxis",
".",
"Value",
"(",
"'HW'",
")",
"elif",
"axis",
"==",
"'C'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceAxis",
".",
"Value",
"(",
"'C'",
")",
"elif",
"axis",
"==",
"'H'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceAxis",
".",
"Value",
"(",
"'H'",
")",
"elif",
"axis",
"==",
"'W'",
":",
"spec_layer_params",
".",
"axis",
"=",
"_NeuralNetwork_pb2",
".",
"ReduceLayerParams",
".",
"ReduceAxis",
".",
"Value",
"(",
"'W'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown reduction axis %s '",
"%",
"axis",
")"
] | Add a reduce layer. Applies the function specified by the parameter mode,
along dimension(s) specified by the parameter axis.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
axis: str
dimensions along which the reduction operation is applied.
Allowed values: 'CHW', 'HW', 'C', 'H', 'W'
mode: str
Reduction operation to be applied.
Allowed values:
'sum', 'avg', 'prod', 'logsum', 'sumsquare', 'L1', 'L2', 'max', 'min', 'argmax'.
'argmax' is only suuported with axis values 'C', 'H' and 'W'.
epsilon: float
number that is added to the input when 'logsum' function is applied.
See Also
--------
add_activation | [
"Add",
"a",
"reduce",
"layer",
".",
"Applies",
"the",
"function",
"specified",
"by",
"the",
"parameter",
"mode",
"along",
"dimension",
"(",
"s",
")",
"specified",
"by",
"the",
"parameter",
"axis",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2096-L2175 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_lrn | def add_lrn(self, name, input_name, output_name, alpha, beta, local_size, k = 1.0):
"""
Add a LRN (local response normalization) layer. Please see the LRNLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer. Supports "across" channels normalization.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
alpha: float
multiplicative constant in the denominator.
beta: float
exponent of the normalizing term in the denominator.
k: float
bias term in the denominator. Must be positive.
local_size: int
size of the neighborhood along the channel axis.
See Also
--------
add_l2_normalize, add_mvn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.lrn
spec_layer_params.alpha = alpha
spec_layer_params.beta = beta
spec_layer_params.localSize = local_size
spec_layer_params.k = k | python | def add_lrn(self, name, input_name, output_name, alpha, beta, local_size, k = 1.0):
"""
Add a LRN (local response normalization) layer. Please see the LRNLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer. Supports "across" channels normalization.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
alpha: float
multiplicative constant in the denominator.
beta: float
exponent of the normalizing term in the denominator.
k: float
bias term in the denominator. Must be positive.
local_size: int
size of the neighborhood along the channel axis.
See Also
--------
add_l2_normalize, add_mvn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.lrn
spec_layer_params.alpha = alpha
spec_layer_params.beta = beta
spec_layer_params.localSize = local_size
spec_layer_params.k = k | [
"def",
"add_lrn",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"alpha",
",",
"beta",
",",
"local_size",
",",
"k",
"=",
"1.0",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"lrn",
"spec_layer_params",
".",
"alpha",
"=",
"alpha",
"spec_layer_params",
".",
"beta",
"=",
"beta",
"spec_layer_params",
".",
"localSize",
"=",
"local_size",
"spec_layer_params",
".",
"k",
"=",
"k"
] | Add a LRN (local response normalization) layer. Please see the LRNLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer. Supports "across" channels normalization.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
alpha: float
multiplicative constant in the denominator.
beta: float
exponent of the normalizing term in the denominator.
k: float
bias term in the denominator. Must be positive.
local_size: int
size of the neighborhood along the channel axis.
See Also
--------
add_l2_normalize, add_mvn | [
"Add",
"a",
"LRN",
"(",
"local",
"response",
"normalization",
")",
"layer",
".",
"Please",
"see",
"the",
"LRNLayerParams",
"message",
"in",
"Core",
"ML",
"neural",
"network",
"protobuf",
"for",
"more",
"information",
"about",
"the",
"operation",
"of",
"this",
"layer",
".",
"Supports",
"across",
"channels",
"normalization",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2178-L2224 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_mvn | def add_mvn(self, name, input_name, output_name, across_channels = True, normalize_variance = True, epsilon = 1e-5):
"""
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
across_channels: boolean
If False, each channel plane is normalized separately
If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
If False, only mean subtraction is performed.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_l2_normalize, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.mvn
spec_layer_params.acrossChannels = across_channels
spec_layer_params.normalizeVariance = normalize_variance
spec_layer_params.epsilon = epsilon | python | def add_mvn(self, name, input_name, output_name, across_channels = True, normalize_variance = True, epsilon = 1e-5):
"""
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
across_channels: boolean
If False, each channel plane is normalized separately
If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
If False, only mean subtraction is performed.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_l2_normalize, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.mvn
spec_layer_params.acrossChannels = across_channels
spec_layer_params.normalizeVariance = normalize_variance
spec_layer_params.epsilon = epsilon | [
"def",
"add_mvn",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"across_channels",
"=",
"True",
",",
"normalize_variance",
"=",
"True",
",",
"epsilon",
"=",
"1e-5",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"mvn",
"spec_layer_params",
".",
"acrossChannels",
"=",
"across_channels",
"spec_layer_params",
".",
"normalizeVariance",
"=",
"normalize_variance",
"spec_layer_params",
".",
"epsilon",
"=",
"epsilon"
] | Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
across_channels: boolean
If False, each channel plane is normalized separately
If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
If False, only mean subtraction is performed.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_l2_normalize, add_lrn | [
"Add",
"an",
"MVN",
"(",
"mean",
"variance",
"normalization",
")",
"layer",
".",
"Computes",
"mean",
"variance",
"and",
"normalizes",
"the",
"input",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2226-L2268 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_l2_normalize | def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5):
"""
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
the square root of the sum of squares of all elements of the input along C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.l2normalize
spec_layer_params.epsilon = epsilon | python | def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5):
"""
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
the square root of the sum of squares of all elements of the input along C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.l2normalize
spec_layer_params.epsilon = epsilon | [
"def",
"add_l2_normalize",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"epsilon",
"=",
"1e-5",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"l2normalize",
"spec_layer_params",
".",
"epsilon",
"=",
"epsilon"
] | Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
the square root of the sum of squares of all elements of the input along C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn | [
"Add",
"L2",
"normalize",
"layer",
".",
"Normalizes",
"the",
"input",
"by",
"the",
"L2",
"norm",
"i",
".",
"e",
".",
"divides",
"by",
"the",
"the",
"square",
"root",
"of",
"the",
"sum",
"of",
"squares",
"of",
"all",
"elements",
"of",
"the",
"input",
"along",
"C",
"H",
"and",
"W",
"dimensions",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2271-L2305 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_unary | def add_unary(self, name, input_name, output_name, mode, alpha = 1.0,
shift = 0, scale = 1.0, epsilon = 1e-6):
"""
Add a Unary layer. Applies the specified function (mode) to all the elements of the input.
Please see the UnaryFunctionLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer.
Prior to the application of the function the input can be scaled and shifted by using the 'scale',
'shift' parameters.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Unary function.
Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', threshold'.
alpha: float
constant used in with modes 'power' and 'threshold'.
shift, scale: float
input is modified by scale and shift prior to the application of the unary function.
epsilon: float
small bias to prevent division by zero.
See Also
--------
add_activation
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.unary
spec_layer_params.epsilon = epsilon
spec_layer_params.alpha = alpha
spec_layer_params.shift = shift
spec_layer_params.scale = scale
if mode == 'sqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('SQRT')
elif mode == 'rsqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('RSQRT')
elif mode == 'inverse':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('INVERSE')
elif mode == 'power':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('POWER')
elif mode == 'exp':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('EXP')
elif mode == 'log':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('LOG')
elif mode == 'abs':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('ABS')
elif mode == 'threshold':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('THRESHOLD')
else:
raise NotImplementedError('Unknown unary function %s ' % mode) | python | def add_unary(self, name, input_name, output_name, mode, alpha = 1.0,
shift = 0, scale = 1.0, epsilon = 1e-6):
"""
Add a Unary layer. Applies the specified function (mode) to all the elements of the input.
Please see the UnaryFunctionLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer.
Prior to the application of the function the input can be scaled and shifted by using the 'scale',
'shift' parameters.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Unary function.
Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', threshold'.
alpha: float
constant used in with modes 'power' and 'threshold'.
shift, scale: float
input is modified by scale and shift prior to the application of the unary function.
epsilon: float
small bias to prevent division by zero.
See Also
--------
add_activation
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.unary
spec_layer_params.epsilon = epsilon
spec_layer_params.alpha = alpha
spec_layer_params.shift = shift
spec_layer_params.scale = scale
if mode == 'sqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('SQRT')
elif mode == 'rsqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('RSQRT')
elif mode == 'inverse':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('INVERSE')
elif mode == 'power':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('POWER')
elif mode == 'exp':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('EXP')
elif mode == 'log':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('LOG')
elif mode == 'abs':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('ABS')
elif mode == 'threshold':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('THRESHOLD')
else:
raise NotImplementedError('Unknown unary function %s ' % mode) | [
"def",
"add_unary",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"mode",
",",
"alpha",
"=",
"1.0",
",",
"shift",
"=",
"0",
",",
"scale",
"=",
"1.0",
",",
"epsilon",
"=",
"1e-6",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"unary",
"spec_layer_params",
".",
"epsilon",
"=",
"epsilon",
"spec_layer_params",
".",
"alpha",
"=",
"alpha",
"spec_layer_params",
".",
"shift",
"=",
"shift",
"spec_layer_params",
".",
"scale",
"=",
"scale",
"if",
"mode",
"==",
"'sqrt'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'SQRT'",
")",
"elif",
"mode",
"==",
"'rsqrt'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'RSQRT'",
")",
"elif",
"mode",
"==",
"'inverse'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'INVERSE'",
")",
"elif",
"mode",
"==",
"'power'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'POWER'",
")",
"elif",
"mode",
"==",
"'exp'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'EXP'",
")",
"elif",
"mode",
"==",
"'log'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'LOG'",
")",
"elif",
"mode",
"==",
"'abs'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'ABS'",
")",
"elif",
"mode",
"==",
"'threshold'",
":",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"UnaryFunctionLayerParams",
".",
"Operation",
".",
"Value",
"(",
"'THRESHOLD'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown unary function %s '",
"%",
"mode",
")"
] | Add a Unary layer. Applies the specified function (mode) to all the elements of the input.
Please see the UnaryFunctionLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer.
Prior to the application of the function the input can be scaled and shifted by using the 'scale',
'shift' parameters.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Unary function.
Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', threshold'.
alpha: float
constant used in with modes 'power' and 'threshold'.
shift, scale: float
input is modified by scale and shift prior to the application of the unary function.
epsilon: float
small bias to prevent division by zero.
See Also
--------
add_activation | [
"Add",
"a",
"Unary",
"layer",
".",
"Applies",
"the",
"specified",
"function",
"(",
"mode",
")",
"to",
"all",
"the",
"elements",
"of",
"the",
"input",
".",
"Please",
"see",
"the",
"UnaryFunctionLayerParams",
"message",
"in",
"Core",
"ML",
"neural",
"network",
"protobuf",
"for",
"more",
"information",
"about",
"the",
"operation",
"of",
"this",
"layer",
".",
"Prior",
"to",
"the",
"application",
"of",
"the",
"function",
"the",
"input",
"can",
"be",
"scaled",
"and",
"shifted",
"by",
"using",
"the",
"scale",
"shift",
"parameters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2308-L2377 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_split | def add_split(self, name, input_name, output_names):
"""
Add a Split layer that uniformly splits the input along the channel dimension
to produce multiple outputs.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_names: [str]
List of output blob names of this layer.
See Also
--------
add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.extend(output_names)
spec_layer_params = spec_layer.split
spec_layer_params.nOutputs = len(output_names) | python | def add_split(self, name, input_name, output_names):
"""
Add a Split layer that uniformly splits the input along the channel dimension
to produce multiple outputs.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_names: [str]
List of output blob names of this layer.
See Also
--------
add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.extend(output_names)
spec_layer_params = spec_layer.split
spec_layer_params.nOutputs = len(output_names) | [
"def",
"add_split",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_names",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"extend",
"(",
"output_names",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"split",
"spec_layer_params",
".",
"nOutputs",
"=",
"len",
"(",
"output_names",
")"
] | Add a Split layer that uniformly splits the input along the channel dimension
to produce multiple outputs.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_names: [str]
List of output blob names of this layer.
See Also
--------
add_elementwise | [
"Add",
"a",
"Split",
"layer",
"that",
"uniformly",
"splits",
"the",
"input",
"along",
"the",
"channel",
"dimension",
"to",
"produce",
"multiple",
"outputs",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2379-L2409 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_load_constant | def add_load_constant(self, name, output_name, constant_value, shape):
"""
Add a load constant layer.
Parameters
----------
name: str
The name of this layer.
output_name: str
The output blob name of this layer.
constant_value: numpy.array
value of the constant as a numpy array.
shape: [int]
List of ints representing the shape of the constant. Must be of length 3: [C,H,W]
See Also
--------
add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.loadConstant
data = spec_layer_params.data
data.floatValue.extend(map(float, constant_value.flatten()))
spec_layer_params.shape.extend(shape)
if len(data.floatValue) != np.prod(shape):
raise ValueError("Dimensions of 'shape' do not match the size of the provided constant")
if len(shape) != 3:
raise ValueError("'shape' must be of length 3") | python | def add_load_constant(self, name, output_name, constant_value, shape):
"""
Add a load constant layer.
Parameters
----------
name: str
The name of this layer.
output_name: str
The output blob name of this layer.
constant_value: numpy.array
value of the constant as a numpy array.
shape: [int]
List of ints representing the shape of the constant. Must be of length 3: [C,H,W]
See Also
--------
add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.loadConstant
data = spec_layer_params.data
data.floatValue.extend(map(float, constant_value.flatten()))
spec_layer_params.shape.extend(shape)
if len(data.floatValue) != np.prod(shape):
raise ValueError("Dimensions of 'shape' do not match the size of the provided constant")
if len(shape) != 3:
raise ValueError("'shape' must be of length 3") | [
"def",
"add_load_constant",
"(",
"self",
",",
"name",
",",
"output_name",
",",
"constant_value",
",",
"shape",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"loadConstant",
"data",
"=",
"spec_layer_params",
".",
"data",
"data",
".",
"floatValue",
".",
"extend",
"(",
"map",
"(",
"float",
",",
"constant_value",
".",
"flatten",
"(",
")",
")",
")",
"spec_layer_params",
".",
"shape",
".",
"extend",
"(",
"shape",
")",
"if",
"len",
"(",
"data",
".",
"floatValue",
")",
"!=",
"np",
".",
"prod",
"(",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"Dimensions of 'shape' do not match the size of the provided constant\"",
")",
"if",
"len",
"(",
"shape",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"'shape' must be of length 3\"",
")"
] | Add a load constant layer.
Parameters
----------
name: str
The name of this layer.
output_name: str
The output blob name of this layer.
constant_value: numpy.array
value of the constant as a numpy array.
shape: [int]
List of ints representing the shape of the constant. Must be of length 3: [C,H,W]
See Also
--------
add_elementwise | [
"Add",
"a",
"load",
"constant",
"layer",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2411-L2452 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.add_custom | def add_custom(self, name, input_names, output_names, custom_proto_spec = None):
"""
Add a custom layer.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
The input blob names to this layer.
output_names: [str]
The output blob names from this layer.
custom_proto_spec: CustomLayerParams
A protobuf CustomLayerParams message. This can also be left blank and filled in later.
"""
spec = self.spec
nn_spec = self.nn_spec
# custom layers require a newer specification version
from coremltools import _MINIMUM_CUSTOM_LAYER_SPEC_VERSION
spec.specificationVersion = max(spec.specificationVersion, _MINIMUM_CUSTOM_LAYER_SPEC_VERSION)
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for inname in input_names:
spec_layer.input.append(inname)
for outname in output_names:
spec_layer.output.append(outname)
# Have to do it this way since I can't just assign custom in a layer
spec_layer.custom.MergeFromString(b'')
if custom_proto_spec:
spec_layer.custom.CopyFrom(custom_proto_spec) | python | def add_custom(self, name, input_names, output_names, custom_proto_spec = None):
"""
Add a custom layer.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
The input blob names to this layer.
output_names: [str]
The output blob names from this layer.
custom_proto_spec: CustomLayerParams
A protobuf CustomLayerParams message. This can also be left blank and filled in later.
"""
spec = self.spec
nn_spec = self.nn_spec
# custom layers require a newer specification version
from coremltools import _MINIMUM_CUSTOM_LAYER_SPEC_VERSION
spec.specificationVersion = max(spec.specificationVersion, _MINIMUM_CUSTOM_LAYER_SPEC_VERSION)
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for inname in input_names:
spec_layer.input.append(inname)
for outname in output_names:
spec_layer.output.append(outname)
# Have to do it this way since I can't just assign custom in a layer
spec_layer.custom.MergeFromString(b'')
if custom_proto_spec:
spec_layer.custom.CopyFrom(custom_proto_spec) | [
"def",
"add_custom",
"(",
"self",
",",
"name",
",",
"input_names",
",",
"output_names",
",",
"custom_proto_spec",
"=",
"None",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# custom layers require a newer specification version",
"from",
"coremltools",
"import",
"_MINIMUM_CUSTOM_LAYER_SPEC_VERSION",
"spec",
".",
"specificationVersion",
"=",
"max",
"(",
"spec",
".",
"specificationVersion",
",",
"_MINIMUM_CUSTOM_LAYER_SPEC_VERSION",
")",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"for",
"inname",
"in",
"input_names",
":",
"spec_layer",
".",
"input",
".",
"append",
"(",
"inname",
")",
"for",
"outname",
"in",
"output_names",
":",
"spec_layer",
".",
"output",
".",
"append",
"(",
"outname",
")",
"# Have to do it this way since I can't just assign custom in a layer",
"spec_layer",
".",
"custom",
".",
"MergeFromString",
"(",
"b''",
")",
"if",
"custom_proto_spec",
":",
"spec_layer",
".",
"custom",
".",
"CopyFrom",
"(",
"custom_proto_spec",
")"
] | Add a custom layer.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
The input blob names to this layer.
output_names: [str]
The output blob names from this layer.
custom_proto_spec: CustomLayerParams
A protobuf CustomLayerParams message. This can also be left blank and filled in later. | [
"Add",
"a",
"custom",
"layer",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2455-L2491 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py | NeuralNetworkBuilder.set_pre_processing_parameters | def set_pre_processing_parameters(self, image_input_names = [], is_bgr = False,
red_bias = 0.0, green_bias = 0.0, blue_bias = 0.0, gray_bias = 0.0, image_scale = 1.0):
"""Add pre-processing parameters to the neural network object
Parameters
----------
image_input_names: [str]
Name of input blobs that are images
is_bgr: boolean | dict()
Channel order for input blobs that are images. BGR if True else RGB.
To specify a different value for each image input,
provide a dictionary with input names as keys.
red_bias: float | dict()
Image re-centering parameter (red channel)
blue_bias: float | dict()
Image re-centering parameter (blue channel)
green_bias: float | dict()
Image re-centering parameter (green channel)
gray_bias: float | dict()
Image re-centering parameter (for grayscale images)
image_scale: float | dict()
Value by which to scale the images.
See Also
--------
set_input, set_output, set_class_labels
"""
spec = self.spec
if not image_input_names:
return # nothing to do here
if not isinstance(is_bgr, dict): is_bgr = dict.fromkeys(image_input_names, is_bgr)
if not isinstance(red_bias, dict): red_bias = dict.fromkeys(image_input_names, red_bias)
if not isinstance(blue_bias, dict): blue_bias = dict.fromkeys(image_input_names, blue_bias)
if not isinstance(green_bias, dict): green_bias = dict.fromkeys(image_input_names, green_bias)
if not isinstance(gray_bias, dict): gray_bias = dict.fromkeys(image_input_names, gray_bias)
if not isinstance(image_scale, dict): image_scale = dict.fromkeys(image_input_names, image_scale)
# Add image inputs
for input_ in spec.description.input:
if input_.name in image_input_names:
if input_.type.WhichOneof('Type') == 'multiArrayType':
array_shape = tuple(input_.type.multiArrayType.shape)
channels, height, width = array_shape
if channels == 1:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
elif channels == 3:
if input_.name in is_bgr:
if is_bgr[input_.name]:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
else:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
else:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
else:
raise ValueError("Channel Value %d not supported for image inputs" % channels)
input_.type.imageType.width = width
input_.type.imageType.height = height
preprocessing = self.nn_spec.preprocessing.add()
preprocessing.featureName = input_.name
scaler = preprocessing.scaler
if input_.name in image_scale:
scaler.channelScale = image_scale[input_.name]
else:
scaler.channelScale = 1.0
if input_.name in red_bias: scaler.redBias = red_bias[input_.name]
if input_.name in blue_bias: scaler.blueBias = blue_bias[input_.name]
if input_.name in green_bias: scaler.greenBias = green_bias[input_.name]
if input_.name in gray_bias: scaler.grayBias = gray_bias[input_.name] | python | def set_pre_processing_parameters(self, image_input_names = [], is_bgr = False,
red_bias = 0.0, green_bias = 0.0, blue_bias = 0.0, gray_bias = 0.0, image_scale = 1.0):
"""Add pre-processing parameters to the neural network object
Parameters
----------
image_input_names: [str]
Name of input blobs that are images
is_bgr: boolean | dict()
Channel order for input blobs that are images. BGR if True else RGB.
To specify a different value for each image input,
provide a dictionary with input names as keys.
red_bias: float | dict()
Image re-centering parameter (red channel)
blue_bias: float | dict()
Image re-centering parameter (blue channel)
green_bias: float | dict()
Image re-centering parameter (green channel)
gray_bias: float | dict()
Image re-centering parameter (for grayscale images)
image_scale: float | dict()
Value by which to scale the images.
See Also
--------
set_input, set_output, set_class_labels
"""
spec = self.spec
if not image_input_names:
return # nothing to do here
if not isinstance(is_bgr, dict): is_bgr = dict.fromkeys(image_input_names, is_bgr)
if not isinstance(red_bias, dict): red_bias = dict.fromkeys(image_input_names, red_bias)
if not isinstance(blue_bias, dict): blue_bias = dict.fromkeys(image_input_names, blue_bias)
if not isinstance(green_bias, dict): green_bias = dict.fromkeys(image_input_names, green_bias)
if not isinstance(gray_bias, dict): gray_bias = dict.fromkeys(image_input_names, gray_bias)
if not isinstance(image_scale, dict): image_scale = dict.fromkeys(image_input_names, image_scale)
# Add image inputs
for input_ in spec.description.input:
if input_.name in image_input_names:
if input_.type.WhichOneof('Type') == 'multiArrayType':
array_shape = tuple(input_.type.multiArrayType.shape)
channels, height, width = array_shape
if channels == 1:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
elif channels == 3:
if input_.name in is_bgr:
if is_bgr[input_.name]:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
else:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
else:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
else:
raise ValueError("Channel Value %d not supported for image inputs" % channels)
input_.type.imageType.width = width
input_.type.imageType.height = height
preprocessing = self.nn_spec.preprocessing.add()
preprocessing.featureName = input_.name
scaler = preprocessing.scaler
if input_.name in image_scale:
scaler.channelScale = image_scale[input_.name]
else:
scaler.channelScale = 1.0
if input_.name in red_bias: scaler.redBias = red_bias[input_.name]
if input_.name in blue_bias: scaler.blueBias = blue_bias[input_.name]
if input_.name in green_bias: scaler.greenBias = green_bias[input_.name]
if input_.name in gray_bias: scaler.grayBias = gray_bias[input_.name] | [
"def",
"set_pre_processing_parameters",
"(",
"self",
",",
"image_input_names",
"=",
"[",
"]",
",",
"is_bgr",
"=",
"False",
",",
"red_bias",
"=",
"0.0",
",",
"green_bias",
"=",
"0.0",
",",
"blue_bias",
"=",
"0.0",
",",
"gray_bias",
"=",
"0.0",
",",
"image_scale",
"=",
"1.0",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"if",
"not",
"image_input_names",
":",
"return",
"# nothing to do here",
"if",
"not",
"isinstance",
"(",
"is_bgr",
",",
"dict",
")",
":",
"is_bgr",
"=",
"dict",
".",
"fromkeys",
"(",
"image_input_names",
",",
"is_bgr",
")",
"if",
"not",
"isinstance",
"(",
"red_bias",
",",
"dict",
")",
":",
"red_bias",
"=",
"dict",
".",
"fromkeys",
"(",
"image_input_names",
",",
"red_bias",
")",
"if",
"not",
"isinstance",
"(",
"blue_bias",
",",
"dict",
")",
":",
"blue_bias",
"=",
"dict",
".",
"fromkeys",
"(",
"image_input_names",
",",
"blue_bias",
")",
"if",
"not",
"isinstance",
"(",
"green_bias",
",",
"dict",
")",
":",
"green_bias",
"=",
"dict",
".",
"fromkeys",
"(",
"image_input_names",
",",
"green_bias",
")",
"if",
"not",
"isinstance",
"(",
"gray_bias",
",",
"dict",
")",
":",
"gray_bias",
"=",
"dict",
".",
"fromkeys",
"(",
"image_input_names",
",",
"gray_bias",
")",
"if",
"not",
"isinstance",
"(",
"image_scale",
",",
"dict",
")",
":",
"image_scale",
"=",
"dict",
".",
"fromkeys",
"(",
"image_input_names",
",",
"image_scale",
")",
"# Add image inputs",
"for",
"input_",
"in",
"spec",
".",
"description",
".",
"input",
":",
"if",
"input_",
".",
"name",
"in",
"image_input_names",
":",
"if",
"input_",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"==",
"'multiArrayType'",
":",
"array_shape",
"=",
"tuple",
"(",
"input_",
".",
"type",
".",
"multiArrayType",
".",
"shape",
")",
"channels",
",",
"height",
",",
"width",
"=",
"array_shape",
"if",
"channels",
"==",
"1",
":",
"input_",
".",
"type",
".",
"imageType",
".",
"colorSpace",
"=",
"_FeatureTypes_pb2",
".",
"ImageFeatureType",
".",
"ColorSpace",
".",
"Value",
"(",
"'GRAYSCALE'",
")",
"elif",
"channels",
"==",
"3",
":",
"if",
"input_",
".",
"name",
"in",
"is_bgr",
":",
"if",
"is_bgr",
"[",
"input_",
".",
"name",
"]",
":",
"input_",
".",
"type",
".",
"imageType",
".",
"colorSpace",
"=",
"_FeatureTypes_pb2",
".",
"ImageFeatureType",
".",
"ColorSpace",
".",
"Value",
"(",
"'BGR'",
")",
"else",
":",
"input_",
".",
"type",
".",
"imageType",
".",
"colorSpace",
"=",
"_FeatureTypes_pb2",
".",
"ImageFeatureType",
".",
"ColorSpace",
".",
"Value",
"(",
"'RGB'",
")",
"else",
":",
"input_",
".",
"type",
".",
"imageType",
".",
"colorSpace",
"=",
"_FeatureTypes_pb2",
".",
"ImageFeatureType",
".",
"ColorSpace",
".",
"Value",
"(",
"'RGB'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Channel Value %d not supported for image inputs\"",
"%",
"channels",
")",
"input_",
".",
"type",
".",
"imageType",
".",
"width",
"=",
"width",
"input_",
".",
"type",
".",
"imageType",
".",
"height",
"=",
"height",
"preprocessing",
"=",
"self",
".",
"nn_spec",
".",
"preprocessing",
".",
"add",
"(",
")",
"preprocessing",
".",
"featureName",
"=",
"input_",
".",
"name",
"scaler",
"=",
"preprocessing",
".",
"scaler",
"if",
"input_",
".",
"name",
"in",
"image_scale",
":",
"scaler",
".",
"channelScale",
"=",
"image_scale",
"[",
"input_",
".",
"name",
"]",
"else",
":",
"scaler",
".",
"channelScale",
"=",
"1.0",
"if",
"input_",
".",
"name",
"in",
"red_bias",
":",
"scaler",
".",
"redBias",
"=",
"red_bias",
"[",
"input_",
".",
"name",
"]",
"if",
"input_",
".",
"name",
"in",
"blue_bias",
":",
"scaler",
".",
"blueBias",
"=",
"blue_bias",
"[",
"input_",
".",
"name",
"]",
"if",
"input_",
".",
"name",
"in",
"green_bias",
":",
"scaler",
".",
"greenBias",
"=",
"green_bias",
"[",
"input_",
".",
"name",
"]",
"if",
"input_",
".",
"name",
"in",
"gray_bias",
":",
"scaler",
".",
"grayBias",
"=",
"gray_bias",
"[",
"input_",
".",
"name",
"]"
] | Add pre-processing parameters to the neural network object
Parameters
----------
image_input_names: [str]
Name of input blobs that are images
is_bgr: boolean | dict()
Channel order for input blobs that are images. BGR if True else RGB.
To specify a different value for each image input,
provide a dictionary with input names as keys.
red_bias: float | dict()
Image re-centering parameter (red channel)
blue_bias: float | dict()
Image re-centering parameter (blue channel)
green_bias: float | dict()
Image re-centering parameter (green channel)
gray_bias: float | dict()
Image re-centering parameter (for grayscale images)
image_scale: float | dict()
Value by which to scale the images.
See Also
--------
set_input, set_output, set_class_labels | [
"Add",
"pre",
"-",
"processing",
"parameters",
"to",
"the",
"neural",
"network",
"object"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2494-L2570 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/scanner.py | register | def register(scanner_class, relevant_properties):
""" Registers a new generator class, specifying a set of
properties relevant to this scanner. Ctor for that class
should have one parameter: list of properties.
"""
assert issubclass(scanner_class, Scanner)
assert isinstance(relevant_properties, basestring)
__scanners[str(scanner_class)] = relevant_properties | python | def register(scanner_class, relevant_properties):
""" Registers a new generator class, specifying a set of
properties relevant to this scanner. Ctor for that class
should have one parameter: list of properties.
"""
assert issubclass(scanner_class, Scanner)
assert isinstance(relevant_properties, basestring)
__scanners[str(scanner_class)] = relevant_properties | [
"def",
"register",
"(",
"scanner_class",
",",
"relevant_properties",
")",
":",
"assert",
"issubclass",
"(",
"scanner_class",
",",
"Scanner",
")",
"assert",
"isinstance",
"(",
"relevant_properties",
",",
"basestring",
")",
"__scanners",
"[",
"str",
"(",
"scanner_class",
")",
"]",
"=",
"relevant_properties"
] | Registers a new generator class, specifying a set of
properties relevant to this scanner. Ctor for that class
should have one parameter: list of properties. | [
"Registers",
"a",
"new",
"generator",
"class",
"specifying",
"a",
"set",
"of",
"properties",
"relevant",
"to",
"this",
"scanner",
".",
"Ctor",
"for",
"that",
"class",
"should",
"have",
"one",
"parameter",
":",
"list",
"of",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/scanner.py#L54-L61 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/scanner.py | get | def get(scanner_class, properties):
""" Returns an instance of previously registered scanner
with the specified properties.
"""
assert issubclass(scanner_class, Scanner)
assert is_iterable_typed(properties, basestring)
scanner_name = str(scanner_class)
if not registered(scanner_name):
raise BaseException ("attempt to get unregisted scanner: %s" % scanner_name)
relevant_properties = __scanners[scanner_name]
r = property.select(relevant_properties, properties)
scanner_id = scanner_name + '.' + '-'.join(r)
if scanner_id not in __scanner_cache:
__scanner_cache[scanner_id] = scanner_class(r)
return __scanner_cache[scanner_id] | python | def get(scanner_class, properties):
""" Returns an instance of previously registered scanner
with the specified properties.
"""
assert issubclass(scanner_class, Scanner)
assert is_iterable_typed(properties, basestring)
scanner_name = str(scanner_class)
if not registered(scanner_name):
raise BaseException ("attempt to get unregisted scanner: %s" % scanner_name)
relevant_properties = __scanners[scanner_name]
r = property.select(relevant_properties, properties)
scanner_id = scanner_name + '.' + '-'.join(r)
if scanner_id not in __scanner_cache:
__scanner_cache[scanner_id] = scanner_class(r)
return __scanner_cache[scanner_id] | [
"def",
"get",
"(",
"scanner_class",
",",
"properties",
")",
":",
"assert",
"issubclass",
"(",
"scanner_class",
",",
"Scanner",
")",
"assert",
"is_iterable_typed",
"(",
"properties",
",",
"basestring",
")",
"scanner_name",
"=",
"str",
"(",
"scanner_class",
")",
"if",
"not",
"registered",
"(",
"scanner_name",
")",
":",
"raise",
"BaseException",
"(",
"\"attempt to get unregisted scanner: %s\"",
"%",
"scanner_name",
")",
"relevant_properties",
"=",
"__scanners",
"[",
"scanner_name",
"]",
"r",
"=",
"property",
".",
"select",
"(",
"relevant_properties",
",",
"properties",
")",
"scanner_id",
"=",
"scanner_name",
"+",
"'.'",
"+",
"'-'",
".",
"join",
"(",
"r",
")",
"if",
"scanner_id",
"not",
"in",
"__scanner_cache",
":",
"__scanner_cache",
"[",
"scanner_id",
"]",
"=",
"scanner_class",
"(",
"r",
")",
"return",
"__scanner_cache",
"[",
"scanner_id",
"]"
] | Returns an instance of previously registered scanner
with the specified properties. | [
"Returns",
"an",
"instance",
"of",
"previously",
"registered",
"scanner",
"with",
"the",
"specified",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/scanner.py#L68-L87 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/scanner.py | ScannerRegistry.install | def install (self, scanner, target, vtarget):
""" Installs the specified scanner on actual target 'target'.
vtarget: virtual target from which 'target' was actualized.
"""
assert isinstance(scanner, Scanner)
assert isinstance(target, basestring)
assert isinstance(vtarget, basestring)
engine = self.manager_.engine()
engine.set_target_variable(target, "HDRSCAN", scanner.pattern())
if scanner not in self.exported_scanners_:
exported_name = "scanner_" + str(self.count_)
self.count_ = self.count_ + 1
self.exported_scanners_[scanner] = exported_name
bjam.import_rule("", exported_name, scanner.process)
else:
exported_name = self.exported_scanners_[scanner]
engine.set_target_variable(target, "HDRRULE", exported_name)
# scanner reflects difference in properties affecting
# binding of 'target', which will be known when processing
# includes for it, will give information on how to
# interpret quoted includes.
engine.set_target_variable(target, "HDRGRIST", str(id(scanner)))
pass | python | def install (self, scanner, target, vtarget):
""" Installs the specified scanner on actual target 'target'.
vtarget: virtual target from which 'target' was actualized.
"""
assert isinstance(scanner, Scanner)
assert isinstance(target, basestring)
assert isinstance(vtarget, basestring)
engine = self.manager_.engine()
engine.set_target_variable(target, "HDRSCAN", scanner.pattern())
if scanner not in self.exported_scanners_:
exported_name = "scanner_" + str(self.count_)
self.count_ = self.count_ + 1
self.exported_scanners_[scanner] = exported_name
bjam.import_rule("", exported_name, scanner.process)
else:
exported_name = self.exported_scanners_[scanner]
engine.set_target_variable(target, "HDRRULE", exported_name)
# scanner reflects difference in properties affecting
# binding of 'target', which will be known when processing
# includes for it, will give information on how to
# interpret quoted includes.
engine.set_target_variable(target, "HDRGRIST", str(id(scanner)))
pass | [
"def",
"install",
"(",
"self",
",",
"scanner",
",",
"target",
",",
"vtarget",
")",
":",
"assert",
"isinstance",
"(",
"scanner",
",",
"Scanner",
")",
"assert",
"isinstance",
"(",
"target",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"vtarget",
",",
"basestring",
")",
"engine",
"=",
"self",
".",
"manager_",
".",
"engine",
"(",
")",
"engine",
".",
"set_target_variable",
"(",
"target",
",",
"\"HDRSCAN\"",
",",
"scanner",
".",
"pattern",
"(",
")",
")",
"if",
"scanner",
"not",
"in",
"self",
".",
"exported_scanners_",
":",
"exported_name",
"=",
"\"scanner_\"",
"+",
"str",
"(",
"self",
".",
"count_",
")",
"self",
".",
"count_",
"=",
"self",
".",
"count_",
"+",
"1",
"self",
".",
"exported_scanners_",
"[",
"scanner",
"]",
"=",
"exported_name",
"bjam",
".",
"import_rule",
"(",
"\"\"",
",",
"exported_name",
",",
"scanner",
".",
"process",
")",
"else",
":",
"exported_name",
"=",
"self",
".",
"exported_scanners_",
"[",
"scanner",
"]",
"engine",
".",
"set_target_variable",
"(",
"target",
",",
"\"HDRRULE\"",
",",
"exported_name",
")",
"# scanner reflects difference in properties affecting",
"# binding of 'target', which will be known when processing",
"# includes for it, will give information on how to",
"# interpret quoted includes.",
"engine",
".",
"set_target_variable",
"(",
"target",
",",
"\"HDRGRIST\"",
",",
"str",
"(",
"id",
"(",
"scanner",
")",
")",
")",
"pass"
] | Installs the specified scanner on actual target 'target'.
vtarget: virtual target from which 'target' was actualized. | [
"Installs",
"the",
"specified",
"scanner",
"on",
"actual",
"target",
"target",
".",
"vtarget",
":",
"virtual",
"target",
"from",
"which",
"target",
"was",
"actualized",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/scanner.py#L133-L157 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | _fill_function | def _fill_function(func, globals, defaults, dict, module, closure_values):
""" Fills in the rest of function data into the skeleton function object
that were created via _make_skel_func().
"""
func.__globals__.update(globals)
func.__defaults__ = defaults
func.__dict__ = dict
func.__module__ = module
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, closure_values):
if value is not _empty_cell_value:
cell_set(cell, value)
return func | python | def _fill_function(func, globals, defaults, dict, module, closure_values):
""" Fills in the rest of function data into the skeleton function object
that were created via _make_skel_func().
"""
func.__globals__.update(globals)
func.__defaults__ = defaults
func.__dict__ = dict
func.__module__ = module
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, closure_values):
if value is not _empty_cell_value:
cell_set(cell, value)
return func | [
"def",
"_fill_function",
"(",
"func",
",",
"globals",
",",
"defaults",
",",
"dict",
",",
"module",
",",
"closure_values",
")",
":",
"func",
".",
"__globals__",
".",
"update",
"(",
"globals",
")",
"func",
".",
"__defaults__",
"=",
"defaults",
"func",
".",
"__dict__",
"=",
"dict",
"func",
".",
"__module__",
"=",
"module",
"cells",
"=",
"func",
".",
"__closure__",
"if",
"cells",
"is",
"not",
"None",
":",
"for",
"cell",
",",
"value",
"in",
"zip",
"(",
"cells",
",",
"closure_values",
")",
":",
"if",
"value",
"is",
"not",
"_empty_cell_value",
":",
"cell_set",
"(",
"cell",
",",
"value",
")",
"return",
"func"
] | Fills in the rest of function data into the skeleton function object
that were created via _make_skel_func(). | [
"Fills",
"in",
"the",
"rest",
"of",
"function",
"data",
"into",
"the",
"skeleton",
"function",
"object",
"that",
"were",
"created",
"via",
"_make_skel_func",
"()",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L1090-L1105 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | _make_skel_func | def _make_skel_func(code, cell_count, base_globals=None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
if base_globals is None:
base_globals = {}
base_globals['__builtins__'] = __builtins__
closure = (
tuple(_make_empty_cell() for _ in range(cell_count))
if cell_count >= 0 else
None
)
return types.FunctionType(code, base_globals, None, None, closure) | python | def _make_skel_func(code, cell_count, base_globals=None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
if base_globals is None:
base_globals = {}
base_globals['__builtins__'] = __builtins__
closure = (
tuple(_make_empty_cell() for _ in range(cell_count))
if cell_count >= 0 else
None
)
return types.FunctionType(code, base_globals, None, None, closure) | [
"def",
"_make_skel_func",
"(",
"code",
",",
"cell_count",
",",
"base_globals",
"=",
"None",
")",
":",
"if",
"base_globals",
"is",
"None",
":",
"base_globals",
"=",
"{",
"}",
"base_globals",
"[",
"'__builtins__'",
"]",
"=",
"__builtins__",
"closure",
"=",
"(",
"tuple",
"(",
"_make_empty_cell",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"cell_count",
")",
")",
"if",
"cell_count",
">=",
"0",
"else",
"None",
")",
"return",
"types",
".",
"FunctionType",
"(",
"code",
",",
"base_globals",
",",
"None",
",",
"None",
",",
"closure",
")"
] | Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty. | [
"Creates",
"a",
"skeleton",
"function",
"object",
"that",
"contains",
"just",
"the",
"provided",
"code",
"and",
"the",
"correct",
"number",
"of",
"cells",
"in",
"func_closure",
".",
"All",
"other",
"func",
"attributes",
"(",
"e",
".",
"g",
".",
"func_globals",
")",
"are",
"empty",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L1117-L1131 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | _rehydrate_skeleton_class | def _rehydrate_skeleton_class(skeleton_class, class_dict):
"""Put attributes from `class_dict` back on `skeleton_class`.
See CloudPickler.save_dynamic_class for more info.
"""
for attrname, attr in class_dict.items():
setattr(skeleton_class, attrname, attr)
return skeleton_class | python | def _rehydrate_skeleton_class(skeleton_class, class_dict):
"""Put attributes from `class_dict` back on `skeleton_class`.
See CloudPickler.save_dynamic_class for more info.
"""
for attrname, attr in class_dict.items():
setattr(skeleton_class, attrname, attr)
return skeleton_class | [
"def",
"_rehydrate_skeleton_class",
"(",
"skeleton_class",
",",
"class_dict",
")",
":",
"for",
"attrname",
",",
"attr",
"in",
"class_dict",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"skeleton_class",
",",
"attrname",
",",
"attr",
")",
"return",
"skeleton_class"
] | Put attributes from `class_dict` back on `skeleton_class`.
See CloudPickler.save_dynamic_class for more info. | [
"Put",
"attributes",
"from",
"class_dict",
"back",
"on",
"skeleton_class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L1134-L1141 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | _find_module | def _find_module(mod_name):
"""
Iterate over each part instead of calling imp.find_module directly.
This function is able to find submodules (e.g. sickit.tree)
"""
path = None
for part in mod_name.split('.'):
if path is not None:
path = [path]
file, path, description = imp.find_module(part, path)
if file is not None:
file.close()
return path, description | python | def _find_module(mod_name):
"""
Iterate over each part instead of calling imp.find_module directly.
This function is able to find submodules (e.g. sickit.tree)
"""
path = None
for part in mod_name.split('.'):
if path is not None:
path = [path]
file, path, description = imp.find_module(part, path)
if file is not None:
file.close()
return path, description | [
"def",
"_find_module",
"(",
"mod_name",
")",
":",
"path",
"=",
"None",
"for",
"part",
"in",
"mod_name",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"path",
"is",
"not",
"None",
":",
"path",
"=",
"[",
"path",
"]",
"file",
",",
"path",
",",
"description",
"=",
"imp",
".",
"find_module",
"(",
"part",
",",
"path",
")",
"if",
"file",
"is",
"not",
"None",
":",
"file",
".",
"close",
"(",
")",
"return",
"path",
",",
"description"
] | Iterate over each part instead of calling imp.find_module directly.
This function is able to find submodules (e.g. sickit.tree) | [
"Iterate",
"over",
"each",
"part",
"instead",
"of",
"calling",
"imp",
".",
"find_module",
"directly",
".",
"This",
"function",
"is",
"able",
"to",
"find",
"submodules",
"(",
"e",
".",
"g",
".",
"sickit",
".",
"tree",
")"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L1144-L1156 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_module | def save_module(self, obj):
"""
Save a module as an import
"""
mod_name = obj.__name__
# If module is successfully found then it is not a dynamically created module
if hasattr(obj, '__file__'):
is_dynamic = False
else:
try:
_find_module(mod_name)
is_dynamic = False
except ImportError:
is_dynamic = True
self.modules.add(obj)
if is_dynamic:
self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj)
else:
self.save_reduce(subimport, (obj.__name__,), obj=obj) | python | def save_module(self, obj):
"""
Save a module as an import
"""
mod_name = obj.__name__
# If module is successfully found then it is not a dynamically created module
if hasattr(obj, '__file__'):
is_dynamic = False
else:
try:
_find_module(mod_name)
is_dynamic = False
except ImportError:
is_dynamic = True
self.modules.add(obj)
if is_dynamic:
self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj)
else:
self.save_reduce(subimport, (obj.__name__,), obj=obj) | [
"def",
"save_module",
"(",
"self",
",",
"obj",
")",
":",
"mod_name",
"=",
"obj",
".",
"__name__",
"# If module is successfully found then it is not a dynamically created module",
"if",
"hasattr",
"(",
"obj",
",",
"'__file__'",
")",
":",
"is_dynamic",
"=",
"False",
"else",
":",
"try",
":",
"_find_module",
"(",
"mod_name",
")",
"is_dynamic",
"=",
"False",
"except",
"ImportError",
":",
"is_dynamic",
"=",
"True",
"self",
".",
"modules",
".",
"add",
"(",
"obj",
")",
"if",
"is_dynamic",
":",
"self",
".",
"save_reduce",
"(",
"dynamic_subimport",
",",
"(",
"obj",
".",
"__name__",
",",
"vars",
"(",
"obj",
")",
")",
",",
"obj",
"=",
"obj",
")",
"else",
":",
"self",
".",
"save_reduce",
"(",
"subimport",
",",
"(",
"obj",
".",
"__name__",
",",
")",
",",
"obj",
"=",
"obj",
")"
] | Save a module as an import | [
"Save",
"a",
"module",
"as",
"an",
"import"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L285-L304 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler._save_subimports | def _save_subimports(self, code, top_level_dependencies):
"""
Ensure de-pickler imports any package child-modules that
are needed by the function
"""
# check if any known dependency is an imported package
for x in top_level_dependencies:
if isinstance(x, types.ModuleType) and hasattr(x, '__package__') and x.__package__:
# check if the package has any currently loaded sub-imports
prefix = x.__name__ + '.'
for name, module in sys.modules.items():
# Older versions of pytest will add a "None" module to sys.modules.
if name is not None and name.startswith(prefix):
# check whether the function can address the sub-module
tokens = set(name[len(prefix):].split('.'))
if not tokens - set(code.co_names):
# ensure unpickler executes this import
self.save(module)
# then discards the reference to it
self.write(pickle.POP) | python | def _save_subimports(self, code, top_level_dependencies):
"""
Ensure de-pickler imports any package child-modules that
are needed by the function
"""
# check if any known dependency is an imported package
for x in top_level_dependencies:
if isinstance(x, types.ModuleType) and hasattr(x, '__package__') and x.__package__:
# check if the package has any currently loaded sub-imports
prefix = x.__name__ + '.'
for name, module in sys.modules.items():
# Older versions of pytest will add a "None" module to sys.modules.
if name is not None and name.startswith(prefix):
# check whether the function can address the sub-module
tokens = set(name[len(prefix):].split('.'))
if not tokens - set(code.co_names):
# ensure unpickler executes this import
self.save(module)
# then discards the reference to it
self.write(pickle.POP) | [
"def",
"_save_subimports",
"(",
"self",
",",
"code",
",",
"top_level_dependencies",
")",
":",
"# check if any known dependency is an imported package",
"for",
"x",
"in",
"top_level_dependencies",
":",
"if",
"isinstance",
"(",
"x",
",",
"types",
".",
"ModuleType",
")",
"and",
"hasattr",
"(",
"x",
",",
"'__package__'",
")",
"and",
"x",
".",
"__package__",
":",
"# check if the package has any currently loaded sub-imports",
"prefix",
"=",
"x",
".",
"__name__",
"+",
"'.'",
"for",
"name",
",",
"module",
"in",
"sys",
".",
"modules",
".",
"items",
"(",
")",
":",
"# Older versions of pytest will add a \"None\" module to sys.modules.",
"if",
"name",
"is",
"not",
"None",
"and",
"name",
".",
"startswith",
"(",
"prefix",
")",
":",
"# check whether the function can address the sub-module",
"tokens",
"=",
"set",
"(",
"name",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
".",
"split",
"(",
"'.'",
")",
")",
"if",
"not",
"tokens",
"-",
"set",
"(",
"code",
".",
"co_names",
")",
":",
"# ensure unpickler executes this import",
"self",
".",
"save",
"(",
"module",
")",
"# then discards the reference to it",
"self",
".",
"write",
"(",
"pickle",
".",
"POP",
")"
] | Ensure de-pickler imports any package child-modules that
are needed by the function | [
"Ensure",
"de",
"-",
"pickler",
"imports",
"any",
"package",
"child",
"-",
"modules",
"that",
"are",
"needed",
"by",
"the",
"function"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L405-L424 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_dynamic_class | def save_dynamic_class(self, obj):
"""
Save a class that can't be stored as module global.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from global modules.
"""
clsdict = dict(obj.__dict__) # copy dict proxy to a dict
if not isinstance(clsdict.get('__dict__', None), property):
# don't extract dict that are properties
clsdict.pop('__dict__', None)
clsdict.pop('__weakref__', None)
# hack as __new__ is stored differently in the __dict__
new_override = clsdict.get('__new__', None)
if new_override:
clsdict['__new__'] = obj.__new__
# namedtuple is a special case for Spark where we use the _load_namedtuple function
if getattr(obj, '_is_namedtuple_', False):
self.save_reduce(_load_namedtuple, (obj.__name__, obj._fields))
return
save = self.save
write = self.write
# We write pickle instructions explicitly here to handle the
# possibility that the type object participates in a cycle with its own
# __dict__. We first write an empty "skeleton" version of the class and
# memoize it before writing the class' __dict__ itself. We then write
# instructions to "rehydrate" the skeleton class by restoring the
# attributes from the __dict__.
#
# A type can appear in a cycle with its __dict__ if an instance of the
# type appears in the type's __dict__ (which happens for the stdlib
# Enum class), or if the type defines methods that close over the name
# of the type, (which is common for Python 2-style super() calls).
# Push the rehydration function.
save(_rehydrate_skeleton_class)
# Mark the start of the args for the rehydration function.
write(pickle.MARK)
# On PyPy, __doc__ is a readonly attribute, so we need to include it in
# the initial skeleton class. This is safe because we know that the
# doc can't participate in a cycle with the original class.
doc_dict = {'__doc__': clsdict.pop('__doc__', None)}
# Create and memoize an empty class with obj's name and bases.
save(type(obj))
save((
obj.__name__,
obj.__bases__,
doc_dict,
))
write(pickle.REDUCE)
self.memoize(obj)
# Now save the rest of obj's __dict__. Any references to obj
# encountered while saving will point to the skeleton class.
save(clsdict)
# Write a tuple of (skeleton_class, clsdict).
write(pickle.TUPLE)
# Call _rehydrate_skeleton_class(skeleton_class, clsdict)
write(pickle.REDUCE) | python | def save_dynamic_class(self, obj):
"""
Save a class that can't be stored as module global.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from global modules.
"""
clsdict = dict(obj.__dict__) # copy dict proxy to a dict
if not isinstance(clsdict.get('__dict__', None), property):
# don't extract dict that are properties
clsdict.pop('__dict__', None)
clsdict.pop('__weakref__', None)
# hack as __new__ is stored differently in the __dict__
new_override = clsdict.get('__new__', None)
if new_override:
clsdict['__new__'] = obj.__new__
# namedtuple is a special case for Spark where we use the _load_namedtuple function
if getattr(obj, '_is_namedtuple_', False):
self.save_reduce(_load_namedtuple, (obj.__name__, obj._fields))
return
save = self.save
write = self.write
# We write pickle instructions explicitly here to handle the
# possibility that the type object participates in a cycle with its own
# __dict__. We first write an empty "skeleton" version of the class and
# memoize it before writing the class' __dict__ itself. We then write
# instructions to "rehydrate" the skeleton class by restoring the
# attributes from the __dict__.
#
# A type can appear in a cycle with its __dict__ if an instance of the
# type appears in the type's __dict__ (which happens for the stdlib
# Enum class), or if the type defines methods that close over the name
# of the type, (which is common for Python 2-style super() calls).
# Push the rehydration function.
save(_rehydrate_skeleton_class)
# Mark the start of the args for the rehydration function.
write(pickle.MARK)
# On PyPy, __doc__ is a readonly attribute, so we need to include it in
# the initial skeleton class. This is safe because we know that the
# doc can't participate in a cycle with the original class.
doc_dict = {'__doc__': clsdict.pop('__doc__', None)}
# Create and memoize an empty class with obj's name and bases.
save(type(obj))
save((
obj.__name__,
obj.__bases__,
doc_dict,
))
write(pickle.REDUCE)
self.memoize(obj)
# Now save the rest of obj's __dict__. Any references to obj
# encountered while saving will point to the skeleton class.
save(clsdict)
# Write a tuple of (skeleton_class, clsdict).
write(pickle.TUPLE)
# Call _rehydrate_skeleton_class(skeleton_class, clsdict)
write(pickle.REDUCE) | [
"def",
"save_dynamic_class",
"(",
"self",
",",
"obj",
")",
":",
"clsdict",
"=",
"dict",
"(",
"obj",
".",
"__dict__",
")",
"# copy dict proxy to a dict",
"if",
"not",
"isinstance",
"(",
"clsdict",
".",
"get",
"(",
"'__dict__'",
",",
"None",
")",
",",
"property",
")",
":",
"# don't extract dict that are properties",
"clsdict",
".",
"pop",
"(",
"'__dict__'",
",",
"None",
")",
"clsdict",
".",
"pop",
"(",
"'__weakref__'",
",",
"None",
")",
"# hack as __new__ is stored differently in the __dict__",
"new_override",
"=",
"clsdict",
".",
"get",
"(",
"'__new__'",
",",
"None",
")",
"if",
"new_override",
":",
"clsdict",
"[",
"'__new__'",
"]",
"=",
"obj",
".",
"__new__",
"# namedtuple is a special case for Spark where we use the _load_namedtuple function",
"if",
"getattr",
"(",
"obj",
",",
"'_is_namedtuple_'",
",",
"False",
")",
":",
"self",
".",
"save_reduce",
"(",
"_load_namedtuple",
",",
"(",
"obj",
".",
"__name__",
",",
"obj",
".",
"_fields",
")",
")",
"return",
"save",
"=",
"self",
".",
"save",
"write",
"=",
"self",
".",
"write",
"# We write pickle instructions explicitly here to handle the",
"# possibility that the type object participates in a cycle with its own",
"# __dict__. We first write an empty \"skeleton\" version of the class and",
"# memoize it before writing the class' __dict__ itself. We then write",
"# instructions to \"rehydrate\" the skeleton class by restoring the",
"# attributes from the __dict__.",
"#",
"# A type can appear in a cycle with its __dict__ if an instance of the",
"# type appears in the type's __dict__ (which happens for the stdlib",
"# Enum class), or if the type defines methods that close over the name",
"# of the type, (which is common for Python 2-style super() calls).",
"# Push the rehydration function.",
"save",
"(",
"_rehydrate_skeleton_class",
")",
"# Mark the start of the args for the rehydration function.",
"write",
"(",
"pickle",
".",
"MARK",
")",
"# On PyPy, __doc__ is a readonly attribute, so we need to include it in",
"# the initial skeleton class. This is safe because we know that the",
"# doc can't participate in a cycle with the original class.",
"doc_dict",
"=",
"{",
"'__doc__'",
":",
"clsdict",
".",
"pop",
"(",
"'__doc__'",
",",
"None",
")",
"}",
"# Create and memoize an empty class with obj's name and bases.",
"save",
"(",
"type",
"(",
"obj",
")",
")",
"save",
"(",
"(",
"obj",
".",
"__name__",
",",
"obj",
".",
"__bases__",
",",
"doc_dict",
",",
")",
")",
"write",
"(",
"pickle",
".",
"REDUCE",
")",
"self",
".",
"memoize",
"(",
"obj",
")",
"# Now save the rest of obj's __dict__. Any references to obj",
"# encountered while saving will point to the skeleton class.",
"save",
"(",
"clsdict",
")",
"# Write a tuple of (skeleton_class, clsdict).",
"write",
"(",
"pickle",
".",
"TUPLE",
")",
"# Call _rehydrate_skeleton_class(skeleton_class, clsdict)",
"write",
"(",
"pickle",
".",
"REDUCE",
")"
] | Save a class that can't be stored as module global.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from global modules. | [
"Save",
"a",
"class",
"that",
"can",
"t",
"be",
"stored",
"as",
"module",
"global",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L426-L494 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_function_tuple | def save_function_tuple(self, func):
""" Pickles an actual func object.
A func comprises: code, globals, defaults, closure, and dict. We
extract and save these, injecting reducing functions at certain points
to recreate the func object. Keep in mind that some of these pieces
can contain a ref to the func itself. Thus, a naive save on these
pieces could trigger an infinite loop of save's. To get around that,
we first create a skeleton func object using just the code (this is
safe, since this won't contain a ref to the func), and memoize it as
soon as it's created. The other stuff can then be filled in later.
"""
if is_tornado_coroutine(func):
self.save_reduce(_rebuild_tornado_coroutine, (func.__wrapped__,),
obj=func)
return
save = self.save
write = self.write
code, f_globals, defaults, closure_values, dct, base_globals = self.extract_func_data(func)
save(_fill_function) # skeleton function updater
write(pickle.MARK) # beginning of tuple that _fill_function expects
self._save_subimports(
code,
itertools.chain(f_globals.values(), closure_values or ()),
)
# create a skeleton function object and memoize it
save(_make_skel_func)
save((
code,
len(closure_values) if closure_values is not None else -1,
base_globals,
))
write(pickle.REDUCE)
self.memoize(func)
# save the rest of the func data needed by _fill_function
save(f_globals)
save(defaults)
save(dct)
save(func.__module__)
save(closure_values)
write(pickle.TUPLE)
write(pickle.REDUCE) | python | def save_function_tuple(self, func):
""" Pickles an actual func object.
A func comprises: code, globals, defaults, closure, and dict. We
extract and save these, injecting reducing functions at certain points
to recreate the func object. Keep in mind that some of these pieces
can contain a ref to the func itself. Thus, a naive save on these
pieces could trigger an infinite loop of save's. To get around that,
we first create a skeleton func object using just the code (this is
safe, since this won't contain a ref to the func), and memoize it as
soon as it's created. The other stuff can then be filled in later.
"""
if is_tornado_coroutine(func):
self.save_reduce(_rebuild_tornado_coroutine, (func.__wrapped__,),
obj=func)
return
save = self.save
write = self.write
code, f_globals, defaults, closure_values, dct, base_globals = self.extract_func_data(func)
save(_fill_function) # skeleton function updater
write(pickle.MARK) # beginning of tuple that _fill_function expects
self._save_subimports(
code,
itertools.chain(f_globals.values(), closure_values or ()),
)
# create a skeleton function object and memoize it
save(_make_skel_func)
save((
code,
len(closure_values) if closure_values is not None else -1,
base_globals,
))
write(pickle.REDUCE)
self.memoize(func)
# save the rest of the func data needed by _fill_function
save(f_globals)
save(defaults)
save(dct)
save(func.__module__)
save(closure_values)
write(pickle.TUPLE)
write(pickle.REDUCE) | [
"def",
"save_function_tuple",
"(",
"self",
",",
"func",
")",
":",
"if",
"is_tornado_coroutine",
"(",
"func",
")",
":",
"self",
".",
"save_reduce",
"(",
"_rebuild_tornado_coroutine",
",",
"(",
"func",
".",
"__wrapped__",
",",
")",
",",
"obj",
"=",
"func",
")",
"return",
"save",
"=",
"self",
".",
"save",
"write",
"=",
"self",
".",
"write",
"code",
",",
"f_globals",
",",
"defaults",
",",
"closure_values",
",",
"dct",
",",
"base_globals",
"=",
"self",
".",
"extract_func_data",
"(",
"func",
")",
"save",
"(",
"_fill_function",
")",
"# skeleton function updater",
"write",
"(",
"pickle",
".",
"MARK",
")",
"# beginning of tuple that _fill_function expects",
"self",
".",
"_save_subimports",
"(",
"code",
",",
"itertools",
".",
"chain",
"(",
"f_globals",
".",
"values",
"(",
")",
",",
"closure_values",
"or",
"(",
")",
")",
",",
")",
"# create a skeleton function object and memoize it",
"save",
"(",
"_make_skel_func",
")",
"save",
"(",
"(",
"code",
",",
"len",
"(",
"closure_values",
")",
"if",
"closure_values",
"is",
"not",
"None",
"else",
"-",
"1",
",",
"base_globals",
",",
")",
")",
"write",
"(",
"pickle",
".",
"REDUCE",
")",
"self",
".",
"memoize",
"(",
"func",
")",
"# save the rest of the func data needed by _fill_function",
"save",
"(",
"f_globals",
")",
"save",
"(",
"defaults",
")",
"save",
"(",
"dct",
")",
"save",
"(",
"func",
".",
"__module__",
")",
"save",
"(",
"closure_values",
")",
"write",
"(",
"pickle",
".",
"TUPLE",
")",
"write",
"(",
"pickle",
".",
"REDUCE",
")"
] | Pickles an actual func object.
A func comprises: code, globals, defaults, closure, and dict. We
extract and save these, injecting reducing functions at certain points
to recreate the func object. Keep in mind that some of these pieces
can contain a ref to the func itself. Thus, a naive save on these
pieces could trigger an infinite loop of save's. To get around that,
we first create a skeleton func object using just the code (this is
safe, since this won't contain a ref to the func), and memoize it as
soon as it's created. The other stuff can then be filled in later. | [
"Pickles",
"an",
"actual",
"func",
"object",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L496-L543 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.extract_code_globals | def extract_code_globals(cls, co):
"""
Find all globals names read or written to by codeblock co
"""
out_names = cls._extract_code_globals_cache.get(co)
if out_names is None:
try:
names = co.co_names
except AttributeError:
# PyPy "builtin-code" object
out_names = set()
else:
out_names = set(names[oparg]
for op, oparg in _walk_global_ops(co))
# see if nested function have any global refs
if co.co_consts:
for const in co.co_consts:
if type(const) is types.CodeType:
out_names |= cls.extract_code_globals(const)
cls._extract_code_globals_cache[co] = out_names
return out_names | python | def extract_code_globals(cls, co):
"""
Find all globals names read or written to by codeblock co
"""
out_names = cls._extract_code_globals_cache.get(co)
if out_names is None:
try:
names = co.co_names
except AttributeError:
# PyPy "builtin-code" object
out_names = set()
else:
out_names = set(names[oparg]
for op, oparg in _walk_global_ops(co))
# see if nested function have any global refs
if co.co_consts:
for const in co.co_consts:
if type(const) is types.CodeType:
out_names |= cls.extract_code_globals(const)
cls._extract_code_globals_cache[co] = out_names
return out_names | [
"def",
"extract_code_globals",
"(",
"cls",
",",
"co",
")",
":",
"out_names",
"=",
"cls",
".",
"_extract_code_globals_cache",
".",
"get",
"(",
"co",
")",
"if",
"out_names",
"is",
"None",
":",
"try",
":",
"names",
"=",
"co",
".",
"co_names",
"except",
"AttributeError",
":",
"# PyPy \"builtin-code\" object",
"out_names",
"=",
"set",
"(",
")",
"else",
":",
"out_names",
"=",
"set",
"(",
"names",
"[",
"oparg",
"]",
"for",
"op",
",",
"oparg",
"in",
"_walk_global_ops",
"(",
"co",
")",
")",
"# see if nested function have any global refs",
"if",
"co",
".",
"co_consts",
":",
"for",
"const",
"in",
"co",
".",
"co_consts",
":",
"if",
"type",
"(",
"const",
")",
"is",
"types",
".",
"CodeType",
":",
"out_names",
"|=",
"cls",
".",
"extract_code_globals",
"(",
"const",
")",
"cls",
".",
"_extract_code_globals_cache",
"[",
"co",
"]",
"=",
"out_names",
"return",
"out_names"
] | Find all globals names read or written to by codeblock co | [
"Find",
"all",
"globals",
"names",
"read",
"or",
"written",
"to",
"by",
"codeblock",
"co"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L551-L574 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_global | def save_global(self, obj, name=None, pack=struct.pack):
"""
Save a "global".
The name of this method is somewhat misleading: all types get
dispatched here.
"""
if obj.__module__ == "__builtin__" or obj.__module__ == "builtins":
if obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if name is None:
name = obj.__name__
modname = getattr(obj, "__module__", None)
if modname is None:
try:
# whichmodule() could fail, see
# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling
modname = pickle.whichmodule(obj, name)
except Exception:
modname = '__main__'
if modname == '__main__':
themodule = None
else:
__import__(modname)
themodule = sys.modules[modname]
self.modules.add(themodule)
if hasattr(themodule, name) and getattr(themodule, name) is obj:
return Pickler.save_global(self, obj, name)
typ = type(obj)
if typ is not obj and isinstance(obj, (type, _class_type)):
self.save_dynamic_class(obj)
else:
raise pickle.PicklingError("Can't pickle %r" % obj) | python | def save_global(self, obj, name=None, pack=struct.pack):
"""
Save a "global".
The name of this method is somewhat misleading: all types get
dispatched here.
"""
if obj.__module__ == "__builtin__" or obj.__module__ == "builtins":
if obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if name is None:
name = obj.__name__
modname = getattr(obj, "__module__", None)
if modname is None:
try:
# whichmodule() could fail, see
# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling
modname = pickle.whichmodule(obj, name)
except Exception:
modname = '__main__'
if modname == '__main__':
themodule = None
else:
__import__(modname)
themodule = sys.modules[modname]
self.modules.add(themodule)
if hasattr(themodule, name) and getattr(themodule, name) is obj:
return Pickler.save_global(self, obj, name)
typ = type(obj)
if typ is not obj and isinstance(obj, (type, _class_type)):
self.save_dynamic_class(obj)
else:
raise pickle.PicklingError("Can't pickle %r" % obj) | [
"def",
"save_global",
"(",
"self",
",",
"obj",
",",
"name",
"=",
"None",
",",
"pack",
"=",
"struct",
".",
"pack",
")",
":",
"if",
"obj",
".",
"__module__",
"==",
"\"__builtin__\"",
"or",
"obj",
".",
"__module__",
"==",
"\"builtins\"",
":",
"if",
"obj",
"in",
"_BUILTIN_TYPE_NAMES",
":",
"return",
"self",
".",
"save_reduce",
"(",
"_builtin_type",
",",
"(",
"_BUILTIN_TYPE_NAMES",
"[",
"obj",
"]",
",",
")",
",",
"obj",
"=",
"obj",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"obj",
".",
"__name__",
"modname",
"=",
"getattr",
"(",
"obj",
",",
"\"__module__\"",
",",
"None",
")",
"if",
"modname",
"is",
"None",
":",
"try",
":",
"# whichmodule() could fail, see",
"# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling",
"modname",
"=",
"pickle",
".",
"whichmodule",
"(",
"obj",
",",
"name",
")",
"except",
"Exception",
":",
"modname",
"=",
"'__main__'",
"if",
"modname",
"==",
"'__main__'",
":",
"themodule",
"=",
"None",
"else",
":",
"__import__",
"(",
"modname",
")",
"themodule",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"self",
".",
"modules",
".",
"add",
"(",
"themodule",
")",
"if",
"hasattr",
"(",
"themodule",
",",
"name",
")",
"and",
"getattr",
"(",
"themodule",
",",
"name",
")",
"is",
"obj",
":",
"return",
"Pickler",
".",
"save_global",
"(",
"self",
",",
"obj",
",",
"name",
")",
"typ",
"=",
"type",
"(",
"obj",
")",
"if",
"typ",
"is",
"not",
"obj",
"and",
"isinstance",
"(",
"obj",
",",
"(",
"type",
",",
"_class_type",
")",
")",
":",
"self",
".",
"save_dynamic_class",
"(",
"obj",
")",
"else",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Can't pickle %r\"",
"%",
"obj",
")"
] | Save a "global".
The name of this method is somewhat misleading: all types get
dispatched here. | [
"Save",
"a",
"global",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L616-L653 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_reduce | def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
"""Modified to support __transient__ on new objects
Change only affects protocol level 2 (which is always used by PiCloud"""
# Assert that args is a tuple or None
if not isinstance(args, tuple):
raise pickle.PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise pickle.PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
#Added fix to allow transient
cls = args[0]
if not hasattr(cls, "__new__"):
raise pickle.PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise pickle.PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
#Don't pickle transient entries
if hasattr(obj, '__transient__'):
transient = obj.__transient__
state = state.copy()
for k in list(state.keys()):
if k in transient:
del state[k]
save(args)
write(pickle.NEWOBJ)
else:
save(func)
save(args)
write(pickle.REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(pickle.BUILD) | python | def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
"""Modified to support __transient__ on new objects
Change only affects protocol level 2 (which is always used by PiCloud"""
# Assert that args is a tuple or None
if not isinstance(args, tuple):
raise pickle.PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise pickle.PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
#Added fix to allow transient
cls = args[0]
if not hasattr(cls, "__new__"):
raise pickle.PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise pickle.PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
#Don't pickle transient entries
if hasattr(obj, '__transient__'):
transient = obj.__transient__
state = state.copy()
for k in list(state.keys()):
if k in transient:
del state[k]
save(args)
write(pickle.NEWOBJ)
else:
save(func)
save(args)
write(pickle.REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(pickle.BUILD) | [
"def",
"save_reduce",
"(",
"self",
",",
"func",
",",
"args",
",",
"state",
"=",
"None",
",",
"listitems",
"=",
"None",
",",
"dictitems",
"=",
"None",
",",
"obj",
"=",
"None",
")",
":",
"# Assert that args is a tuple or None",
"if",
"not",
"isinstance",
"(",
"args",
",",
"tuple",
")",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"args from reduce() should be a tuple\"",
")",
"# Assert that func is callable",
"if",
"not",
"hasattr",
"(",
"func",
",",
"'__call__'",
")",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"func from reduce should be callable\"",
")",
"save",
"=",
"self",
".",
"save",
"write",
"=",
"self",
".",
"write",
"# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ",
"if",
"self",
".",
"proto",
">=",
"2",
"and",
"getattr",
"(",
"func",
",",
"\"__name__\"",
",",
"\"\"",
")",
"==",
"\"__newobj__\"",
":",
"#Added fix to allow transient",
"cls",
"=",
"args",
"[",
"0",
"]",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"\"__new__\"",
")",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"args[0] from __newobj__ args has no __new__\"",
")",
"if",
"obj",
"is",
"not",
"None",
"and",
"cls",
"is",
"not",
"obj",
".",
"__class__",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"args[0] from __newobj__ args has the wrong class\"",
")",
"args",
"=",
"args",
"[",
"1",
":",
"]",
"save",
"(",
"cls",
")",
"#Don't pickle transient entries",
"if",
"hasattr",
"(",
"obj",
",",
"'__transient__'",
")",
":",
"transient",
"=",
"obj",
".",
"__transient__",
"state",
"=",
"state",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"list",
"(",
"state",
".",
"keys",
"(",
")",
")",
":",
"if",
"k",
"in",
"transient",
":",
"del",
"state",
"[",
"k",
"]",
"save",
"(",
"args",
")",
"write",
"(",
"pickle",
".",
"NEWOBJ",
")",
"else",
":",
"save",
"(",
"func",
")",
"save",
"(",
"args",
")",
"write",
"(",
"pickle",
".",
"REDUCE",
")",
"if",
"obj",
"is",
"not",
"None",
":",
"self",
".",
"memoize",
"(",
"obj",
")",
"# More new special cases (that work with older protocols as",
"# well): when __reduce__ returns a tuple with 4 or 5 items,",
"# the 4th and 5th item should be iterators that provide list",
"# items and dict items (as (key, value) tuples), or None.",
"if",
"listitems",
"is",
"not",
"None",
":",
"self",
".",
"_batch_appends",
"(",
"listitems",
")",
"if",
"dictitems",
"is",
"not",
"None",
":",
"self",
".",
"_batch_setitems",
"(",
"dictitems",
")",
"if",
"state",
"is",
"not",
"None",
":",
"save",
"(",
"state",
")",
"write",
"(",
"pickle",
".",
"BUILD",
")"
] | Modified to support __transient__ on new objects
Change only affects protocol level 2 (which is always used by PiCloud | [
"Modified",
"to",
"support",
"__transient__",
"on",
"new",
"objects",
"Change",
"only",
"affects",
"protocol",
"level",
"2",
"(",
"which",
"is",
"always",
"used",
"by",
"PiCloud"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L777-L837 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.