repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_partial | def save_partial(self, obj):
"""Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords)) | python | def save_partial(self, obj):
"""Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords)) | [
"def",
"save_partial",
"(",
"self",
",",
"obj",
")",
":",
"self",
".",
"save_reduce",
"(",
"_genpartial",
",",
"(",
"obj",
".",
"func",
",",
"obj",
".",
"args",
",",
"obj",
".",
"keywords",
")",
")"
] | Partial objects do not serialize correctly in python2.x -- this fixes the bugs | [
"Partial",
"objects",
"do",
"not",
"serialize",
"correctly",
"in",
"python2",
".",
"x",
"--",
"this",
"fixes",
"the",
"bugs"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L839-L841 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_file | def save_file(self, obj):
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode and '+' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading: %s" % obj.mode)
name = obj.name
retval = pystringIO.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval.write(contents)
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj) | python | def save_file(self, obj):
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode and '+' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading: %s" % obj.mode)
name = obj.name
retval = pystringIO.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval.write(contents)
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj) | [
"def",
"save_file",
"(",
"self",
",",
"obj",
")",
":",
"try",
":",
"import",
"StringIO",
"as",
"pystringIO",
"#we can't use cStringIO as it lacks the name attribute",
"except",
"ImportError",
":",
"import",
"io",
"as",
"pystringIO",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'name'",
")",
"or",
"not",
"hasattr",
"(",
"obj",
",",
"'mode'",
")",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Cannot pickle files that do not map to an actual file\"",
")",
"if",
"obj",
"is",
"sys",
".",
"stdout",
":",
"return",
"self",
".",
"save_reduce",
"(",
"getattr",
",",
"(",
"sys",
",",
"'stdout'",
")",
",",
"obj",
"=",
"obj",
")",
"if",
"obj",
"is",
"sys",
".",
"stderr",
":",
"return",
"self",
".",
"save_reduce",
"(",
"getattr",
",",
"(",
"sys",
",",
"'stderr'",
")",
",",
"obj",
"=",
"obj",
")",
"if",
"obj",
"is",
"sys",
".",
"stdin",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Cannot pickle standard input\"",
")",
"if",
"obj",
".",
"closed",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Cannot pickle closed files\"",
")",
"if",
"hasattr",
"(",
"obj",
",",
"'isatty'",
")",
"and",
"obj",
".",
"isatty",
"(",
")",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Cannot pickle files that map to tty objects\"",
")",
"if",
"'r'",
"not",
"in",
"obj",
".",
"mode",
"and",
"'+'",
"not",
"in",
"obj",
".",
"mode",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Cannot pickle files that are not opened for reading: %s\"",
"%",
"obj",
".",
"mode",
")",
"name",
"=",
"obj",
".",
"name",
"retval",
"=",
"pystringIO",
".",
"StringIO",
"(",
")",
"try",
":",
"# Read the whole file",
"curloc",
"=",
"obj",
".",
"tell",
"(",
")",
"obj",
".",
"seek",
"(",
"0",
")",
"contents",
"=",
"obj",
".",
"read",
"(",
")",
"obj",
".",
"seek",
"(",
"curloc",
")",
"except",
"IOError",
":",
"raise",
"pickle",
".",
"PicklingError",
"(",
"\"Cannot pickle file %s as it cannot be read\"",
"%",
"name",
")",
"retval",
".",
"write",
"(",
"contents",
")",
"retval",
".",
"seek",
"(",
"curloc",
")",
"retval",
".",
"name",
"=",
"name",
"self",
".",
"save",
"(",
"retval",
")",
"self",
".",
"memoize",
"(",
"obj",
")"
] | Save a file | [
"Save",
"a",
"file"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L847-L886 | train |
apple/turicreate | src/unity/python/turicreate/util/_cloudpickle.py | CloudPickler.save_ufunc | def save_ufunc(self, obj):
"""Hack function for saving numpy ufunc objects"""
name = obj.__name__
numpy_tst_mods = ['numpy', 'scipy.special']
for tst_mod_name in numpy_tst_mods:
tst_mod = sys.modules.get(tst_mod_name, None)
if tst_mod and name in tst_mod.__dict__:
return self.save_reduce(_getobject, (tst_mod_name, name))
raise pickle.PicklingError('cannot save %s. Cannot resolve what module it is defined in'
% str(obj)) | python | def save_ufunc(self, obj):
"""Hack function for saving numpy ufunc objects"""
name = obj.__name__
numpy_tst_mods = ['numpy', 'scipy.special']
for tst_mod_name in numpy_tst_mods:
tst_mod = sys.modules.get(tst_mod_name, None)
if tst_mod and name in tst_mod.__dict__:
return self.save_reduce(_getobject, (tst_mod_name, name))
raise pickle.PicklingError('cannot save %s. Cannot resolve what module it is defined in'
% str(obj)) | [
"def",
"save_ufunc",
"(",
"self",
",",
"obj",
")",
":",
"name",
"=",
"obj",
".",
"__name__",
"numpy_tst_mods",
"=",
"[",
"'numpy'",
",",
"'scipy.special'",
"]",
"for",
"tst_mod_name",
"in",
"numpy_tst_mods",
":",
"tst_mod",
"=",
"sys",
".",
"modules",
".",
"get",
"(",
"tst_mod_name",
",",
"None",
")",
"if",
"tst_mod",
"and",
"name",
"in",
"tst_mod",
".",
"__dict__",
":",
"return",
"self",
".",
"save_reduce",
"(",
"_getobject",
",",
"(",
"tst_mod_name",
",",
"name",
")",
")",
"raise",
"pickle",
".",
"PicklingError",
"(",
"'cannot save %s. Cannot resolve what module it is defined in'",
"%",
"str",
"(",
"obj",
")",
")"
] | Hack function for saving numpy ufunc objects | [
"Hack",
"function",
"for",
"saving",
"numpy",
"ufunc",
"objects"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_cloudpickle.py#L915-L924 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_database.py | _ExtractSymbols | def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = '.'.join((package, desc_proto.name))
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name)) | python | def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = '.'.join((package, desc_proto.name))
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name)) | [
"def",
"_ExtractSymbols",
"(",
"desc_proto",
",",
"package",
")",
":",
"message_name",
"=",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"desc_proto",
".",
"name",
")",
")",
"yield",
"message_name",
"for",
"nested_type",
"in",
"desc_proto",
".",
"nested_type",
":",
"for",
"symbol",
"in",
"_ExtractSymbols",
"(",
"nested_type",
",",
"message_name",
")",
":",
"yield",
"symbol",
"for",
"enum_type",
"in",
"desc_proto",
".",
"enum_type",
":",
"yield",
"'.'",
".",
"join",
"(",
"(",
"message_name",
",",
"enum_type",
".",
"name",
")",
")"
] | Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor. | [
"Pulls",
"out",
"all",
"the",
"symbols",
"from",
"a",
"descriptor",
"proto",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_database.py#L127-L144 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_database.py | DescriptorDatabase.Add | def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
add a proto with the same name but different definition than an
exisiting proto in the database.
"""
proto_name = file_desc_proto.name
if proto_name not in self._file_desc_protos_by_file:
self._file_desc_protos_by_file[proto_name] = file_desc_proto
elif self._file_desc_protos_by_file[proto_name] != file_desc_proto:
raise DescriptorDatabaseConflictingDefinitionError(
'%s already added, but with different descriptor.' % proto_name)
# Add all the top-level descriptors to the index.
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(
(name, file_desc_proto) for name in _ExtractSymbols(message, package))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol[
'.'.join((package, enum.name))] = file_desc_proto
for extension in file_desc_proto.extension:
self._file_desc_protos_by_symbol[
'.'.join((package, extension.name))] = file_desc_proto
for service in file_desc_proto.service:
self._file_desc_protos_by_symbol[
'.'.join((package, service.name))] = file_desc_proto | python | def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
add a proto with the same name but different definition than an
exisiting proto in the database.
"""
proto_name = file_desc_proto.name
if proto_name not in self._file_desc_protos_by_file:
self._file_desc_protos_by_file[proto_name] = file_desc_proto
elif self._file_desc_protos_by_file[proto_name] != file_desc_proto:
raise DescriptorDatabaseConflictingDefinitionError(
'%s already added, but with different descriptor.' % proto_name)
# Add all the top-level descriptors to the index.
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(
(name, file_desc_proto) for name in _ExtractSymbols(message, package))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol[
'.'.join((package, enum.name))] = file_desc_proto
for extension in file_desc_proto.extension:
self._file_desc_protos_by_symbol[
'.'.join((package, extension.name))] = file_desc_proto
for service in file_desc_proto.service:
self._file_desc_protos_by_symbol[
'.'.join((package, service.name))] = file_desc_proto | [
"def",
"Add",
"(",
"self",
",",
"file_desc_proto",
")",
":",
"proto_name",
"=",
"file_desc_proto",
".",
"name",
"if",
"proto_name",
"not",
"in",
"self",
".",
"_file_desc_protos_by_file",
":",
"self",
".",
"_file_desc_protos_by_file",
"[",
"proto_name",
"]",
"=",
"file_desc_proto",
"elif",
"self",
".",
"_file_desc_protos_by_file",
"[",
"proto_name",
"]",
"!=",
"file_desc_proto",
":",
"raise",
"DescriptorDatabaseConflictingDefinitionError",
"(",
"'%s already added, but with different descriptor.'",
"%",
"proto_name",
")",
"# Add all the top-level descriptors to the index.",
"package",
"=",
"file_desc_proto",
".",
"package",
"for",
"message",
"in",
"file_desc_proto",
".",
"message_type",
":",
"self",
".",
"_file_desc_protos_by_symbol",
".",
"update",
"(",
"(",
"name",
",",
"file_desc_proto",
")",
"for",
"name",
"in",
"_ExtractSymbols",
"(",
"message",
",",
"package",
")",
")",
"for",
"enum",
"in",
"file_desc_proto",
".",
"enum_type",
":",
"self",
".",
"_file_desc_protos_by_symbol",
"[",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"enum",
".",
"name",
")",
")",
"]",
"=",
"file_desc_proto",
"for",
"extension",
"in",
"file_desc_proto",
".",
"extension",
":",
"self",
".",
"_file_desc_protos_by_symbol",
"[",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"extension",
".",
"name",
")",
")",
"]",
"=",
"file_desc_proto",
"for",
"service",
"in",
"file_desc_proto",
".",
"service",
":",
"self",
".",
"_file_desc_protos_by_symbol",
"[",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"service",
".",
"name",
")",
")",
"]",
"=",
"file_desc_proto"
] | Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
add a proto with the same name but different definition than an
exisiting proto in the database. | [
"Adds",
"the",
"FileDescriptorProto",
"and",
"its",
"types",
"to",
"this",
"database",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_database.py#L51-L81 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_normalizer.py | convert | def convert(model, input_features, output_features):
"""Convert a normalizer model to the protobuf spec.
Parameters
----------
model: Normalizer
A Normalizer.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, Normalizer)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'norm'))
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
spec = _set_transform_interface_params(spec, input_features, output_features)
# Set the one hot encoder parameters
_normalizer_spec = spec.normalizer
if model.norm == 'l1':
_normalizer_spec.normType = _proto__normalizer.L1
elif model.norm == 'l2':
_normalizer_spec.normType = _proto__normalizer.L2
elif model.norm == 'max':
_normalizer_spec.normType = _proto__normalizer.LMax
return _MLModel(spec) | python | def convert(model, input_features, output_features):
"""Convert a normalizer model to the protobuf spec.
Parameters
----------
model: Normalizer
A Normalizer.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, Normalizer)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'norm'))
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
spec = _set_transform_interface_params(spec, input_features, output_features)
# Set the one hot encoder parameters
_normalizer_spec = spec.normalizer
if model.norm == 'l1':
_normalizer_spec.normType = _proto__normalizer.L1
elif model.norm == 'l2':
_normalizer_spec.normType = _proto__normalizer.L2
elif model.norm == 'max':
_normalizer_spec.normType = _proto__normalizer.LMax
return _MLModel(spec) | [
"def",
"convert",
"(",
"model",
",",
"input_features",
",",
"output_features",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"# Test the scikit-learn model",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"Normalizer",
")",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"lambda",
"m",
":",
"hasattr",
"(",
"m",
",",
"'norm'",
")",
")",
"# Set the interface params.",
"spec",
"=",
"_Model_pb2",
".",
"Model",
"(",
")",
"spec",
".",
"specificationVersion",
"=",
"SPECIFICATION_VERSION",
"spec",
"=",
"_set_transform_interface_params",
"(",
"spec",
",",
"input_features",
",",
"output_features",
")",
"# Set the one hot encoder parameters",
"_normalizer_spec",
"=",
"spec",
".",
"normalizer",
"if",
"model",
".",
"norm",
"==",
"'l1'",
":",
"_normalizer_spec",
".",
"normType",
"=",
"_proto__normalizer",
".",
"L1",
"elif",
"model",
".",
"norm",
"==",
"'l2'",
":",
"_normalizer_spec",
".",
"normType",
"=",
"_proto__normalizer",
".",
"L2",
"elif",
"model",
".",
"norm",
"==",
"'max'",
":",
"_normalizer_spec",
".",
"normType",
"=",
"_proto__normalizer",
".",
"LMax",
"return",
"_MLModel",
"(",
"spec",
")"
] | Convert a normalizer model to the protobuf spec.
Parameters
----------
model: Normalizer
A Normalizer.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"normalizer",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_normalizer.py#L24-L64 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/tracker/rabit_mpi.py | mpi_submit | def mpi_submit(nslave, worker_args, worker_envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit
"""
worker_args += ['%s=%s' % (k, str(v)) for k, v in worker_envs.items()]
sargs = ' '.join(args.command + worker_args)
if args.hostfile is None:
cmd = ' '.join(['mpirun -n %d' % (nslave)] + args.command + worker_args)
else:
cmd = ' '.join(['mpirun -n %d --hostfile %s' % (nslave, args.hostfile)] + args.command + worker_args)
print cmd
subprocess.check_call(cmd, shell = True) | python | def mpi_submit(nslave, worker_args, worker_envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit
"""
worker_args += ['%s=%s' % (k, str(v)) for k, v in worker_envs.items()]
sargs = ' '.join(args.command + worker_args)
if args.hostfile is None:
cmd = ' '.join(['mpirun -n %d' % (nslave)] + args.command + worker_args)
else:
cmd = ' '.join(['mpirun -n %d --hostfile %s' % (nslave, args.hostfile)] + args.command + worker_args)
print cmd
subprocess.check_call(cmd, shell = True) | [
"def",
"mpi_submit",
"(",
"nslave",
",",
"worker_args",
",",
"worker_envs",
")",
":",
"worker_args",
"+=",
"[",
"'%s=%s'",
"%",
"(",
"k",
",",
"str",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"worker_envs",
".",
"items",
"(",
")",
"]",
"sargs",
"=",
"' '",
".",
"join",
"(",
"args",
".",
"command",
"+",
"worker_args",
")",
"if",
"args",
".",
"hostfile",
"is",
"None",
":",
"cmd",
"=",
"' '",
".",
"join",
"(",
"[",
"'mpirun -n %d'",
"%",
"(",
"nslave",
")",
"]",
"+",
"args",
".",
"command",
"+",
"worker_args",
")",
"else",
":",
"cmd",
"=",
"' '",
".",
"join",
"(",
"[",
"'mpirun -n %d --hostfile %s'",
"%",
"(",
"nslave",
",",
"args",
".",
"hostfile",
")",
"]",
"+",
"args",
".",
"command",
"+",
"worker_args",
")",
"print",
"cmd",
"subprocess",
".",
"check_call",
"(",
"cmd",
",",
"shell",
"=",
"True",
")"
] | customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit | [
"customized",
"submit",
"script",
"that",
"submit",
"nslave",
"jobs",
"each",
"must",
"contain",
"args",
"as",
"parameter",
"note",
"this",
"can",
"be",
"a",
"lambda",
"function",
"containing",
"additional",
"parameters",
"in",
"input",
"Parameters",
"nslave",
"number",
"of",
"slave",
"process",
"to",
"start",
"up",
"args",
"arguments",
"to",
"launch",
"each",
"job",
"this",
"usually",
"includes",
"the",
"parameters",
"of",
"master_uri",
"and",
"parameters",
"passed",
"into",
"submit"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_mpi.py#L24-L40 | train |
apple/turicreate | src/unity/python/turicreate/meta/bytecodetools/bytecode_consumer.py | ByteCodeConsumer.consume | def consume(self):
'''
Consume byte-code
'''
generic_consume = getattr(self, 'generic_consume', None)
for instr in disassembler(self.code):
method_name = 'consume_%s' % (instr.opname)
method = getattr(self, method_name, generic_consume)
if not method:
raise AttributeError("class %r has no method %r" % (type(self).__name__, method_name))
self.instruction_pre(instr)
method(instr)
self.instruction_post(instr) | python | def consume(self):
'''
Consume byte-code
'''
generic_consume = getattr(self, 'generic_consume', None)
for instr in disassembler(self.code):
method_name = 'consume_%s' % (instr.opname)
method = getattr(self, method_name, generic_consume)
if not method:
raise AttributeError("class %r has no method %r" % (type(self).__name__, method_name))
self.instruction_pre(instr)
method(instr)
self.instruction_post(instr) | [
"def",
"consume",
"(",
"self",
")",
":",
"generic_consume",
"=",
"getattr",
"(",
"self",
",",
"'generic_consume'",
",",
"None",
")",
"for",
"instr",
"in",
"disassembler",
"(",
"self",
".",
"code",
")",
":",
"method_name",
"=",
"'consume_%s'",
"%",
"(",
"instr",
".",
"opname",
")",
"method",
"=",
"getattr",
"(",
"self",
",",
"method_name",
",",
"generic_consume",
")",
"if",
"not",
"method",
":",
"raise",
"AttributeError",
"(",
"\"class %r has no method %r\"",
"%",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"method_name",
")",
")",
"self",
".",
"instruction_pre",
"(",
"instr",
")",
"method",
"(",
"instr",
")",
"self",
".",
"instruction_post",
"(",
"instr",
")"
] | Consume byte-code | [
"Consume",
"byte",
"-",
"code"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/bytecodetools/bytecode_consumer.py#L25-L39 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/random_forest_classifier.py | create | def create(dataset, target,
features=None,
max_iterations=10,
validation_set='auto',
verbose=True, class_weights=None,
random_seed=None,
metric='auto',
**kwargs):
"""
Create a (binary or multi-class) classifier model of type
:class:`~turicreate.random_forest_classifier.RandomForestClassifier` using
an ensemble of decision trees trained on subsets of the data.
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
target : str
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable values.
For example, a target variable with 'cat', 'dog', and 'foosa' as possible
values is mapped to 0, 1, and, 2 respectively.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, which uses all columns in the SFrame ``dataset``
excepting the target column..
max_iterations : int, optional
The maximum number of iterations to perform. For multi-class
classification with K classes, each iteration will create K-1 trees.
max_depth : float, optional
Maximum depth of a tree.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are supposed to have weight one. The
`auto` mode set the class weight to be inversely proportional to number of
examples in the training data with the given class.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition on a
leaf node of the tree. The larger it is, the more conservative the
algorithm will be. Must be non-negative.
min_child_weight : float, optional (non-negative)
Controls the minimum weight of each leaf node. Larger values result in
more conservative tree learning and help prevent overfitting.
Formally, this is minimum sum of instance weights (hessians) in each
node. If the tree learning algorithm results in a leaf node with the
sum of instance weights less than `min_child_weight`, tree building
will terminate.
row_subsample : float, optional
Subsample the ratio of the training set in each iteration of tree
construction. This is called the bagging trick and can usually help
prevent overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the examples (rows) to grow each tree.
column_subsample : float, optional
Subsample ratio of the columns in each iteration of tree
construction. Like row_subsample, this can also help prevent
model overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the columns to grow each tree.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. This is computed once per full iteration. Large
differences in model accuracy between the training data and validation
data is indicative of overfitting. The default value is 'auto'.
verbose : boolean, optional
Print progress information during training (if set to true).
random_seed : int, optional
Seeds random operations such as column and row subsampling, such that
results are reproducible.
metric : str or list[str], optional
Performance metric(s) that are tracked during training. When specified,
the progress table will display the tracked metric(s) on training and
validation set.
Supported metrics are: {'accuracy', 'auc', 'log_loss'}
kwargs : dict, optional
Additional arguments for training the model.
- ``model_checkpoint_path`` : str, default None
If specified, checkpoint the model training to the given path every n iterations,
where n is specified by ``model_checkpoint_interval``.
For instance, if `model_checkpoint_interval` is 5, and `model_checkpoint_path` is
set to ``/tmp/model_tmp``, the checkpoints will be saved into
``/tmp/model_tmp/model_checkpoint_5``, ``/tmp/model_tmp/model_checkpoint_10``, ... etc.
Training can be resumed by setting ``resume_from_checkpoint`` to one of these checkpoints.
- ``model_checkpoint_interval`` : int, default 5
If model_check_point_path is specified,
save the model to the given path every n iterations.
- ``resume_from_checkpoint`` : str, default None
Continues training from a model checkpoint. The model must take
exact the same training data as the checkpointed model.
Returns
-------
out : RandomForestClassifier
A trained random forest model for classification tasks.
References
----------
- `Trevor Hastie's slides on Boosted Trees and Random Forest
<http://jessica2.msri.org/attachments/10778/10778-boost.pdf>`_
See Also
--------
RandomForestClassifier, turicreate.logistic_classifier.LogisticClassifier, turicreate.svm_classifier.SVMClassifier
Examples
--------
.. sourcecode:: python
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = turicreate.SFrame.read_csv(url)
>>> train, test = data.random_split(0.8)
>>> model = turicreate.random_forest_classifier.create(train, target='label')
>>> predictions = model.classify(test)
>>> results = model.evaluate(test)
"""
if random_seed is not None:
kwargs['random_seed'] = random_seed
if 'model_checkpoint_path' in kwargs:
kwargs['model_checkpoint_path'] = _make_internal_url(kwargs['model_checkpoint_path'])
if 'resume_from_checkpoint' in kwargs:
kwargs['resume_from_checkpoint'] = _make_internal_url(kwargs['resume_from_checkpoint'])
if 'num_trees' in kwargs:
logger = _logging.getLogger(__name__)
logger.warning("The `num_trees` keyword argument is deprecated. Please "
"use the `max_iterations` argument instead. Any value provided "
"for `num_trees` will be used in place of `max_iterations`.")
max_iterations = kwargs['num_trees']
del kwargs['num_trees']
model = _sl.create(dataset = dataset,
target = target,
features = features,
model_name = 'random_forest_classifier',
max_iterations = max_iterations,
validation_set = validation_set,
class_weights = class_weights,
verbose = verbose,
metric = metric,
**kwargs)
def create(dataset, target,
           features=None,
           max_iterations=10,
           validation_set='auto',
           verbose=True, class_weights=None,
           random_seed=None,
           metric='auto',
           **kwargs):
    """
    Train a (binary or multi-class) classifier of type
    :class:`~turicreate.random_forest_classifier.RandomForestClassifier`,
    an ensemble of decision trees grown on random subsets of the data.

    Parameters
    ----------
    dataset : SFrame
        Training data containing the feature columns and the target column.
    target : str
        Name of the target column. Values must be strings or integers;
        string labels are mapped to integers in alphabetical order.
    features : list[str], optional
        Feature column names to train on. Defaults to every column of
        ``dataset`` except the target.
    max_iterations : int, optional
        Maximum number of boosting iterations. For K-class problems each
        iteration grows K-1 trees.
    validation_set : SFrame, optional
        Data used to monitor generalization during training. 'auto'
        (the default) samples a validation set automatically; None
        disables validation metrics.
    verbose : boolean, optional
        If True, print progress information during training.
    class_weights : {dict, `auto`}, optional
        Per-class example weights. None gives every class weight one;
        'auto' weights classes inversely to their frequency.
    random_seed : int, optional
        Seed for random operations (row/column subsampling) so results
        are reproducible.
    metric : str or list[str], optional
        Metric(s) tracked during training; one of
        {'accuracy', 'auc', 'log_loss'} or 'auto'.
    kwargs : dict, optional
        Additional training options, e.g. ``model_checkpoint_path``,
        ``model_checkpoint_interval``, ``resume_from_checkpoint``, and
        tree parameters such as ``max_depth``, ``min_loss_reduction``,
        ``min_child_weight``, ``row_subsample``, ``column_subsample``.

    Returns
    -------
    out : RandomForestClassifier
        A trained random forest model for classification tasks.

    Examples
    --------
    >>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
    >>> data = turicreate.SFrame.read_csv(url)
    >>> train, test = data.random_split(0.8)
    >>> model = turicreate.random_forest_classifier.create(train, target='label')
    >>> predictions = model.classify(test)
    >>> results = model.evaluate(test)
    """
    if random_seed is not None:
        kwargs['random_seed'] = random_seed

    # Checkpoint locations may be local paths or remote URLs; normalize
    # both to the internal URL form the C++ toolkit expects.
    for url_key in ('model_checkpoint_path', 'resume_from_checkpoint'):
        if url_key in kwargs:
            kwargs[url_key] = _make_internal_url(kwargs[url_key])

    # Backward compatibility: `num_trees` is the deprecated spelling of
    # `max_iterations` and, when supplied, takes precedence over it.
    if 'num_trees' in kwargs:
        _logging.getLogger(__name__).warning(
            "The `num_trees` keyword argument is deprecated. Please "
            "use the `max_iterations` argument instead. Any value provided "
            "for `num_trees` will be used in place of `max_iterations`.")
        max_iterations = kwargs.pop('num_trees')

    model = _sl.create(dataset=dataset,
                       target=target,
                       features=features,
                       model_name='random_forest_classifier',
                       max_iterations=max_iterations,
                       validation_set=validation_set,
                       class_weights=class_weights,
                       verbose=verbose,
                       metric=metric,
                       **kwargs)
    return RandomForestClassifier(model.__proxy__)
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"max_iterations",
"=",
"10",
",",
"validation_set",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
",",
"class_weights",
"=",
"None",
",",
"random_seed",
"=",
"None",
",",
"metric",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"random_seed",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'random_seed'",
"]",
"=",
"random_seed",
"if",
"'model_checkpoint_path'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'model_checkpoint_path'",
"]",
"=",
"_make_internal_url",
"(",
"kwargs",
"[",
"'model_checkpoint_path'",
"]",
")",
"if",
"'resume_from_checkpoint'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'resume_from_checkpoint'",
"]",
"=",
"_make_internal_url",
"(",
"kwargs",
"[",
"'resume_from_checkpoint'",
"]",
")",
"if",
"'num_trees'",
"in",
"kwargs",
":",
"logger",
"=",
"_logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"warning",
"(",
"\"The `num_trees` keyword argument is deprecated. Please \"",
"\"use the `max_iterations` argument instead. Any value provided \"",
"\"for `num_trees` will be used in place of `max_iterations`.\"",
")",
"max_iterations",
"=",
"kwargs",
"[",
"'num_trees'",
"]",
"del",
"kwargs",
"[",
"'num_trees'",
"]",
"model",
"=",
"_sl",
".",
"create",
"(",
"dataset",
"=",
"dataset",
",",
"target",
"=",
"target",
",",
"features",
"=",
"features",
",",
"model_name",
"=",
"'random_forest_classifier'",
",",
"max_iterations",
"=",
"max_iterations",
",",
"validation_set",
"=",
"validation_set",
",",
"class_weights",
"=",
"class_weights",
",",
"verbose",
"=",
"verbose",
",",
"metric",
"=",
"metric",
",",
"*",
"*",
"kwargs",
")",
"return",
"RandomForestClassifier",
"(",
"model",
".",
"__proxy__",
")"
] | Create a (binary or multi-class) classifier model of type
:class:`~turicreate.random_forest_classifier.RandomForestClassifier` using
an ensemble of decision trees trained on subsets of the data.
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
target : str
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable values.
For example, a target variable with 'cat', 'dog', and 'foosa' as possible
values is mapped to 0, 1, and, 2 respectively.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, which uses all columns in the SFrame ``dataset``
excepting the target column..
max_iterations : int, optional
The maximum number of iterations to perform. For multi-class
classification with K classes, each iteration will create K-1 trees.
max_depth : float, optional
Maximum depth of a tree.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are supposed to have weight one. The
`auto` mode set the class weight to be inversely proportional to number of
examples in the training data with the given class.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition on a
leaf node of the tree. The larger it is, the more conservative the
algorithm will be. Must be non-negative.
min_child_weight : float, optional (non-negative)
Controls the minimum weight of each leaf node. Larger values result in
more conservative tree learning and help prevent overfitting.
Formally, this is minimum sum of instance weights (hessians) in each
node. If the tree learning algorithm results in a leaf node with the
sum of instance weights less than `min_child_weight`, tree building
will terminate.
row_subsample : float, optional
Subsample the ratio of the training set in each iteration of tree
construction. This is called the bagging trick and can usually help
prevent overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the examples (rows) to grow each tree.
column_subsample : float, optional
Subsample ratio of the columns in each iteration of tree
construction. Like row_subsample, this can also help prevent
model overfitting. Setting this to a value of 0.5 results in the
model randomly sampling half of the columns to grow each tree.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. This is computed once per full iteration. Large
differences in model accuracy between the training data and validation
data is indicative of overfitting. The default value is 'auto'.
verbose : boolean, optional
Print progress information during training (if set to true).
random_seed : int, optional
Seeds random operations such as column and row subsampling, such that
results are reproducible.
metric : str or list[str], optional
Performance metric(s) that are tracked during training. When specified,
the progress table will display the tracked metric(s) on training and
validation set.
Supported metrics are: {'accuracy', 'auc', 'log_loss'}
kwargs : dict, optional
Additional arguments for training the model.
- ``model_checkpoint_path`` : str, default None
If specified, checkpoint the model training to the given path every n iterations,
where n is specified by ``model_checkpoint_interval``.
For instance, if `model_checkpoint_interval` is 5, and `model_checkpoint_path` is
set to ``/tmp/model_tmp``, the checkpoints will be saved into
``/tmp/model_tmp/model_checkpoint_5``, ``/tmp/model_tmp/model_checkpoint_10``, ... etc.
Training can be resumed by setting ``resume_from_checkpoint`` to one of these checkpoints.
- ``model_checkpoint_interval`` : int, default 5
If model_check_point_path is specified,
save the model to the given path every n iterations.
- ``resume_from_checkpoint`` : str, default None
Continues training from a model checkpoint. The model must take
exact the same training data as the checkpointed model.
Returns
-------
out : RandomForestClassifier
A trained random forest model for classification tasks.
References
----------
- `Trevor Hastie's slides on Boosted Trees and Random Forest
<http://jessica2.msri.org/attachments/10778/10778-boost.pdf>`_
See Also
--------
RandomForestClassifier, turicreate.logistic_classifier.LogisticClassifier, turicreate.svm_classifier.SVMClassifier
Examples
--------
.. sourcecode:: python
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = turicreate.SFrame.read_csv(url)
>>> train, test = data.random_split(0.8)
>>> model = turicreate.random_forest_classifier.create(train, target='label')
>>> predictions = model.classify(test)
>>> results = model.evaluate(test) | [
"Create",
"a",
"(",
"binary",
"or",
"multi",
"-",
"class",
")",
"classifier",
"model",
"of",
"type",
":",
"class",
":",
"~turicreate",
".",
"random_forest_classifier",
".",
"RandomForestClassifier",
"using",
"an",
"ensemble",
"of",
"decision",
"trees",
"trained",
"on",
"subsets",
"of",
"the",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/random_forest_classifier.py#L443-L610 | train |
def classify(self, dataset, missing_value_action='auto'):
    """
    Classify each example of ``dataset`` with the trained random forest
    model. The returned SFrame holds the predicted class label and the
    probability associated with it for every row.

    Parameters
    ----------
    dataset : SFrame
        New observations to classify. Must contain columns with the same
        names as the features used for training; a target column is not
        required and extra columns are ignored.
    missing_value_action : str, optional
        How to handle missing values:

        - 'auto': treat missing values as-is (default).
        - 'impute': fill missing values with the training-data mean,
          including whole columns missing at evaluation time.
        - 'error': abort with an error message.

    Returns
    -------
    out : SFrame
        Model predictions: class labels and their associated
        probabilities.

    See Also
    ----------
    create, evaluate, predict

    Examples
    ----------
    >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
    >>> data['is_expensive'] = data['price'] > 30000
    >>> model = turicreate.random_forest_classifier.create(data,
    >>>                                                    target='is_expensive',
    >>>                                                    features=['bath', 'bedroom', 'size'])
    >>> classes = model.classify(data)
    """
    # Delegate to the shared tree-classifier implementation in the base
    # class; this subclass adds no classification-specific behavior.
    return super(RandomForestClassifier, self).classify(
        dataset, missing_value_action=missing_value_action)
"def",
"classify",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"return",
"super",
"(",
"RandomForestClassifier",
",",
"self",
")",
".",
"classify",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] | Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
as class labels (0 or 1) and probabilities associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.random_forest_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data) | [
"Return",
"a",
"classification",
"for",
"each",
"example",
"in",
"the",
"dataset",
"using",
"the",
"trained",
"random",
"forest",
"model",
".",
"The",
"output",
"SFrame",
"contains",
"predictions",
"as",
"class",
"labels",
"(",
"0",
"or",
"1",
")",
"and",
"probabilities",
"associated",
"with",
"the",
"the",
"example",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/random_forest_classifier.py#L360-L406 | train |
def _get_layer_converter_fn(layer):
    """Return the converter function registered for this Keras layer type.

    Raises TypeError when the layer's type has no entry in the registry.
    """
    layer_type = type(layer)
    # Guard clause: fail fast on unsupported layer types.
    if layer_type not in _KERAS_LAYER_REGISTRY:
        raise TypeError("Keras layer of type %s is not supported." % type(layer))
    return _KERAS_LAYER_REGISTRY[layer_type]
"def",
"_get_layer_converter_fn",
"(",
"layer",
")",
":",
"layer_type",
"=",
"type",
"(",
"layer",
")",
"if",
"layer_type",
"in",
"_KERAS_LAYER_REGISTRY",
":",
"return",
"_KERAS_LAYER_REGISTRY",
"[",
"layer_type",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"Keras layer of type %s is not supported.\"",
"%",
"type",
"(",
"layer",
")",
")"
] | Get the right converter function for Keras | [
"Get",
"the",
"right",
"converter",
"function",
"for",
"Keras"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras_converter.py#L103-L110 | train |
def convertToSpec(model,
                  input_names = None,
                  output_names = None,
                  image_input_names = None,
                  input_name_shape_dict = None,
                  is_bgr = False,
                  red_bias = 0.0,
                  green_bias = 0.0,
                  blue_bias = 0.0,
                  gray_bias = 0.0,
                  image_scale = 1.0,
                  class_labels = None,
                  predicted_feature_name = None,
                  model_precision = _MLMODEL_FULL_PRECISION,
                  predicted_probabilities_output = '',
                  add_custom_layers = False,
                  custom_conversion_functions = None,
                  custom_objects=None):
    """
    Convert a Keras model to Core ML protobuf specification (.mlmodel).

    Parameters
    ----------
    model: Keras model object | str | (str, str)
        A trained Keras neural network model: a Keras model object, a
        string path to a saved model file (h5), or a tuple of string
        paths (architecture .json file, weights .h5 file).
    input_names: [str] | str
        Optional name(s) for the inputs of the Keras model, used in the
        Core ML model interface. Defaults to [input1, ..., inputN], in
        the same order as the Keras inputs.
    output_names: [str] | str
        Optional name(s) for the outputs of the Keras model, used in the
        Core ML model interface. Defaults to [output1, ..., outputN], in
        the same order as the Keras outputs.
    image_input_names: [str] | str
        Input names (a subset of the input_names parameter) that should
        be treated as images by Core ML. All other inputs are treated as
        MultiArrays (N-D Arrays).
    input_name_shape_dict: {str: [int]}
        Optional dictionary mapping input tensor names to their shapes
        expressed as lists of ints. Defaults to an empty mapping.
    is_bgr: bool | dict()
        True if the model's internal channel order for color images is
        BGR; otherwise RGB is assumed. Applicable only if
        image_input_names is specified; a dictionary keyed by input name
        gives per-input values. This flag describes the model's internal
        channel order, not the pixel layout of images passed in.
    red_bias: float | dict()
        Bias added to the red channel of the input image. Defaults to
        0.0. Applicable only if image_input_names is specified; a
        dictionary keyed by input name gives per-input values.
    blue_bias: float | dict()
        Bias added to the blue channel of the input image. Defaults to
        0.0. Applicable only if image_input_names is specified; a
        dictionary keyed by input name gives per-input values.
    green_bias: float | dict()
        Bias added to the green channel of the input image. Defaults to
        0.0. Applicable only if image_input_names is specified; a
        dictionary keyed by input name gives per-input values.
    gray_bias: float | dict()
        Bias added to a grayscale input image. Defaults to 0.0.
        Applicable only if image_input_names is specified; a dictionary
        keyed by input name gives per-input values.
    image_scale: float | dict()
        Scale applied to input images before the biases are added and
        the Core ML model makes a prediction. Defaults to 1.0.
        Applicable only if image_input_names is specified; a dictionary
        keyed by input name gives per-input values.
    class_labels: list[int or str] | str
        Class labels (classifiers only) mapping network output indices
        to labels. A string is treated as a filepath of
        newline-separated label strings.
    predicted_feature_name: str
        Name of the class-label output feature exposed in the Core ML
        model (classifiers only). Defaults to 'classLabel'.
    model_precision: str
        Precision at which the model is saved: full precision (float)
        or half precision (float16). Defaults to
        '_MLMODEL_FULL_PRECISION' (full precision).
    predicted_probabilities_output: str
        Name of the network output interpreted as the predicted class
        probabilities (typically a softmax output). Defaults to the
        first output blob.
    add_custom_layers: bool
        If True, unknown Keras layer types are added to the model as
        'custom' layers, which must then be filled in as postprocessing.
    custom_conversion_functions: {'str': (Layer -> CustomLayerParams)}
        Dictionary with keys naming custom layers and values that are
        functions taking a Keras custom layer and returning a parameter
        dictionary and list of weights.
    custom_objects: {'str': (function)}
        Dictionary of {'<function name>': <function>} for custom objects
        such as a custom loss used by the Keras model.

    Returns
    -------
    model: MLModel
        Model in Core ML format.

    Examples
    --------
    .. sourcecode:: python

        # Make a Keras model
        >>> model = Sequential()
        >>> model.add(Dense(num_channels, input_dim = input_dim))

        # Convert it with default input and output names
        >>> import coremltools
        >>> coreml_model = coremltools.converters.keras.convert(model)

        # Saving the Core ML model to a file.
        >>> coreml_model.save('my_model.mlmodel')
    """
    if model_precision not in _VALID_MLMODEL_PRECISION_TYPES:
        raise RuntimeError('Model precision {} is not valid'.format(model_precision))

    # Bug fix: the default used to be the mutable literal `{}`, which is
    # created once at definition time and shared (and potentially mutated
    # downstream) across calls. Use None and materialize a fresh dict here.
    if input_name_shape_dict is None:
        input_name_shape_dict = {}

    if _HAS_KERAS_TF:
        # Keras 1.x converter (does not accept shape overrides).
        spec = _convert(model=model,
                        input_names=input_names,
                        output_names=output_names,
                        image_input_names=image_input_names,
                        is_bgr=is_bgr,
                        red_bias=red_bias,
                        green_bias=green_bias,
                        blue_bias=blue_bias,
                        gray_bias=gray_bias,
                        image_scale=image_scale,
                        class_labels=class_labels,
                        predicted_feature_name=predicted_feature_name,
                        predicted_probabilities_output=predicted_probabilities_output,
                        custom_objects=custom_objects)
    elif _HAS_KERAS2_TF:
        # Keras 2.x converter.
        from . import _keras2_converter
        spec = _keras2_converter._convert(model=model,
                                          input_names=input_names,
                                          output_names=output_names,
                                          image_input_names=image_input_names,
                                          input_name_shape_dict=input_name_shape_dict,
                                          is_bgr=is_bgr,
                                          red_bias=red_bias,
                                          green_bias=green_bias,
                                          blue_bias=blue_bias,
                                          gray_bias=gray_bias,
                                          image_scale=image_scale,
                                          class_labels=class_labels,
                                          predicted_feature_name=predicted_feature_name,
                                          predicted_probabilities_output=predicted_probabilities_output,
                                          add_custom_layers=add_custom_layers,
                                          custom_conversion_functions=custom_conversion_functions,
                                          custom_objects=custom_objects)
    else:
        raise RuntimeError(
            'Keras not found or unsupported version or backend found. keras conversion API is disabled.')

    # Optionally down-convert the weights to half precision after conversion.
    if model_precision == _MLMODEL_HALF_PRECISION and model is not None:
        spec = convert_neural_network_spec_weights_to_fp16(spec)

    return spec
input_names = None,
output_names = None,
image_input_names = None,
input_name_shape_dict = {},
is_bgr = False,
red_bias = 0.0,
green_bias = 0.0,
blue_bias = 0.0,
gray_bias = 0.0,
image_scale = 1.0,
class_labels = None,
predicted_feature_name = None,
model_precision = _MLMODEL_FULL_PRECISION,
predicted_probabilities_output = '',
add_custom_layers = False,
custom_conversion_functions = None,
custom_objects=None):
"""
Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
- a tuple of strings, where the first is the path to a Keras model
architecture (.json file), the second is the path to its weights
stored in h5 file.
input_names: [str] | str
Optional name(s) that can be given to the inputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the inputs of the Keras model. If not provided, the Keras inputs
are named to [input1, input2, ..., inputN] in the Core ML model. When
multiple inputs are present, the input feature names are in the same
order as the Keras inputs.
output_names: [str] | str
Optional name(s) that can be given to the outputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the outputs of the Keras model. If not provided, the Keras outputs
are named to [output1, output2, ..., outputN] in the Core ML model.
When multiple outputs are present, output feature names are in the same
order as the Keras inputs.
image_input_names: [str] | str
Input names to the Keras model (a subset of the input_names
parameter) that can be treated as images by Core ML. All other inputs
are treated as MultiArrays (N-D Arrays).
input_name_shape_dict: {str: [int]}
Optional Dictionary of input tensor names and their corresponding shapes expressed
as a list of ints
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
Note that this flag is about the models internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults
to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
filepath where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function. Defaults to the first output blob.
add_custom_layers: bool
If True, then unknown Keras layer types will be added to the model as
'custom' layers, which must then be filled in as postprocessing.
custom_conversion_functions: {'str': (Layer -> CustomLayerParams)}
A dictionary with keys corresponding to names of custom layers and values
as functions taking a Keras custom layer and returning a parameter dictionary
and list of weights.
custom_objects: {'str': (function)}
Dictionary that includes a key, value pair of {'<function name>': <function>}
for custom objects such as custom loss in the Keras model.
Provide a string of the name of the custom function as a key.
Provide a function as a value.
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a Keras model
>>> model = Sequential()
>>> model.add(Dense(num_channels, input_dim = input_dim))
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.keras.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
Converting a model with a single image input.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image')
Core ML also lets you add class labels to models to expose them as
classifiers.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names = 'image',
... image_input_names = 'image', class_labels = ['cat', 'dog', 'rat'])
Class labels for classifiers can also come from a file on disk.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image', class_labels = 'labels.txt')
Provide customized input and output names to the Keras inputs and outputs
while exposing them to Core ML.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... ['my_input_1', 'my_input_2'], output_names = ['my_output'])
"""
if model_precision not in _VALID_MLMODEL_PRECISION_TYPES:
raise RuntimeError('Model precision {} is not valid'.format(model_precision))
if _HAS_KERAS_TF:
spec = _convert(model=model,
input_names=input_names,
output_names=output_names,
image_input_names=image_input_names,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
class_labels=class_labels,
predicted_feature_name=predicted_feature_name,
predicted_probabilities_output=predicted_probabilities_output,
custom_objects=custom_objects)
elif _HAS_KERAS2_TF:
from . import _keras2_converter
spec = _keras2_converter._convert(model=model,
input_names=input_names,
output_names=output_names,
image_input_names=image_input_names,
input_name_shape_dict=input_name_shape_dict,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
class_labels=class_labels,
predicted_feature_name=predicted_feature_name,
predicted_probabilities_output=predicted_probabilities_output,
add_custom_layers=add_custom_layers,
custom_conversion_functions=custom_conversion_functions,
custom_objects=custom_objects)
else:
raise RuntimeError(
'Keras not found or unsupported version or backend found. keras conversion API is disabled.')
if model_precision == _MLMODEL_HALF_PRECISION and model is not None:
spec = convert_neural_network_spec_weights_to_fp16(spec)
return spec | [
"def",
"convertToSpec",
"(",
"model",
",",
"input_names",
"=",
"None",
",",
"output_names",
"=",
"None",
",",
"image_input_names",
"=",
"None",
",",
"input_name_shape_dict",
"=",
"{",
"}",
",",
"is_bgr",
"=",
"False",
",",
"red_bias",
"=",
"0.0",
",",
"green_bias",
"=",
"0.0",
",",
"blue_bias",
"=",
"0.0",
",",
"gray_bias",
"=",
"0.0",
",",
"image_scale",
"=",
"1.0",
",",
"class_labels",
"=",
"None",
",",
"predicted_feature_name",
"=",
"None",
",",
"model_precision",
"=",
"_MLMODEL_FULL_PRECISION",
",",
"predicted_probabilities_output",
"=",
"''",
",",
"add_custom_layers",
"=",
"False",
",",
"custom_conversion_functions",
"=",
"None",
",",
"custom_objects",
"=",
"None",
")",
":",
"if",
"model_precision",
"not",
"in",
"_VALID_MLMODEL_PRECISION_TYPES",
":",
"raise",
"RuntimeError",
"(",
"'Model precision {} is not valid'",
".",
"format",
"(",
"model_precision",
")",
")",
"if",
"_HAS_KERAS_TF",
":",
"spec",
"=",
"_convert",
"(",
"model",
"=",
"model",
",",
"input_names",
"=",
"input_names",
",",
"output_names",
"=",
"output_names",
",",
"image_input_names",
"=",
"image_input_names",
",",
"is_bgr",
"=",
"is_bgr",
",",
"red_bias",
"=",
"red_bias",
",",
"green_bias",
"=",
"green_bias",
",",
"blue_bias",
"=",
"blue_bias",
",",
"gray_bias",
"=",
"gray_bias",
",",
"image_scale",
"=",
"image_scale",
",",
"class_labels",
"=",
"class_labels",
",",
"predicted_feature_name",
"=",
"predicted_feature_name",
",",
"predicted_probabilities_output",
"=",
"predicted_probabilities_output",
",",
"custom_objects",
"=",
"custom_objects",
")",
"elif",
"_HAS_KERAS2_TF",
":",
"from",
".",
"import",
"_keras2_converter",
"spec",
"=",
"_keras2_converter",
".",
"_convert",
"(",
"model",
"=",
"model",
",",
"input_names",
"=",
"input_names",
",",
"output_names",
"=",
"output_names",
",",
"image_input_names",
"=",
"image_input_names",
",",
"input_name_shape_dict",
"=",
"input_name_shape_dict",
",",
"is_bgr",
"=",
"is_bgr",
",",
"red_bias",
"=",
"red_bias",
",",
"green_bias",
"=",
"green_bias",
",",
"blue_bias",
"=",
"blue_bias",
",",
"gray_bias",
"=",
"gray_bias",
",",
"image_scale",
"=",
"image_scale",
",",
"class_labels",
"=",
"class_labels",
",",
"predicted_feature_name",
"=",
"predicted_feature_name",
",",
"predicted_probabilities_output",
"=",
"predicted_probabilities_output",
",",
"add_custom_layers",
"=",
"add_custom_layers",
",",
"custom_conversion_functions",
"=",
"custom_conversion_functions",
",",
"custom_objects",
"=",
"custom_objects",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Keras not found or unsupported version or backend found. keras conversion API is disabled.'",
")",
"if",
"model_precision",
"==",
"_MLMODEL_HALF_PRECISION",
"and",
"model",
"is",
"not",
"None",
":",
"spec",
"=",
"convert_neural_network_spec_weights_to_fp16",
"(",
"spec",
")",
"return",
"spec"
] | Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
- a tuple of strings, where the first is the path to a Keras model
architecture (.json file), the second is the path to its weights
stored in h5 file.
input_names: [str] | str
Optional name(s) that can be given to the inputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the inputs of the Keras model. If not provided, the Keras inputs
are named to [input1, input2, ..., inputN] in the Core ML model. When
multiple inputs are present, the input feature names are in the same
order as the Keras inputs.
output_names: [str] | str
Optional name(s) that can be given to the outputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the outputs of the Keras model. If not provided, the Keras outputs
are named to [output1, output2, ..., outputN] in the Core ML model.
When multiple outputs are present, output feature names are in the same
order as the Keras inputs.
image_input_names: [str] | str
Input names to the Keras model (a subset of the input_names
parameter) that can be treated as images by Core ML. All other inputs
are treated as MultiArrays (N-D Arrays).
input_name_shape_dict: {str: [int]}
Optional Dictionary of input tensor names and their corresponding shapes expressed
as a list of ints
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
Note that this flag is about the models internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults
to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
filepath where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function. Defaults to the first output blob.
add_custom_layers: bool
If True, then unknown Keras layer types will be added to the model as
'custom' layers, which must then be filled in as postprocessing.
custom_conversion_functions: {'str': (Layer -> CustomLayerParams)}
A dictionary with keys corresponding to names of custom layers and values
as functions taking a Keras custom layer and returning a parameter dictionary
and list of weights.
custom_objects: {'str': (function)}
Dictionary that includes a key, value pair of {'<function name>': <function>}
for custom objects such as custom loss in the Keras model.
Provide a string of the name of the custom function as a key.
Provide a function as a value.
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a Keras model
>>> model = Sequential()
>>> model.add(Dense(num_channels, input_dim = input_dim))
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.keras.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
Converting a model with a single image input.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image')
Core ML also lets you add class labels to models to expose them as
classifiers.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names = 'image',
... image_input_names = 'image', class_labels = ['cat', 'dog', 'rat'])
Class labels for classifiers can also come from a file on disk.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image', class_labels = 'labels.txt')
Provide customized input and output names to the Keras inputs and outputs
while exposing them to Core ML.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... ['my_input_1', 'my_input_2'], output_names = ['my_output']) | [
"Convert",
"a",
"Keras",
"model",
"to",
"Core",
"ML",
"protobuf",
"specification",
"(",
".",
"mlmodel",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras_converter.py#L333-L564 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras_converter.py | convert | def convert(model,
input_names = None,
output_names = None,
image_input_names = None,
input_name_shape_dict = {},
is_bgr = False,
red_bias = 0.0,
green_bias = 0.0,
blue_bias = 0.0,
gray_bias = 0.0,
image_scale = 1.0,
class_labels = None,
predicted_feature_name = None,
model_precision = _MLMODEL_FULL_PRECISION,
predicted_probabilities_output = '',
add_custom_layers = False,
custom_conversion_functions = None):
"""
Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
- a tuple of strings, where the first is the path to a Keras model
architecture (.json file), the second is the path to its weights
stored in h5 file.
input_names: [str] | str
Optional name(s) that can be given to the inputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the inputs of the Keras model. If not provided, the Keras inputs
are named to [input1, input2, ..., inputN] in the Core ML model. When
multiple inputs are present, the input feature names are in the same
order as the Keras inputs.
output_names: [str] | str
Optional name(s) that can be given to the outputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the outputs of the Keras model. If not provided, the Keras outputs
are named to [output1, output2, ..., outputN] in the Core ML model.
When multiple outputs are present, output feature names are in the same
order as the Keras inputs.
image_input_names: [str] | str
Input names to the Keras model (a subset of the input_names
parameter) that can be treated as images by Core ML. All other inputs
are treated as MultiArrays (N-D Arrays).
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
Note that this flag is about the models internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults
to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
filepath where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function. Defaults to the first output blob.
add_custom_layers: bool
If yes, then unknown Keras layer types will be added to the model as
'custom' layers, which must then be filled in as postprocessing.
custom_conversion_functions: {str:(Layer -> (dict, [weights])) }
A dictionary with keys corresponding to names of custom layers and values
as functions taking a Keras custom layer and returning a parameter dictionary
and list of weights.
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a Keras model
>>> model = Sequential()
>>> model.add(Dense(num_channels, input_dim = input_dim))
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.keras.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
Converting a model with a single image input.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image')
Core ML also lets you add class labels to models to expose them as
classifiers.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names = 'image',
... image_input_names = 'image', class_labels = ['cat', 'dog', 'rat'])
Class labels for classifiers can also come from a file on disk.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image', class_labels = 'labels.txt')
Provide customized input and output names to the Keras inputs and outputs
while exposing them to Core ML.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... ['my_input_1', 'my_input_2'], output_names = ['my_output'])
"""
spec = convertToSpec(model,
input_names,
output_names,
image_input_names,
input_name_shape_dict,
is_bgr,
red_bias,
green_bias,
blue_bias,
gray_bias,
image_scale,
class_labels,
predicted_feature_name,
model_precision,
predicted_probabilities_output,
add_custom_layers,
custom_conversion_functions=custom_conversion_functions)
return _MLModel(spec) | python | def convert(model,
input_names = None,
output_names = None,
image_input_names = None,
input_name_shape_dict = {},
is_bgr = False,
red_bias = 0.0,
green_bias = 0.0,
blue_bias = 0.0,
gray_bias = 0.0,
image_scale = 1.0,
class_labels = None,
predicted_feature_name = None,
model_precision = _MLMODEL_FULL_PRECISION,
predicted_probabilities_output = '',
add_custom_layers = False,
custom_conversion_functions = None):
"""
Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
- a tuple of strings, where the first is the path to a Keras model
architecture (.json file), the second is the path to its weights
stored in h5 file.
input_names: [str] | str
Optional name(s) that can be given to the inputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the inputs of the Keras model. If not provided, the Keras inputs
are named to [input1, input2, ..., inputN] in the Core ML model. When
multiple inputs are present, the input feature names are in the same
order as the Keras inputs.
output_names: [str] | str
Optional name(s) that can be given to the outputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the outputs of the Keras model. If not provided, the Keras outputs
are named to [output1, output2, ..., outputN] in the Core ML model.
When multiple outputs are present, output feature names are in the same
order as the Keras inputs.
image_input_names: [str] | str
Input names to the Keras model (a subset of the input_names
parameter) that can be treated as images by Core ML. All other inputs
are treated as MultiArrays (N-D Arrays).
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
Note that this flag is about the models internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults
to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
filepath where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function. Defaults to the first output blob.
add_custom_layers: bool
If yes, then unknown Keras layer types will be added to the model as
'custom' layers, which must then be filled in as postprocessing.
custom_conversion_functions: {str:(Layer -> (dict, [weights])) }
A dictionary with keys corresponding to names of custom layers and values
as functions taking a Keras custom layer and returning a parameter dictionary
and list of weights.
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a Keras model
>>> model = Sequential()
>>> model.add(Dense(num_channels, input_dim = input_dim))
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.keras.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
Converting a model with a single image input.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image')
Core ML also lets you add class labels to models to expose them as
classifiers.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names = 'image',
... image_input_names = 'image', class_labels = ['cat', 'dog', 'rat'])
Class labels for classifiers can also come from a file on disk.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image', class_labels = 'labels.txt')
Provide customized input and output names to the Keras inputs and outputs
while exposing them to Core ML.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... ['my_input_1', 'my_input_2'], output_names = ['my_output'])
"""
spec = convertToSpec(model,
input_names,
output_names,
image_input_names,
input_name_shape_dict,
is_bgr,
red_bias,
green_bias,
blue_bias,
gray_bias,
image_scale,
class_labels,
predicted_feature_name,
model_precision,
predicted_probabilities_output,
add_custom_layers,
custom_conversion_functions=custom_conversion_functions)
return _MLModel(spec) | [
"def",
"convert",
"(",
"model",
",",
"input_names",
"=",
"None",
",",
"output_names",
"=",
"None",
",",
"image_input_names",
"=",
"None",
",",
"input_name_shape_dict",
"=",
"{",
"}",
",",
"is_bgr",
"=",
"False",
",",
"red_bias",
"=",
"0.0",
",",
"green_bias",
"=",
"0.0",
",",
"blue_bias",
"=",
"0.0",
",",
"gray_bias",
"=",
"0.0",
",",
"image_scale",
"=",
"1.0",
",",
"class_labels",
"=",
"None",
",",
"predicted_feature_name",
"=",
"None",
",",
"model_precision",
"=",
"_MLMODEL_FULL_PRECISION",
",",
"predicted_probabilities_output",
"=",
"''",
",",
"add_custom_layers",
"=",
"False",
",",
"custom_conversion_functions",
"=",
"None",
")",
":",
"spec",
"=",
"convertToSpec",
"(",
"model",
",",
"input_names",
",",
"output_names",
",",
"image_input_names",
",",
"input_name_shape_dict",
",",
"is_bgr",
",",
"red_bias",
",",
"green_bias",
",",
"blue_bias",
",",
"gray_bias",
",",
"image_scale",
",",
"class_labels",
",",
"predicted_feature_name",
",",
"model_precision",
",",
"predicted_probabilities_output",
",",
"add_custom_layers",
",",
"custom_conversion_functions",
"=",
"custom_conversion_functions",
")",
"return",
"_MLModel",
"(",
"spec",
")"
] | Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
- a tuple of strings, where the first is the path to a Keras model
architecture (.json file), the second is the path to its weights
stored in h5 file.
input_names: [str] | str
Optional name(s) that can be given to the inputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the inputs of the Keras model. If not provided, the Keras inputs
are named to [input1, input2, ..., inputN] in the Core ML model. When
multiple inputs are present, the input feature names are in the same
order as the Keras inputs.
output_names: [str] | str
Optional name(s) that can be given to the outputs of the Keras model.
These names will be used in the interface of the Core ML models to refer
to the outputs of the Keras model. If not provided, the Keras outputs
are named to [output1, output2, ..., outputN] in the Core ML model.
When multiple outputs are present, output feature names are in the same
order as the Keras inputs.
image_input_names: [str] | str
Input names to the Keras model (a subset of the input_names
parameter) that can be treated as images by Core ML. All other inputs
are treated as MultiArrays (N-D Arrays).
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
Note that this flag is about the models internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults
to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
filepath where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function. Defaults to the first output blob.
add_custom_layers: bool
If yes, then unknown Keras layer types will be added to the model as
'custom' layers, which must then be filled in as postprocessing.
custom_conversion_functions: {str:(Layer -> (dict, [weights])) }
A dictionary with keys corresponding to names of custom layers and values
as functions taking a Keras custom layer and returning a parameter dictionary
and list of weights.
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a Keras model
>>> model = Sequential()
>>> model.add(Dense(num_channels, input_dim = input_dim))
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.keras.convert(model)
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
Converting a model with a single image input.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image')
Core ML also lets you add class labels to models to expose them as
classifiers.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names = 'image',
... image_input_names = 'image', class_labels = ['cat', 'dog', 'rat'])
Class labels for classifiers can also come from a file on disk.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... 'image', image_input_names = 'image', class_labels = 'labels.txt')
Provide customized input and output names to the Keras inputs and outputs
while exposing them to Core ML.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.keras.convert(model, input_names =
... ['my_input_1', 'my_input_2'], output_names = ['my_output']) | [
"Convert",
"a",
"Keras",
"model",
"to",
"Core",
"ML",
"protobuf",
"specification",
"(",
".",
"mlmodel",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras_converter.py#L567-L762 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_random_forest_regressor.py | convert | def convert(model, feature_names, target):
"""Convert a boosted tree model to protobuf format.
Parameters
----------
decision_tree : RandomForestRegressor
A trained scikit-learn tree model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _ensemble.RandomForestRegressor)
def is_rf_model(m):
if len(m.estimators_) == 0:
return False
if hasattr(m, 'estimators_') and m.estimators_ is not None:
for t in m.estimators_:
if not hasattr(t, 'tree_') or t.tree_ is None:
return False
return True
else:
return False
_sklearn_util.check_fitted(model, is_rf_model)
return _MLModel(_convert_tree_ensemble(model, feature_names, target)) | python | def convert(model, feature_names, target):
"""Convert a boosted tree model to protobuf format.
Parameters
----------
decision_tree : RandomForestRegressor
A trained scikit-learn tree model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _ensemble.RandomForestRegressor)
def is_rf_model(m):
if len(m.estimators_) == 0:
return False
if hasattr(m, 'estimators_') and m.estimators_ is not None:
for t in m.estimators_:
if not hasattr(t, 'tree_') or t.tree_ is None:
return False
return True
else:
return False
_sklearn_util.check_fitted(model, is_rf_model)
return _MLModel(_convert_tree_ensemble(model, feature_names, target)) | [
"def",
"convert",
"(",
"model",
",",
"feature_names",
",",
"target",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"_ensemble",
".",
"RandomForestRegressor",
")",
"def",
"is_rf_model",
"(",
"m",
")",
":",
"if",
"len",
"(",
"m",
".",
"estimators_",
")",
"==",
"0",
":",
"return",
"False",
"if",
"hasattr",
"(",
"m",
",",
"'estimators_'",
")",
"and",
"m",
".",
"estimators_",
"is",
"not",
"None",
":",
"for",
"t",
"in",
"m",
".",
"estimators_",
":",
"if",
"not",
"hasattr",
"(",
"t",
",",
"'tree_'",
")",
"or",
"t",
".",
"tree_",
"is",
"None",
":",
"return",
"False",
"return",
"True",
"else",
":",
"return",
"False",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"is_rf_model",
")",
"return",
"_MLModel",
"(",
"_convert_tree_ensemble",
"(",
"model",
",",
"feature_names",
",",
"target",
")",
")"
] | Convert a boosted tree model to protobuf format.
Parameters
----------
decision_tree : RandomForestRegressor
A trained scikit-learn tree model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"boosted",
"tree",
"model",
"to",
"protobuf",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_random_forest_regressor.py#L19-L53 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/clustering/dbscan.py | create | def create(dataset, features=None, distance=None, radius=1.,
min_core_neighbors=10, verbose=True):
"""
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
"""
## Start the training time clock and instantiate an empty model
logger = _logging.getLogger(__name__)
start_time = _time.time()
## Validate the input dataset
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate neighborhood parameters
if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0:
raise ValueError("Input 'min_core_neighbors' must be a non-negative " +
"integer.")
if not isinstance(radius, (int, float)) or radius < 0:
raise ValueError("Input 'radius' must be a non-negative integer " +
"or float.")
## Compute all-point nearest neighbors within `radius` and count
# neighborhood sizes
knn_model = _tc.nearest_neighbors.create(dataset, features=features,
distance=distance,
method='brute_force',
verbose=verbose)
knn = knn_model.similarity_graph(k=None, radius=radius,
include_self_edges=False,
output_type='SFrame',
verbose=verbose)
neighbor_counts = knn.groupby('query_label', _agg.COUNT)
### NOTE: points with NO neighbors are already dropped here!
## Identify core points and boundary candidate points. Not all of the
# boundary candidates will be boundary points - some are in small isolated
# clusters.
if verbose:
logger.info("Identifying noise points and core points.")
boundary_mask = neighbor_counts['Count'] < min_core_neighbors
core_mask = 1 - boundary_mask
# this includes too small clusters
boundary_idx = neighbor_counts[boundary_mask]['query_label']
core_idx = neighbor_counts[core_mask]['query_label']
## Build a similarity graph on the core points
## NOTE: careful with singleton core points - the second filter removes them
# from the edge set so they have to be added separately as vertices.
if verbose:
logger.info("Constructing the core point similarity graph.")
core_vertices = knn.filter_by(core_idx, 'query_label')
core_edges = core_vertices.filter_by(core_idx, 'reference_label')
core_graph = _tc.SGraph()
core_graph = core_graph.add_vertices(core_vertices[['query_label']],
vid_field='query_label')
core_graph = core_graph.add_edges(core_edges, src_field='query_label',
dst_field='reference_label')
## Compute core point connected components and relabel to be consecutive
# integers
cc = _tc.connected_components.create(core_graph, verbose=verbose)
cc_labels = cc.component_size.add_row_number('__label')
core_assignments = cc.component_id.join(cc_labels, on='component_id',
how='left')[['__id', '__label']]
core_assignments['type'] = 'core'
## Join potential boundary points to core cluster labels (points that aren't
# really on a boundary are implicitly dropped)
if verbose:
logger.info("Processing boundary points.")
boundary_edges = knn.filter_by(boundary_idx, 'query_label')
# separate real boundary points from points in small isolated clusters
boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label')
# join a boundary point to its single closest core point.
boundary_assignments = boundary_core_edges.groupby('query_label',
{'reference_label': _agg.ARGMIN('rank', 'reference_label')})
boundary_assignments = boundary_assignments.join(core_assignments,
on={'reference_label': '__id'})
boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True)
boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True)
boundary_assignments['type'] = 'boundary'
## Identify boundary candidates that turned out to be in small clusters but
# not on real cluster boundaries
small_cluster_idx = set(boundary_idx).difference(
boundary_assignments['__id'])
## Identify individual noise points by the fact that they have no neighbors.
noise_idx = set(range(dataset.num_rows())).difference(
neighbor_counts['query_label'])
noise_idx = noise_idx.union(small_cluster_idx)
noise_assignments = _tc.SFrame({'row_id': _tc.SArray(list(noise_idx), int)})
noise_assignments['cluster_id'] = None
noise_assignments['cluster_id'] = noise_assignments['cluster_id'].astype(int)
noise_assignments['type'] = 'noise'
## Append core, boundary, and noise results to each other.
master_assignments = _tc.SFrame()
num_clusters = 0
if core_assignments.num_rows() > 0:
core_assignments = core_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(core_assignments)
num_clusters = len(core_assignments['cluster_id'].unique())
if boundary_assignments.num_rows() > 0:
boundary_assignments = boundary_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(boundary_assignments)
if noise_assignments.num_rows() > 0:
master_assignments = master_assignments.append(noise_assignments)
## Post-processing and formatting
state = {'verbose': verbose,
'radius': radius,
'min_core_neighbors': min_core_neighbors,
'distance': knn_model.distance,
'num_distance_components': knn_model.num_distance_components,
'num_examples': dataset.num_rows(),
'features': knn_model.features,
'num_features': knn_model.num_features,
'unpacked_features': knn_model.unpacked_features,
'num_unpacked_features': knn_model.num_unpacked_features,
'cluster_id': master_assignments,
'num_clusters': num_clusters,
'training_time': _time.time() - start_time}
return DBSCANModel(state) | python | def create(dataset, features=None, distance=None, radius=1.,
min_core_neighbors=10, verbose=True):
"""
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
"""
## Start the training time clock and instantiate an empty model
logger = _logging.getLogger(__name__)
start_time = _time.time()
## Validate the input dataset
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate neighborhood parameters
if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0:
raise ValueError("Input 'min_core_neighbors' must be a non-negative " +
"integer.")
if not isinstance(radius, (int, float)) or radius < 0:
raise ValueError("Input 'radius' must be a non-negative integer " +
"or float.")
## Compute all-point nearest neighbors within `radius` and count
# neighborhood sizes
knn_model = _tc.nearest_neighbors.create(dataset, features=features,
distance=distance,
method='brute_force',
verbose=verbose)
knn = knn_model.similarity_graph(k=None, radius=radius,
include_self_edges=False,
output_type='SFrame',
verbose=verbose)
neighbor_counts = knn.groupby('query_label', _agg.COUNT)
### NOTE: points with NO neighbors are already dropped here!
## Identify core points and boundary candidate points. Not all of the
# boundary candidates will be boundary points - some are in small isolated
# clusters.
if verbose:
logger.info("Identifying noise points and core points.")
boundary_mask = neighbor_counts['Count'] < min_core_neighbors
core_mask = 1 - boundary_mask
# this includes too small clusters
boundary_idx = neighbor_counts[boundary_mask]['query_label']
core_idx = neighbor_counts[core_mask]['query_label']
## Build a similarity graph on the core points
## NOTE: careful with singleton core points - the second filter removes them
# from the edge set so they have to be added separately as vertices.
if verbose:
logger.info("Constructing the core point similarity graph.")
core_vertices = knn.filter_by(core_idx, 'query_label')
core_edges = core_vertices.filter_by(core_idx, 'reference_label')
core_graph = _tc.SGraph()
core_graph = core_graph.add_vertices(core_vertices[['query_label']],
vid_field='query_label')
core_graph = core_graph.add_edges(core_edges, src_field='query_label',
dst_field='reference_label')
## Compute core point connected components and relabel to be consecutive
# integers
cc = _tc.connected_components.create(core_graph, verbose=verbose)
cc_labels = cc.component_size.add_row_number('__label')
core_assignments = cc.component_id.join(cc_labels, on='component_id',
how='left')[['__id', '__label']]
core_assignments['type'] = 'core'
## Join potential boundary points to core cluster labels (points that aren't
# really on a boundary are implicitly dropped)
if verbose:
logger.info("Processing boundary points.")
boundary_edges = knn.filter_by(boundary_idx, 'query_label')
# separate real boundary points from points in small isolated clusters
boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label')
# join a boundary point to its single closest core point.
boundary_assignments = boundary_core_edges.groupby('query_label',
{'reference_label': _agg.ARGMIN('rank', 'reference_label')})
boundary_assignments = boundary_assignments.join(core_assignments,
on={'reference_label': '__id'})
boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True)
boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True)
boundary_assignments['type'] = 'boundary'
## Identify boundary candidates that turned out to be in small clusters but
# not on real cluster boundaries
small_cluster_idx = set(boundary_idx).difference(
boundary_assignments['__id'])
## Identify individual noise points by the fact that they have no neighbors.
noise_idx = set(range(dataset.num_rows())).difference(
neighbor_counts['query_label'])
noise_idx = noise_idx.union(small_cluster_idx)
noise_assignments = _tc.SFrame({'row_id': _tc.SArray(list(noise_idx), int)})
noise_assignments['cluster_id'] = None
noise_assignments['cluster_id'] = noise_assignments['cluster_id'].astype(int)
noise_assignments['type'] = 'noise'
## Append core, boundary, and noise results to each other.
master_assignments = _tc.SFrame()
num_clusters = 0
if core_assignments.num_rows() > 0:
core_assignments = core_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(core_assignments)
num_clusters = len(core_assignments['cluster_id'].unique())
if boundary_assignments.num_rows() > 0:
boundary_assignments = boundary_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(boundary_assignments)
if noise_assignments.num_rows() > 0:
master_assignments = master_assignments.append(noise_assignments)
## Post-processing and formatting
state = {'verbose': verbose,
'radius': radius,
'min_core_neighbors': min_core_neighbors,
'distance': knn_model.distance,
'num_distance_components': knn_model.num_distance_components,
'num_examples': dataset.num_rows(),
'features': knn_model.features,
'num_features': knn_model.num_features,
'unpacked_features': knn_model.unpacked_features,
'num_unpacked_features': knn_model.num_unpacked_features,
'cluster_id': master_assignments,
'num_clusters': num_clusters,
'training_time': _time.time() - start_time}
return DBSCANModel(state) | [
"def",
"create",
"(",
"dataset",
",",
"features",
"=",
"None",
",",
"distance",
"=",
"None",
",",
"radius",
"=",
"1.",
",",
"min_core_neighbors",
"=",
"10",
",",
"verbose",
"=",
"True",
")",
":",
"## Start the training time clock and instantiate an empty model",
"logger",
"=",
"_logging",
".",
"getLogger",
"(",
"__name__",
")",
"start_time",
"=",
"_time",
".",
"time",
"(",
")",
"## Validate the input dataset",
"_tkutl",
".",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"_tkutl",
".",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"\"dataset\"",
")",
"## Validate neighborhood parameters",
"if",
"not",
"isinstance",
"(",
"min_core_neighbors",
",",
"int",
")",
"or",
"min_core_neighbors",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'min_core_neighbors' must be a non-negative \"",
"+",
"\"integer.\"",
")",
"if",
"not",
"isinstance",
"(",
"radius",
",",
"(",
"int",
",",
"float",
")",
")",
"or",
"radius",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'radius' must be a non-negative integer \"",
"+",
"\"or float.\"",
")",
"## Compute all-point nearest neighbors within `radius` and count",
"# neighborhood sizes",
"knn_model",
"=",
"_tc",
".",
"nearest_neighbors",
".",
"create",
"(",
"dataset",
",",
"features",
"=",
"features",
",",
"distance",
"=",
"distance",
",",
"method",
"=",
"'brute_force'",
",",
"verbose",
"=",
"verbose",
")",
"knn",
"=",
"knn_model",
".",
"similarity_graph",
"(",
"k",
"=",
"None",
",",
"radius",
"=",
"radius",
",",
"include_self_edges",
"=",
"False",
",",
"output_type",
"=",
"'SFrame'",
",",
"verbose",
"=",
"verbose",
")",
"neighbor_counts",
"=",
"knn",
".",
"groupby",
"(",
"'query_label'",
",",
"_agg",
".",
"COUNT",
")",
"### NOTE: points with NO neighbors are already dropped here!",
"## Identify core points and boundary candidate points. Not all of the",
"# boundary candidates will be boundary points - some are in small isolated",
"# clusters.",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Identifying noise points and core points.\"",
")",
"boundary_mask",
"=",
"neighbor_counts",
"[",
"'Count'",
"]",
"<",
"min_core_neighbors",
"core_mask",
"=",
"1",
"-",
"boundary_mask",
"# this includes too small clusters",
"boundary_idx",
"=",
"neighbor_counts",
"[",
"boundary_mask",
"]",
"[",
"'query_label'",
"]",
"core_idx",
"=",
"neighbor_counts",
"[",
"core_mask",
"]",
"[",
"'query_label'",
"]",
"## Build a similarity graph on the core points",
"## NOTE: careful with singleton core points - the second filter removes them",
"# from the edge set so they have to be added separately as vertices.",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Constructing the core point similarity graph.\"",
")",
"core_vertices",
"=",
"knn",
".",
"filter_by",
"(",
"core_idx",
",",
"'query_label'",
")",
"core_edges",
"=",
"core_vertices",
".",
"filter_by",
"(",
"core_idx",
",",
"'reference_label'",
")",
"core_graph",
"=",
"_tc",
".",
"SGraph",
"(",
")",
"core_graph",
"=",
"core_graph",
".",
"add_vertices",
"(",
"core_vertices",
"[",
"[",
"'query_label'",
"]",
"]",
",",
"vid_field",
"=",
"'query_label'",
")",
"core_graph",
"=",
"core_graph",
".",
"add_edges",
"(",
"core_edges",
",",
"src_field",
"=",
"'query_label'",
",",
"dst_field",
"=",
"'reference_label'",
")",
"## Compute core point connected components and relabel to be consecutive",
"# integers",
"cc",
"=",
"_tc",
".",
"connected_components",
".",
"create",
"(",
"core_graph",
",",
"verbose",
"=",
"verbose",
")",
"cc_labels",
"=",
"cc",
".",
"component_size",
".",
"add_row_number",
"(",
"'__label'",
")",
"core_assignments",
"=",
"cc",
".",
"component_id",
".",
"join",
"(",
"cc_labels",
",",
"on",
"=",
"'component_id'",
",",
"how",
"=",
"'left'",
")",
"[",
"[",
"'__id'",
",",
"'__label'",
"]",
"]",
"core_assignments",
"[",
"'type'",
"]",
"=",
"'core'",
"## Join potential boundary points to core cluster labels (points that aren't",
"# really on a boundary are implicitly dropped)",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Processing boundary points.\"",
")",
"boundary_edges",
"=",
"knn",
".",
"filter_by",
"(",
"boundary_idx",
",",
"'query_label'",
")",
"# separate real boundary points from points in small isolated clusters",
"boundary_core_edges",
"=",
"boundary_edges",
".",
"filter_by",
"(",
"core_idx",
",",
"'reference_label'",
")",
"# join a boundary point to its single closest core point.",
"boundary_assignments",
"=",
"boundary_core_edges",
".",
"groupby",
"(",
"'query_label'",
",",
"{",
"'reference_label'",
":",
"_agg",
".",
"ARGMIN",
"(",
"'rank'",
",",
"'reference_label'",
")",
"}",
")",
"boundary_assignments",
"=",
"boundary_assignments",
".",
"join",
"(",
"core_assignments",
",",
"on",
"=",
"{",
"'reference_label'",
":",
"'__id'",
"}",
")",
"boundary_assignments",
"=",
"boundary_assignments",
".",
"rename",
"(",
"{",
"'query_label'",
":",
"'__id'",
"}",
",",
"inplace",
"=",
"True",
")",
"boundary_assignments",
"=",
"boundary_assignments",
".",
"remove_column",
"(",
"'reference_label'",
",",
"inplace",
"=",
"True",
")",
"boundary_assignments",
"[",
"'type'",
"]",
"=",
"'boundary'",
"## Identify boundary candidates that turned out to be in small clusters but",
"# not on real cluster boundaries",
"small_cluster_idx",
"=",
"set",
"(",
"boundary_idx",
")",
".",
"difference",
"(",
"boundary_assignments",
"[",
"'__id'",
"]",
")",
"## Identify individual noise points by the fact that they have no neighbors.",
"noise_idx",
"=",
"set",
"(",
"range",
"(",
"dataset",
".",
"num_rows",
"(",
")",
")",
")",
".",
"difference",
"(",
"neighbor_counts",
"[",
"'query_label'",
"]",
")",
"noise_idx",
"=",
"noise_idx",
".",
"union",
"(",
"small_cluster_idx",
")",
"noise_assignments",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'row_id'",
":",
"_tc",
".",
"SArray",
"(",
"list",
"(",
"noise_idx",
")",
",",
"int",
")",
"}",
")",
"noise_assignments",
"[",
"'cluster_id'",
"]",
"=",
"None",
"noise_assignments",
"[",
"'cluster_id'",
"]",
"=",
"noise_assignments",
"[",
"'cluster_id'",
"]",
".",
"astype",
"(",
"int",
")",
"noise_assignments",
"[",
"'type'",
"]",
"=",
"'noise'",
"## Append core, boundary, and noise results to each other.",
"master_assignments",
"=",
"_tc",
".",
"SFrame",
"(",
")",
"num_clusters",
"=",
"0",
"if",
"core_assignments",
".",
"num_rows",
"(",
")",
">",
"0",
":",
"core_assignments",
"=",
"core_assignments",
".",
"rename",
"(",
"{",
"'__id'",
":",
"'row_id'",
",",
"'__label'",
":",
"'cluster_id'",
"}",
",",
"inplace",
"=",
"True",
")",
"master_assignments",
"=",
"master_assignments",
".",
"append",
"(",
"core_assignments",
")",
"num_clusters",
"=",
"len",
"(",
"core_assignments",
"[",
"'cluster_id'",
"]",
".",
"unique",
"(",
")",
")",
"if",
"boundary_assignments",
".",
"num_rows",
"(",
")",
">",
"0",
":",
"boundary_assignments",
"=",
"boundary_assignments",
".",
"rename",
"(",
"{",
"'__id'",
":",
"'row_id'",
",",
"'__label'",
":",
"'cluster_id'",
"}",
",",
"inplace",
"=",
"True",
")",
"master_assignments",
"=",
"master_assignments",
".",
"append",
"(",
"boundary_assignments",
")",
"if",
"noise_assignments",
".",
"num_rows",
"(",
")",
">",
"0",
":",
"master_assignments",
"=",
"master_assignments",
".",
"append",
"(",
"noise_assignments",
")",
"## Post-processing and formatting",
"state",
"=",
"{",
"'verbose'",
":",
"verbose",
",",
"'radius'",
":",
"radius",
",",
"'min_core_neighbors'",
":",
"min_core_neighbors",
",",
"'distance'",
":",
"knn_model",
".",
"distance",
",",
"'num_distance_components'",
":",
"knn_model",
".",
"num_distance_components",
",",
"'num_examples'",
":",
"dataset",
".",
"num_rows",
"(",
")",
",",
"'features'",
":",
"knn_model",
".",
"features",
",",
"'num_features'",
":",
"knn_model",
".",
"num_features",
",",
"'unpacked_features'",
":",
"knn_model",
".",
"unpacked_features",
",",
"'num_unpacked_features'",
":",
"knn_model",
".",
"num_unpacked_features",
",",
"'cluster_id'",
":",
"master_assignments",
",",
"'num_clusters'",
":",
"num_clusters",
",",
"'training_time'",
":",
"_time",
".",
"time",
"(",
")",
"-",
"start_time",
"}",
"return",
"DBSCANModel",
"(",
"state",
")"
] | Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns] | [
"Create",
"a",
"DBSCAN",
"clustering",
"model",
".",
"The",
"DBSCAN",
"method",
"partitions",
"the",
"input",
"dataset",
"into",
"three",
"types",
"of",
"points",
"based",
"on",
"the",
"estimated",
"probability",
"density",
"at",
"each",
"point",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/dbscan.py#L24-L322 | train |
apple/turicreate | src/external/xgboost/python-package/xgboost/libpath.py | find_lib_path | def find_lib_path():
"""Load find the path to xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found library path to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# make pythonpack hack: copy this directory one level upper for setup.py
dll_path = [curr_path, os.path.join(curr_path, '../../wrapper/'),
os.path.join(curr_path, './wrapper/')]
if os.name == 'nt':
if platform.architecture()[0] == '64bit':
dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))
else:
dll_path.append(os.path.join(curr_path, '../../windows/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/Release/'))
if os.name == 'nt':
dll_path = [os.path.join(p, 'xgboost_wrapper.dll') for p in dll_path]
else:
dll_path = [os.path.join(p, 'libxgboostwrapper.so') for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
#From github issues, most of installation errors come from machines w/o compilers
if len(lib_path) == 0 and not os.environ.get('XGBOOST_BUILD_DOC', False):
raise XGBoostLibraryNotFound(
'Cannot find XGBoost Libarary in the candicate path, ' +
'did you install compilers and run build.sh in root path?\n'
'List of candidates:\n' + ('\n'.join(dll_path)))
return lib_path | python | def find_lib_path():
"""Load find the path to xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found library path to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# make pythonpack hack: copy this directory one level upper for setup.py
dll_path = [curr_path, os.path.join(curr_path, '../../wrapper/'),
os.path.join(curr_path, './wrapper/')]
if os.name == 'nt':
if platform.architecture()[0] == '64bit':
dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))
else:
dll_path.append(os.path.join(curr_path, '../../windows/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/Release/'))
if os.name == 'nt':
dll_path = [os.path.join(p, 'xgboost_wrapper.dll') for p in dll_path]
else:
dll_path = [os.path.join(p, 'libxgboostwrapper.so') for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
#From github issues, most of installation errors come from machines w/o compilers
if len(lib_path) == 0 and not os.environ.get('XGBOOST_BUILD_DOC', False):
raise XGBoostLibraryNotFound(
'Cannot find XGBoost Libarary in the candicate path, ' +
'did you install compilers and run build.sh in root path?\n'
'List of candidates:\n' + ('\n'.join(dll_path)))
return lib_path | [
"def",
"find_lib_path",
"(",
")",
":",
"curr_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"__file__",
")",
")",
")",
"# make pythonpack hack: copy this directory one level upper for setup.py",
"dll_path",
"=",
"[",
"curr_path",
",",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'../../wrapper/'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'./wrapper/'",
")",
"]",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"if",
"platform",
".",
"architecture",
"(",
")",
"[",
"0",
"]",
"==",
"'64bit'",
":",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'../../windows/x64/Release/'",
")",
")",
"# hack for pip installation when copy all parent source directory here",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'./windows/x64/Release/'",
")",
")",
"else",
":",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'../../windows/Release/'",
")",
")",
"# hack for pip installation when copy all parent source directory here",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'./windows/Release/'",
")",
")",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"dll_path",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"'xgboost_wrapper.dll'",
")",
"for",
"p",
"in",
"dll_path",
"]",
"else",
":",
"dll_path",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"'libxgboostwrapper.so'",
")",
"for",
"p",
"in",
"dll_path",
"]",
"lib_path",
"=",
"[",
"p",
"for",
"p",
"in",
"dll_path",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"p",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"p",
")",
"]",
"#From github issues, most of installation errors come from machines w/o compilers",
"if",
"len",
"(",
"lib_path",
")",
"==",
"0",
"and",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"'XGBOOST_BUILD_DOC'",
",",
"False",
")",
":",
"raise",
"XGBoostLibraryNotFound",
"(",
"'Cannot find XGBoost Libarary in the candicate path, '",
"+",
"'did you install compilers and run build.sh in root path?\\n'",
"'List of candidates:\\n'",
"+",
"(",
"'\\n'",
".",
"join",
"(",
"dll_path",
")",
")",
")",
"return",
"lib_path"
] | Load find the path to xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found library path to xgboost | [
"Load",
"find",
"the",
"path",
"to",
"xgboost",
"dynamic",
"library",
"files",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/libpath.py#L13-L45 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_sklearn_util.py | check_expected_type | def check_expected_type(model, expected_type):
"""Check if a model is of the right type. Raise error if not.
Parameters
----------
model: model
Any scikit-learn model
expected_type: Type
Expected type of the scikit-learn.
"""
if (model.__class__.__name__ != expected_type.__name__):
raise TypeError("Expected model of type '%s' (got %s)" % \
(expected_type.__name__, model.__class__.__name__)) | python | def check_expected_type(model, expected_type):
"""Check if a model is of the right type. Raise error if not.
Parameters
----------
model: model
Any scikit-learn model
expected_type: Type
Expected type of the scikit-learn.
"""
if (model.__class__.__name__ != expected_type.__name__):
raise TypeError("Expected model of type '%s' (got %s)" % \
(expected_type.__name__, model.__class__.__name__)) | [
"def",
"check_expected_type",
"(",
"model",
",",
"expected_type",
")",
":",
"if",
"(",
"model",
".",
"__class__",
".",
"__name__",
"!=",
"expected_type",
".",
"__name__",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected model of type '%s' (got %s)\"",
"%",
"(",
"expected_type",
".",
"__name__",
",",
"model",
".",
"__class__",
".",
"__name__",
")",
")"
] | Check if a model is of the right type. Raise error if not.
Parameters
----------
model: model
Any scikit-learn model
expected_type: Type
Expected type of the scikit-learn. | [
"Check",
"if",
"a",
"model",
"is",
"of",
"the",
"right",
"type",
".",
"Raise",
"error",
"if",
"not",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_sklearn_util.py#L20-L33 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/__init__.py | convert | def convert(model, input_names='input', target_name='target',
probability='classProbability', input_length='auto'):
"""
Convert a LIBSVM model to Core ML format.
Parameters
----------
model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
or string path to a saved model.
input_names: str | [str]
Name of the input column(s).
If a single string is used (the default) the input will be an array. The
length of the array will be inferred from the model, this can be overridden
using the 'input_length' parameter.
target: str
Name of the output column.
probability: str
Name of the output class probability column.
Only used for C-SVC and nu-SVC that have been trained with probability
estimates enabled.
input_length: int
Set the length of the input array.
This parameter should only be used when the input is an array (i.e. when
'input_name' is a string).
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a LIBSVM model
>>> import svmutil
>>> problem = svmutil.svm_problem([0,0,1,1], [[0,1], [1,1], [8,9], [7,7]])
>>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
# Convert using default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
# Save the CoreML model to a file.
>>> coreml_model.save('./my_model.mlmodel')
# Convert using user specified input names
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y'])
"""
if not(_HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
if isinstance(model, _string_types):
libsvm_model = _libsvm_util.load_model(model)
else:
libsvm_model = model
if not isinstance(libsvm_model, _libsvm.svm_model):
raise TypeError("Expected 'model' of type '%s' (got %s)" % (_libsvm.svm_model, type(libsvm_model)))
if not isinstance(target_name, _string_types):
raise TypeError("Expected 'target_name' of type str (got %s)" % type(libsvm_model))
if input_length != 'auto' and not isinstance(input_length, int):
raise TypeError("Expected 'input_length' of type int, got %s" % type(input_length))
if input_length != 'auto' and not isinstance(input_names, _string_types):
raise ValueError("'input_length' should not be used unless the input will be only one array.")
if not isinstance(probability, _string_types):
raise TypeError("Expected 'probability' of type str (got %s)" % type(probability))
return _libsvm_converter.convert(libsvm_model, input_names, target_name, input_length, probability) | python | def convert(model, input_names='input', target_name='target',
probability='classProbability', input_length='auto'):
"""
Convert a LIBSVM model to Core ML format.
Parameters
----------
model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
or string path to a saved model.
input_names: str | [str]
Name of the input column(s).
If a single string is used (the default) the input will be an array. The
length of the array will be inferred from the model, this can be overridden
using the 'input_length' parameter.
target: str
Name of the output column.
probability: str
Name of the output class probability column.
Only used for C-SVC and nu-SVC that have been trained with probability
estimates enabled.
input_length: int
Set the length of the input array.
This parameter should only be used when the input is an array (i.e. when
'input_name' is a string).
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a LIBSVM model
>>> import svmutil
>>> problem = svmutil.svm_problem([0,0,1,1], [[0,1], [1,1], [8,9], [7,7]])
>>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
# Convert using default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
# Save the CoreML model to a file.
>>> coreml_model.save('./my_model.mlmodel')
# Convert using user specified input names
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y'])
"""
if not(_HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
if isinstance(model, _string_types):
libsvm_model = _libsvm_util.load_model(model)
else:
libsvm_model = model
if not isinstance(libsvm_model, _libsvm.svm_model):
raise TypeError("Expected 'model' of type '%s' (got %s)" % (_libsvm.svm_model, type(libsvm_model)))
if not isinstance(target_name, _string_types):
raise TypeError("Expected 'target_name' of type str (got %s)" % type(libsvm_model))
if input_length != 'auto' and not isinstance(input_length, int):
raise TypeError("Expected 'input_length' of type int, got %s" % type(input_length))
if input_length != 'auto' and not isinstance(input_names, _string_types):
raise ValueError("'input_length' should not be used unless the input will be only one array.")
if not isinstance(probability, _string_types):
raise TypeError("Expected 'probability' of type str (got %s)" % type(probability))
return _libsvm_converter.convert(libsvm_model, input_names, target_name, input_length, probability) | [
"def",
"convert",
"(",
"model",
",",
"input_names",
"=",
"'input'",
",",
"target_name",
"=",
"'target'",
",",
"probability",
"=",
"'classProbability'",
",",
"input_length",
"=",
"'auto'",
")",
":",
"if",
"not",
"(",
"_HAS_LIBSVM",
")",
":",
"raise",
"RuntimeError",
"(",
"'libsvm not found. libsvm conversion API is disabled.'",
")",
"if",
"isinstance",
"(",
"model",
",",
"_string_types",
")",
":",
"libsvm_model",
"=",
"_libsvm_util",
".",
"load_model",
"(",
"model",
")",
"else",
":",
"libsvm_model",
"=",
"model",
"if",
"not",
"isinstance",
"(",
"libsvm_model",
",",
"_libsvm",
".",
"svm_model",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'model' of type '%s' (got %s)\"",
"%",
"(",
"_libsvm",
".",
"svm_model",
",",
"type",
"(",
"libsvm_model",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"target_name",
",",
"_string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'target_name' of type str (got %s)\"",
"%",
"type",
"(",
"libsvm_model",
")",
")",
"if",
"input_length",
"!=",
"'auto'",
"and",
"not",
"isinstance",
"(",
"input_length",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'input_length' of type int, got %s\"",
"%",
"type",
"(",
"input_length",
")",
")",
"if",
"input_length",
"!=",
"'auto'",
"and",
"not",
"isinstance",
"(",
"input_names",
",",
"_string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"'input_length' should not be used unless the input will be only one array.\"",
")",
"if",
"not",
"isinstance",
"(",
"probability",
",",
"_string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected 'probability' of type str (got %s)\"",
"%",
"type",
"(",
"probability",
")",
")",
"return",
"_libsvm_converter",
".",
"convert",
"(",
"libsvm_model",
",",
"input_names",
",",
"target_name",
",",
"input_length",
",",
"probability",
")"
] | Convert a LIBSVM model to Core ML format.
Parameters
----------
model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
or string path to a saved model.
input_names: str | [str]
Name of the input column(s).
If a single string is used (the default) the input will be an array. The
length of the array will be inferred from the model, this can be overridden
using the 'input_length' parameter.
target: str
Name of the output column.
probability: str
Name of the output class probability column.
Only used for C-SVC and nu-SVC that have been trained with probability
estimates enabled.
input_length: int
Set the length of the input array.
This parameter should only be used when the input is an array (i.e. when
'input_name' is a string).
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a LIBSVM model
>>> import svmutil
>>> problem = svmutil.svm_problem([0,0,1,1], [[0,1], [1,1], [8,9], [7,7]])
>>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
# Convert using default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
# Save the CoreML model to a file.
>>> coreml_model.save('./my_model.mlmodel')
# Convert using user specified input names
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y']) | [
"Convert",
"a",
"LIBSVM",
"model",
"to",
"Core",
"ML",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/__init__.py#L17-L93 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedScalarFieldContainer.append | def append(self, value):
"""Appends an item to the list. Similar to list.append()."""
self._values.append(self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified() | python | def append(self, value):
"""Appends an item to the list. Similar to list.append()."""
self._values.append(self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified() | [
"def",
"append",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_values",
".",
"append",
"(",
"self",
".",
"_type_checker",
".",
"CheckValue",
"(",
"value",
")",
")",
"if",
"not",
"self",
".",
"_message_listener",
".",
"dirty",
":",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | Appends an item to the list. Similar to list.append(). | [
"Appends",
"an",
"item",
"to",
"the",
"list",
".",
"Similar",
"to",
"list",
".",
"append",
"()",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L249-L253 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedScalarFieldContainer.insert | def insert(self, key, value):
"""Inserts the item at the specified position. Similar to list.insert()."""
self._values.insert(key, self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified() | python | def insert(self, key, value):
"""Inserts the item at the specified position. Similar to list.insert()."""
self._values.insert(key, self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified() | [
"def",
"insert",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"_values",
".",
"insert",
"(",
"key",
",",
"self",
".",
"_type_checker",
".",
"CheckValue",
"(",
"value",
")",
")",
"if",
"not",
"self",
".",
"_message_listener",
".",
"dirty",
":",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | Inserts the item at the specified position. Similar to list.insert(). | [
"Inserts",
"the",
"item",
"at",
"the",
"specified",
"position",
".",
"Similar",
"to",
"list",
".",
"insert",
"()",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L255-L259 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedScalarFieldContainer.extend | def extend(self, elem_seq):
"""Extends by appending the given iterable. Similar to list.extend()."""
if elem_seq is None:
return
try:
elem_seq_iter = iter(elem_seq)
except TypeError:
if not elem_seq:
# silently ignore falsy inputs :-/.
# TODO(ptucker): Deprecate this behavior. b/18413862
return
raise
new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter]
if new_values:
self._values.extend(new_values)
self._message_listener.Modified() | python | def extend(self, elem_seq):
"""Extends by appending the given iterable. Similar to list.extend()."""
if elem_seq is None:
return
try:
elem_seq_iter = iter(elem_seq)
except TypeError:
if not elem_seq:
# silently ignore falsy inputs :-/.
# TODO(ptucker): Deprecate this behavior. b/18413862
return
raise
new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter]
if new_values:
self._values.extend(new_values)
self._message_listener.Modified() | [
"def",
"extend",
"(",
"self",
",",
"elem_seq",
")",
":",
"if",
"elem_seq",
"is",
"None",
":",
"return",
"try",
":",
"elem_seq_iter",
"=",
"iter",
"(",
"elem_seq",
")",
"except",
"TypeError",
":",
"if",
"not",
"elem_seq",
":",
"# silently ignore falsy inputs :-/.",
"# TODO(ptucker): Deprecate this behavior. b/18413862",
"return",
"raise",
"new_values",
"=",
"[",
"self",
".",
"_type_checker",
".",
"CheckValue",
"(",
"elem",
")",
"for",
"elem",
"in",
"elem_seq_iter",
"]",
"if",
"new_values",
":",
"self",
".",
"_values",
".",
"extend",
"(",
"new_values",
")",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | Extends by appending the given iterable. Similar to list.extend(). | [
"Extends",
"by",
"appending",
"the",
"given",
"iterable",
".",
"Similar",
"to",
"list",
".",
"extend",
"()",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L261-L278 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedScalarFieldContainer.MergeFrom | def MergeFrom(self, other):
"""Appends the contents of another repeated field of the same type to this
one. We do not check the types of the individual fields.
"""
self._values.extend(other._values)
self._message_listener.Modified() | python | def MergeFrom(self, other):
"""Appends the contents of another repeated field of the same type to this
one. We do not check the types of the individual fields.
"""
self._values.extend(other._values)
self._message_listener.Modified() | [
"def",
"MergeFrom",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_values",
".",
"extend",
"(",
"other",
".",
"_values",
")",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | Appends the contents of another repeated field of the same type to this
one. We do not check the types of the individual fields. | [
"Appends",
"the",
"contents",
"of",
"another",
"repeated",
"field",
"of",
"the",
"same",
"type",
"to",
"this",
"one",
".",
"We",
"do",
"not",
"check",
"the",
"types",
"of",
"the",
"individual",
"fields",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L280-L285 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedScalarFieldContainer.remove | def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.Modified() | python | def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.Modified() | [
"def",
"remove",
"(",
"self",
",",
"elem",
")",
":",
"self",
".",
"_values",
".",
"remove",
"(",
"elem",
")",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")"
] | Removes an item from the list. Similar to list.remove(). | [
"Removes",
"an",
"item",
"from",
"the",
"list",
".",
"Similar",
"to",
"list",
".",
"remove",
"()",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L287-L290 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedScalarFieldContainer.pop | def pop(self, key=-1):
"""Removes and returns an item at a given index. Similar to list.pop()."""
value = self._values[key]
self.__delitem__(key)
return value | python | def pop(self, key=-1):
"""Removes and returns an item at a given index. Similar to list.pop()."""
value = self._values[key]
self.__delitem__(key)
return value | [
"def",
"pop",
"(",
"self",
",",
"key",
"=",
"-",
"1",
")",
":",
"value",
"=",
"self",
".",
"_values",
"[",
"key",
"]",
"self",
".",
"__delitem__",
"(",
"key",
")",
"return",
"value"
] | Removes and returns an item at a given index. Similar to list.pop(). | [
"Removes",
"and",
"returns",
"an",
"item",
"at",
"a",
"given",
"index",
".",
"Similar",
"to",
"list",
".",
"pop",
"()",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L292-L296 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedCompositeFieldContainer.add | def add(self, **kwargs):
"""Adds a new element at the end of the list and returns it. Keyword
arguments may be used to initialize the element.
"""
new_element = self._message_descriptor._concrete_class(**kwargs)
new_element._SetListener(self._message_listener)
self._values.append(new_element)
if not self._message_listener.dirty:
self._message_listener.Modified()
return new_element | python | def add(self, **kwargs):
"""Adds a new element at the end of the list and returns it. Keyword
arguments may be used to initialize the element.
"""
new_element = self._message_descriptor._concrete_class(**kwargs)
new_element._SetListener(self._message_listener)
self._values.append(new_element)
if not self._message_listener.dirty:
self._message_listener.Modified()
return new_element | [
"def",
"add",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"new_element",
"=",
"self",
".",
"_message_descriptor",
".",
"_concrete_class",
"(",
"*",
"*",
"kwargs",
")",
"new_element",
".",
"_SetListener",
"(",
"self",
".",
"_message_listener",
")",
"self",
".",
"_values",
".",
"append",
"(",
"new_element",
")",
"if",
"not",
"self",
".",
"_message_listener",
".",
"dirty",
":",
"self",
".",
"_message_listener",
".",
"Modified",
"(",
")",
"return",
"new_element"
] | Adds a new element at the end of the list and returns it. Keyword
arguments may be used to initialize the element. | [
"Adds",
"a",
"new",
"element",
"at",
"the",
"end",
"of",
"the",
"list",
"and",
"returns",
"it",
".",
"Keyword",
"arguments",
"may",
"be",
"used",
"to",
"initialize",
"the",
"element",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L368-L377 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py | RepeatedCompositeFieldContainer.extend | def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
message_class = self._message_descriptor._concrete_class
listener = self._message_listener
values = self._values
for message in elem_seq:
new_element = message_class()
new_element._SetListener(listener)
new_element.MergeFrom(message)
values.append(new_element)
listener.Modified() | python | def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
message_class = self._message_descriptor._concrete_class
listener = self._message_listener
values = self._values
for message in elem_seq:
new_element = message_class()
new_element._SetListener(listener)
new_element.MergeFrom(message)
values.append(new_element)
listener.Modified() | [
"def",
"extend",
"(",
"self",
",",
"elem_seq",
")",
":",
"message_class",
"=",
"self",
".",
"_message_descriptor",
".",
"_concrete_class",
"listener",
"=",
"self",
".",
"_message_listener",
"values",
"=",
"self",
".",
"_values",
"for",
"message",
"in",
"elem_seq",
":",
"new_element",
"=",
"message_class",
"(",
")",
"new_element",
".",
"_SetListener",
"(",
"listener",
")",
"new_element",
".",
"MergeFrom",
"(",
"message",
")",
"values",
".",
"append",
"(",
"new_element",
")",
"listener",
".",
"Modified",
"(",
")"
] | Extends by appending the given sequence of elements of the same type
as this one, copying each individual message. | [
"Extends",
"by",
"appending",
"the",
"given",
"sequence",
"of",
"elements",
"of",
"the",
"same",
"type",
"as",
"this",
"one",
"copying",
"each",
"individual",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/containers.py#L379-L391 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/set.py | difference | def difference (b, a):
""" Returns the elements of B that are not in A.
"""
a = set(a)
result = []
for item in b:
if item not in a:
result.append(item)
return result | python | def difference (b, a):
""" Returns the elements of B that are not in A.
"""
a = set(a)
result = []
for item in b:
if item not in a:
result.append(item)
return result | [
"def",
"difference",
"(",
"b",
",",
"a",
")",
":",
"a",
"=",
"set",
"(",
"a",
")",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"b",
":",
"if",
"item",
"not",
"in",
"a",
":",
"result",
".",
"append",
"(",
"item",
")",
"return",
"result"
] | Returns the elements of B that are not in A. | [
"Returns",
"the",
"elements",
"of",
"B",
"that",
"are",
"not",
"in",
"A",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/set.py#L10-L18 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/set.py | intersection | def intersection (set1, set2):
""" Removes from set1 any items which don't appear in set2 and returns the result.
"""
assert is_iterable(set1)
assert is_iterable(set2)
result = []
for v in set1:
if v in set2:
result.append (v)
return result | python | def intersection (set1, set2):
""" Removes from set1 any items which don't appear in set2 and returns the result.
"""
assert is_iterable(set1)
assert is_iterable(set2)
result = []
for v in set1:
if v in set2:
result.append (v)
return result | [
"def",
"intersection",
"(",
"set1",
",",
"set2",
")",
":",
"assert",
"is_iterable",
"(",
"set1",
")",
"assert",
"is_iterable",
"(",
"set2",
")",
"result",
"=",
"[",
"]",
"for",
"v",
"in",
"set1",
":",
"if",
"v",
"in",
"set2",
":",
"result",
".",
"append",
"(",
"v",
")",
"return",
"result"
] | Removes from set1 any items which don't appear in set2 and returns the result. | [
"Removes",
"from",
"set1",
"any",
"items",
"which",
"don",
"t",
"appear",
"in",
"set2",
"and",
"returns",
"the",
"result",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/set.py#L20-L29 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/set.py | contains | def contains (small, large):
""" Returns true iff all elements of 'small' exist in 'large'.
"""
small = to_seq (small)
large = to_seq (large)
for s in small:
if not s in large:
return False
return True | python | def contains (small, large):
""" Returns true iff all elements of 'small' exist in 'large'.
"""
small = to_seq (small)
large = to_seq (large)
for s in small:
if not s in large:
return False
return True | [
"def",
"contains",
"(",
"small",
",",
"large",
")",
":",
"small",
"=",
"to_seq",
"(",
"small",
")",
"large",
"=",
"to_seq",
"(",
"large",
")",
"for",
"s",
"in",
"small",
":",
"if",
"not",
"s",
"in",
"large",
":",
"return",
"False",
"return",
"True"
] | Returns true iff all elements of 'small' exist in 'large'. | [
"Returns",
"true",
"iff",
"all",
"elements",
"of",
"small",
"exist",
"in",
"large",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/set.py#L31-L40 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/set.py | equal | def equal (a, b):
""" Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class.
"""
assert is_iterable(a)
assert is_iterable(b)
return contains (a, b) and contains (b, a) | python | def equal (a, b):
""" Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class.
"""
assert is_iterable(a)
assert is_iterable(b)
return contains (a, b) and contains (b, a) | [
"def",
"equal",
"(",
"a",
",",
"b",
")",
":",
"assert",
"is_iterable",
"(",
"a",
")",
"assert",
"is_iterable",
"(",
"b",
")",
"return",
"contains",
"(",
"a",
",",
"b",
")",
"and",
"contains",
"(",
"b",
",",
"a",
")"
] | Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class. | [
"Returns",
"True",
"iff",
"a",
"contains",
"the",
"same",
"elements",
"as",
"b",
"irrespective",
"of",
"their",
"order",
".",
"#",
"TODO",
":",
"Python",
"2",
".",
"4",
"has",
"a",
"proper",
"set",
"class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/set.py#L42-L48 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/image_classifier/_annotate.py | annotate | def annotate(data, image_column=None, annotation_column='annotations'):
"""
Annotate your images loaded in either an SFrame or SArray Format
The annotate util is a GUI assisted application used to create labels in
SArray Image data. Specifying a column, with dtype Image, in an SFrame
works as well since SFrames are composed of multiple SArrays.
When the GUI is terminated an SFrame is returned with the representative,
images and annotations.
The returned SFrame includes the newly created annotations.
Parameters
--------------
data : SArray | SFrame
The data containing the images. If the data type is 'SArray'
the 'image_column', and 'annotation_column' variables are used to construct
a new 'SFrame' containing the 'SArray' data for annotation.
If the data type is 'SFrame' the 'image_column', and 'annotation_column'
variables are used to annotate the images.
image_column: string, optional
If the data type is SFrame and the 'image_column' parameter is specified
then the column name is used as the image column used in the annotation. If
the data type is 'SFrame' and the 'image_column' variable is left empty. A
default column value of 'image' is used in the annotation. If the data type is
'SArray', the 'image_column' is used to construct the 'SFrame' data for
the annotation
annotation_column : string, optional
If the data type is SFrame and the 'annotation_column' parameter is specified
then the column name is used as the annotation column used in the annotation. If
the data type is 'SFrame' and the 'annotation_column' variable is left empty. A
default column value of 'annotation' is used in the annotation. If the data type is
'SArray', the 'annotation_column' is used to construct the 'SFrame' data for
the annotation
Returns
-------
out : SFrame
A new SFrame that contains the newly annotated data.
Examples
--------
>> import turicreate as tc
>> images = tc.image_analysis.load_images("path/to/images")
>> print(images)
Columns:
path str
image Image
Rows: 4
Data:
+------------------------+--------------------------+
| path | image |
+------------------------+--------------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 |
| /Users/username/Doc... | Height: 1386 Width: 1000 |
| /Users/username/Doc... | Height: 536 Width: 858 |
| /Users/username/Doc... | Height: 1512 Width: 2680 |
+------------------------+--------------------------+
[4 rows x 2 columns]
>> images = tc.image_classifier.annotate(images)
>> print(images)
Columns:
path str
image Image
annotation str
Rows: 4
Data:
+------------------------+--------------------------+-------------------+
| path | image | annotation |
+------------------------+--------------------------+-------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 | dog |
| /Users/username/Doc... | Height: 1386 Width: 1000 | dog |
| /Users/username/Doc... | Height: 536 Width: 858 | cat |
| /Users/username/Doc... | Height: 1512 Width: 2680 | mouse |
+------------------------+--------------------------+-------------------+
[4 rows x 3 columns]
"""
# Check Value of Column Variables
if image_column == None:
image_column = _tkutl._find_only_image_column(data)
if image_column == None:
raise ValueError("'image_column' cannot be 'None'")
if type(image_column) != str:
raise TypeError("'image_column' has to be of type 'str'")
if annotation_column == None:
annotation_column = ""
if type(annotation_column) != str:
raise TypeError("'annotation_column' has to be of type 'str'")
# Check Data Structure
if type(data) == __tc.data_structures.image.Image:
data = __tc.SFrame({image_column:__tc.SArray([data])})
elif type(data) == __tc.data_structures.sframe.SFrame:
if(data.shape[0] == 0):
return data
if not (data[image_column].dtype == __tc.data_structures.image.Image):
raise TypeError("'data[image_column]' must be an SFrame or SArray")
elif type(data) == __tc.data_structures.sarray.SArray:
if(data.shape[0] == 0):
return data
data = __tc.SFrame({image_column:data})
else:
raise TypeError("'data' must be an SFrame or SArray")
_warning_annotations()
annotation_window = __tc.extensions.create_image_classification_annotation(
data,
[image_column],
annotation_column
)
annotation_window.annotate(_get_client_app_path())
return annotation_window.returnAnnotations() | python | def annotate(data, image_column=None, annotation_column='annotations'):
"""
Annotate your images loaded in either an SFrame or SArray Format
The annotate util is a GUI assisted application used to create labels in
SArray Image data. Specifying a column, with dtype Image, in an SFrame
works as well since SFrames are composed of multiple SArrays.
When the GUI is terminated an SFrame is returned with the representative,
images and annotations.
The returned SFrame includes the newly created annotations.
Parameters
--------------
data : SArray | SFrame
The data containing the images. If the data type is 'SArray'
the 'image_column', and 'annotation_column' variables are used to construct
a new 'SFrame' containing the 'SArray' data for annotation.
If the data type is 'SFrame' the 'image_column', and 'annotation_column'
variables are used to annotate the images.
image_column: string, optional
If the data type is SFrame and the 'image_column' parameter is specified
then the column name is used as the image column used in the annotation. If
the data type is 'SFrame' and the 'image_column' variable is left empty. A
default column value of 'image' is used in the annotation. If the data type is
'SArray', the 'image_column' is used to construct the 'SFrame' data for
the annotation
annotation_column : string, optional
If the data type is SFrame and the 'annotation_column' parameter is specified
then the column name is used as the annotation column used in the annotation. If
the data type is 'SFrame' and the 'annotation_column' variable is left empty. A
default column value of 'annotation' is used in the annotation. If the data type is
'SArray', the 'annotation_column' is used to construct the 'SFrame' data for
the annotation
Returns
-------
out : SFrame
A new SFrame that contains the newly annotated data.
Examples
--------
>> import turicreate as tc
>> images = tc.image_analysis.load_images("path/to/images")
>> print(images)
Columns:
path str
image Image
Rows: 4
Data:
+------------------------+--------------------------+
| path | image |
+------------------------+--------------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 |
| /Users/username/Doc... | Height: 1386 Width: 1000 |
| /Users/username/Doc... | Height: 536 Width: 858 |
| /Users/username/Doc... | Height: 1512 Width: 2680 |
+------------------------+--------------------------+
[4 rows x 2 columns]
>> images = tc.image_classifier.annotate(images)
>> print(images)
Columns:
path str
image Image
annotation str
Rows: 4
Data:
+------------------------+--------------------------+-------------------+
| path | image | annotation |
+------------------------+--------------------------+-------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 | dog |
| /Users/username/Doc... | Height: 1386 Width: 1000 | dog |
| /Users/username/Doc... | Height: 536 Width: 858 | cat |
| /Users/username/Doc... | Height: 1512 Width: 2680 | mouse |
+------------------------+--------------------------+-------------------+
[4 rows x 3 columns]
"""
# Check Value of Column Variables
if image_column == None:
image_column = _tkutl._find_only_image_column(data)
if image_column == None:
raise ValueError("'image_column' cannot be 'None'")
if type(image_column) != str:
raise TypeError("'image_column' has to be of type 'str'")
if annotation_column == None:
annotation_column = ""
if type(annotation_column) != str:
raise TypeError("'annotation_column' has to be of type 'str'")
# Check Data Structure
if type(data) == __tc.data_structures.image.Image:
data = __tc.SFrame({image_column:__tc.SArray([data])})
elif type(data) == __tc.data_structures.sframe.SFrame:
if(data.shape[0] == 0):
return data
if not (data[image_column].dtype == __tc.data_structures.image.Image):
raise TypeError("'data[image_column]' must be an SFrame or SArray")
elif type(data) == __tc.data_structures.sarray.SArray:
if(data.shape[0] == 0):
return data
data = __tc.SFrame({image_column:data})
else:
raise TypeError("'data' must be an SFrame or SArray")
_warning_annotations()
annotation_window = __tc.extensions.create_image_classification_annotation(
data,
[image_column],
annotation_column
)
annotation_window.annotate(_get_client_app_path())
return annotation_window.returnAnnotations() | [
"def",
"annotate",
"(",
"data",
",",
"image_column",
"=",
"None",
",",
"annotation_column",
"=",
"'annotations'",
")",
":",
"# Check Value of Column Variables",
"if",
"image_column",
"==",
"None",
":",
"image_column",
"=",
"_tkutl",
".",
"_find_only_image_column",
"(",
"data",
")",
"if",
"image_column",
"==",
"None",
":",
"raise",
"ValueError",
"(",
"\"'image_column' cannot be 'None'\"",
")",
"if",
"type",
"(",
"image_column",
")",
"!=",
"str",
":",
"raise",
"TypeError",
"(",
"\"'image_column' has to be of type 'str'\"",
")",
"if",
"annotation_column",
"==",
"None",
":",
"annotation_column",
"=",
"\"\"",
"if",
"type",
"(",
"annotation_column",
")",
"!=",
"str",
":",
"raise",
"TypeError",
"(",
"\"'annotation_column' has to be of type 'str'\"",
")",
"# Check Data Structure",
"if",
"type",
"(",
"data",
")",
"==",
"__tc",
".",
"data_structures",
".",
"image",
".",
"Image",
":",
"data",
"=",
"__tc",
".",
"SFrame",
"(",
"{",
"image_column",
":",
"__tc",
".",
"SArray",
"(",
"[",
"data",
"]",
")",
"}",
")",
"elif",
"type",
"(",
"data",
")",
"==",
"__tc",
".",
"data_structures",
".",
"sframe",
".",
"SFrame",
":",
"if",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
")",
":",
"return",
"data",
"if",
"not",
"(",
"data",
"[",
"image_column",
"]",
".",
"dtype",
"==",
"__tc",
".",
"data_structures",
".",
"image",
".",
"Image",
")",
":",
"raise",
"TypeError",
"(",
"\"'data[image_column]' must be an SFrame or SArray\"",
")",
"elif",
"type",
"(",
"data",
")",
"==",
"__tc",
".",
"data_structures",
".",
"sarray",
".",
"SArray",
":",
"if",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
")",
":",
"return",
"data",
"data",
"=",
"__tc",
".",
"SFrame",
"(",
"{",
"image_column",
":",
"data",
"}",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"'data' must be an SFrame or SArray\"",
")",
"_warning_annotations",
"(",
")",
"annotation_window",
"=",
"__tc",
".",
"extensions",
".",
"create_image_classification_annotation",
"(",
"data",
",",
"[",
"image_column",
"]",
",",
"annotation_column",
")",
"annotation_window",
".",
"annotate",
"(",
"_get_client_app_path",
"(",
")",
")",
"return",
"annotation_window",
".",
"returnAnnotations",
"(",
")"
] | Annotate your images loaded in either an SFrame or SArray Format
The annotate util is a GUI assisted application used to create labels in
SArray Image data. Specifying a column, with dtype Image, in an SFrame
works as well since SFrames are composed of multiple SArrays.
When the GUI is terminated an SFrame is returned with the representative,
images and annotations.
The returned SFrame includes the newly created annotations.
Parameters
--------------
data : SArray | SFrame
The data containing the images. If the data type is 'SArray'
the 'image_column', and 'annotation_column' variables are used to construct
a new 'SFrame' containing the 'SArray' data for annotation.
If the data type is 'SFrame' the 'image_column', and 'annotation_column'
variables are used to annotate the images.
image_column: string, optional
If the data type is SFrame and the 'image_column' parameter is specified
then the column name is used as the image column used in the annotation. If
the data type is 'SFrame' and the 'image_column' variable is left empty. A
default column value of 'image' is used in the annotation. If the data type is
'SArray', the 'image_column' is used to construct the 'SFrame' data for
the annotation
annotation_column : string, optional
If the data type is SFrame and the 'annotation_column' parameter is specified
then the column name is used as the annotation column used in the annotation. If
the data type is 'SFrame' and the 'annotation_column' variable is left empty. A
default column value of 'annotation' is used in the annotation. If the data type is
'SArray', the 'annotation_column' is used to construct the 'SFrame' data for
the annotation
Returns
-------
out : SFrame
A new SFrame that contains the newly annotated data.
Examples
--------
>> import turicreate as tc
>> images = tc.image_analysis.load_images("path/to/images")
>> print(images)
Columns:
path str
image Image
Rows: 4
Data:
+------------------------+--------------------------+
| path | image |
+------------------------+--------------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 |
| /Users/username/Doc... | Height: 1386 Width: 1000 |
| /Users/username/Doc... | Height: 536 Width: 858 |
| /Users/username/Doc... | Height: 1512 Width: 2680 |
+------------------------+--------------------------+
[4 rows x 2 columns]
>> images = tc.image_classifier.annotate(images)
>> print(images)
Columns:
path str
image Image
annotation str
Rows: 4
Data:
+------------------------+--------------------------+-------------------+
| path | image | annotation |
+------------------------+--------------------------+-------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 | dog |
| /Users/username/Doc... | Height: 1386 Width: 1000 | dog |
| /Users/username/Doc... | Height: 536 Width: 858 | cat |
| /Users/username/Doc... | Height: 1512 Width: 2680 | mouse |
+------------------------+--------------------------+-------------------+
[4 rows x 3 columns] | [
"Annotate",
"your",
"images",
"loaded",
"in",
"either",
"an",
"SFrame",
"or",
"SArray",
"Format"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_classifier/_annotate.py#L30-L171 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/image_classifier/_annotate.py | recover_annotation | def recover_annotation():
"""
Recover the last annotated SFrame.
If you annotate an SFrame and forget to assign it to a variable, this
function allows you to recover the last annotated SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the recovered annotation data.
Examples
--------
>> annotations = tc.image_classifier.recover_annotation()
>> print(annotations)
Columns:
images Image
labels int
annotations str
Rows: 400
Data:
+----------------------+-------------+
| images | annotations |
+----------------------+-------------+
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Dog |
| Height: 28 Width: 28 | Mouse |
| Height: 28 Width: 28 | Feather |
| Height: 28 Width: 28 | Bird |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Dog |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Bird |
+----------------------+-------------+
[400 rows x 3 columns]
"""
empty_instance = __tc.extensions.ImageClassification()
annotation_wrapper = empty_instance.get_annotation_registry()
return annotation_wrapper.annotation_sframe | python | def recover_annotation():
"""
Recover the last annotated SFrame.
If you annotate an SFrame and forget to assign it to a variable, this
function allows you to recover the last annotated SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the recovered annotation data.
Examples
--------
>> annotations = tc.image_classifier.recover_annotation()
>> print(annotations)
Columns:
images Image
labels int
annotations str
Rows: 400
Data:
+----------------------+-------------+
| images | annotations |
+----------------------+-------------+
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Dog |
| Height: 28 Width: 28 | Mouse |
| Height: 28 Width: 28 | Feather |
| Height: 28 Width: 28 | Bird |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Dog |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Bird |
+----------------------+-------------+
[400 rows x 3 columns]
"""
empty_instance = __tc.extensions.ImageClassification()
annotation_wrapper = empty_instance.get_annotation_registry()
return annotation_wrapper.annotation_sframe | [
"def",
"recover_annotation",
"(",
")",
":",
"empty_instance",
"=",
"__tc",
".",
"extensions",
".",
"ImageClassification",
"(",
")",
"annotation_wrapper",
"=",
"empty_instance",
".",
"get_annotation_registry",
"(",
")",
"return",
"annotation_wrapper",
".",
"annotation_sframe"
] | Recover the last annotated SFrame.
If you annotate an SFrame and forget to assign it to a variable, this
function allows you to recover the last annotated SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the recovered annotation data.
Examples
--------
>> annotations = tc.image_classifier.recover_annotation()
>> print(annotations)
Columns:
images Image
labels int
annotations str
Rows: 400
Data:
+----------------------+-------------+
| images | annotations |
+----------------------+-------------+
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Dog |
| Height: 28 Width: 28 | Mouse |
| Height: 28 Width: 28 | Feather |
| Height: 28 Width: 28 | Bird |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Dog |
| Height: 28 Width: 28 | Cat |
| Height: 28 Width: 28 | Bird |
+----------------------+-------------+
[400 rows x 3 columns] | [
"Recover",
"the",
"last",
"annotated",
"SFrame",
".",
"If",
"you",
"annotate",
"an",
"SFrame",
"and",
"forget",
"to",
"assign",
"it",
"to",
"a",
"variable",
"this",
"function",
"allows",
"you",
"to",
"recover",
"the",
"last",
"annotated",
"SFrame",
".",
"Returns",
"-------"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_classifier/_annotate.py#L173-L221 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_imputer.py | convert | def convert(model, input_features, output_features):
"""Convert a DictVectorizer model to the protobuf spec.
Parameters
----------
model: DictVectorizer
A fitted DictVectorizer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
assert len(input_features) == 1
assert isinstance(input_features[0][1], datatypes.Array)
# feature name in and out are the same here
spec = set_transform_interface_params(spec, input_features, output_features)
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, Imputer)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'statistics_'))
if model.axis != 0:
raise ValueError("Imputation is only supported along axis = 0.")
# The imputer in our framework only works on single columns, so
# we need to translate that over. The easiest way to do that is to
# put it in a nested pipeline with a feature extractor and a
tr_spec = spec.imputer
for v in model.statistics_:
tr_spec.imputedDoubleArray.vector.append(v)
try:
tr_spec.replaceDoubleValue = float(model.missing_values)
except ValueError:
raise ValueError("Only scalar values or NAN as missing_values "
"in _imputer are supported.")
return _MLModel(spec) | python | def convert(model, input_features, output_features):
"""Convert a DictVectorizer model to the protobuf spec.
Parameters
----------
model: DictVectorizer
A fitted DictVectorizer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
assert len(input_features) == 1
assert isinstance(input_features[0][1], datatypes.Array)
# feature name in and out are the same here
spec = set_transform_interface_params(spec, input_features, output_features)
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, Imputer)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'statistics_'))
if model.axis != 0:
raise ValueError("Imputation is only supported along axis = 0.")
# The imputer in our framework only works on single columns, so
# we need to translate that over. The easiest way to do that is to
# put it in a nested pipeline with a feature extractor and a
tr_spec = spec.imputer
for v in model.statistics_:
tr_spec.imputedDoubleArray.vector.append(v)
try:
tr_spec.replaceDoubleValue = float(model.missing_values)
except ValueError:
raise ValueError("Only scalar values or NAN as missing_values "
"in _imputer are supported.")
return _MLModel(spec) | [
"def",
"convert",
"(",
"model",
",",
"input_features",
",",
"output_features",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"# Set the interface params.",
"spec",
"=",
"_Model_pb2",
".",
"Model",
"(",
")",
"spec",
".",
"specificationVersion",
"=",
"SPECIFICATION_VERSION",
"assert",
"len",
"(",
"input_features",
")",
"==",
"1",
"assert",
"isinstance",
"(",
"input_features",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"datatypes",
".",
"Array",
")",
"# feature name in and out are the same here",
"spec",
"=",
"set_transform_interface_params",
"(",
"spec",
",",
"input_features",
",",
"output_features",
")",
"# Test the scikit-learn model",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"Imputer",
")",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"lambda",
"m",
":",
"hasattr",
"(",
"m",
",",
"'statistics_'",
")",
")",
"if",
"model",
".",
"axis",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Imputation is only supported along axis = 0.\"",
")",
"# The imputer in our framework only works on single columns, so",
"# we need to translate that over. The easiest way to do that is to",
"# put it in a nested pipeline with a feature extractor and a",
"tr_spec",
"=",
"spec",
".",
"imputer",
"for",
"v",
"in",
"model",
".",
"statistics_",
":",
"tr_spec",
".",
"imputedDoubleArray",
".",
"vector",
".",
"append",
"(",
"v",
")",
"try",
":",
"tr_spec",
".",
"replaceDoubleValue",
"=",
"float",
"(",
"model",
".",
"missing_values",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Only scalar values or NAN as missing_values \"",
"\"in _imputer are supported.\"",
")",
"return",
"_MLModel",
"(",
"spec",
")"
] | Convert a DictVectorizer model to the protobuf spec.
Parameters
----------
model: DictVectorizer
A fitted DictVectorizer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"DictVectorizer",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_imputer.py#L21-L76 | train |
apple/turicreate | src/unity/python/turicreate/_cython/python_printer_callback.py | print_callback | def print_callback(val):
"""
Internal function.
This function is called via a call back returning from IPC to Cython
to Python. It tries to perform incremental printing to IPython Notebook or
Jupyter Notebook and when all else fails, just prints locally.
"""
success = False
try:
# for reasons I cannot fathom, regular printing, even directly
# to io.stdout does not work.
# I have to intrude rather deep into IPython to make it behave
if have_ipython:
if InteractiveShell.initialized():
IPython.display.publish_display_data({'text/plain':val,'text/html':'<pre>' + val + '</pre>'})
success = True
except:
pass
if not success:
print(val)
sys.stdout.flush() | python | def print_callback(val):
"""
Internal function.
This function is called via a call back returning from IPC to Cython
to Python. It tries to perform incremental printing to IPython Notebook or
Jupyter Notebook and when all else fails, just prints locally.
"""
success = False
try:
# for reasons I cannot fathom, regular printing, even directly
# to io.stdout does not work.
# I have to intrude rather deep into IPython to make it behave
if have_ipython:
if InteractiveShell.initialized():
IPython.display.publish_display_data({'text/plain':val,'text/html':'<pre>' + val + '</pre>'})
success = True
except:
pass
if not success:
print(val)
sys.stdout.flush() | [
"def",
"print_callback",
"(",
"val",
")",
":",
"success",
"=",
"False",
"try",
":",
"# for reasons I cannot fathom, regular printing, even directly",
"# to io.stdout does not work.",
"# I have to intrude rather deep into IPython to make it behave",
"if",
"have_ipython",
":",
"if",
"InteractiveShell",
".",
"initialized",
"(",
")",
":",
"IPython",
".",
"display",
".",
"publish_display_data",
"(",
"{",
"'text/plain'",
":",
"val",
",",
"'text/html'",
":",
"'<pre>'",
"+",
"val",
"+",
"'</pre>'",
"}",
")",
"success",
"=",
"True",
"except",
":",
"pass",
"if",
"not",
"success",
":",
"print",
"(",
"val",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | Internal function.
This function is called via a call back returning from IPC to Cython
to Python. It tries to perform incremental printing to IPython Notebook or
Jupyter Notebook and when all else fails, just prints locally. | [
"Internal",
"function",
".",
"This",
"function",
"is",
"called",
"via",
"a",
"call",
"back",
"returning",
"from",
"IPC",
"to",
"Cython",
"to",
"Python",
".",
"It",
"tries",
"to",
"perform",
"incremental",
"printing",
"to",
"IPython",
"Notebook",
"or",
"Jupyter",
"Notebook",
"and",
"when",
"all",
"else",
"fails",
"just",
"prints",
"locally",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_cython/python_printer_callback.py#L17-L38 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_main.py | run | def run(toolkit_name, options, verbose=True, show_progress=False):
"""
Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fail executing the toolkit.
"""
unity = glconnect.get_unity()
if (not verbose):
glconnect.get_server().set_log_progress(False)
(success, message, params) = unity.run_toolkit(toolkit_name, options)
if (len(message) > 0):
logging.getLogger(__name__).error("Toolkit error: " + message)
# set the verbose level back to default
glconnect.get_server().set_log_progress(True)
if success:
return params
else:
raise ToolkitError(str(message)) | python | def run(toolkit_name, options, verbose=True, show_progress=False):
"""
Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fail executing the toolkit.
"""
unity = glconnect.get_unity()
if (not verbose):
glconnect.get_server().set_log_progress(False)
(success, message, params) = unity.run_toolkit(toolkit_name, options)
if (len(message) > 0):
logging.getLogger(__name__).error("Toolkit error: " + message)
# set the verbose level back to default
glconnect.get_server().set_log_progress(True)
if success:
return params
else:
raise ToolkitError(str(message)) | [
"def",
"run",
"(",
"toolkit_name",
",",
"options",
",",
"verbose",
"=",
"True",
",",
"show_progress",
"=",
"False",
")",
":",
"unity",
"=",
"glconnect",
".",
"get_unity",
"(",
")",
"if",
"(",
"not",
"verbose",
")",
":",
"glconnect",
".",
"get_server",
"(",
")",
".",
"set_log_progress",
"(",
"False",
")",
"(",
"success",
",",
"message",
",",
"params",
")",
"=",
"unity",
".",
"run_toolkit",
"(",
"toolkit_name",
",",
"options",
")",
"if",
"(",
"len",
"(",
"message",
")",
">",
"0",
")",
":",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"error",
"(",
"\"Toolkit error: \"",
"+",
"message",
")",
"# set the verbose level back to default",
"glconnect",
".",
"get_server",
"(",
")",
".",
"set_log_progress",
"(",
"True",
")",
"if",
"success",
":",
"return",
"params",
"else",
":",
"raise",
"ToolkitError",
"(",
"str",
"(",
"message",
")",
")"
] | Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fail executing the toolkit. | [
"Internal",
"function",
"to",
"execute",
"toolkit",
"on",
"the",
"turicreate",
"server",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_main.py#L25-L69 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _RoundTowardZero | def _RoundTowardZero(value, divider):
"""Truncates the remainder part after division."""
# For some languanges, the sign of the remainder is implementation
# dependent if any of the operands is negative. Here we enforce
# "rounded toward zero" semantics. For example, for (-5) / 2 an
# implementation may give -3 as the result with the remainder being
# 1. This function ensures we always return -2 (closer to zero).
result = value // divider
remainder = value % divider
if result < 0 and remainder > 0:
return result + 1
else:
return result | python | def _RoundTowardZero(value, divider):
"""Truncates the remainder part after division."""
# For some languanges, the sign of the remainder is implementation
# dependent if any of the operands is negative. Here we enforce
# "rounded toward zero" semantics. For example, for (-5) / 2 an
# implementation may give -3 as the result with the remainder being
# 1. This function ensures we always return -2 (closer to zero).
result = value // divider
remainder = value % divider
if result < 0 and remainder > 0:
return result + 1
else:
return result | [
"def",
"_RoundTowardZero",
"(",
"value",
",",
"divider",
")",
":",
"# For some languanges, the sign of the remainder is implementation",
"# dependent if any of the operands is negative. Here we enforce",
"# \"rounded toward zero\" semantics. For example, for (-5) / 2 an",
"# implementation may give -3 as the result with the remainder being",
"# 1. This function ensures we always return -2 (closer to zero).",
"result",
"=",
"value",
"//",
"divider",
"remainder",
"=",
"value",
"%",
"divider",
"if",
"result",
"<",
"0",
"and",
"remainder",
">",
"0",
":",
"return",
"result",
"+",
"1",
"else",
":",
"return",
"result"
] | Truncates the remainder part after division. | [
"Truncates",
"the",
"remainder",
"part",
"after",
"division",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L378-L390 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _IsValidPath | def _IsValidPath(message_descriptor, path):
"""Checks whether the path is valid for Message Descriptor."""
parts = path.split('.')
last = parts.pop()
for name in parts:
field = message_descriptor.fields_by_name[name]
if (field is None or
field.label == FieldDescriptor.LABEL_REPEATED or
field.type != FieldDescriptor.TYPE_MESSAGE):
return False
message_descriptor = field.message_type
return last in message_descriptor.fields_by_name | python | def _IsValidPath(message_descriptor, path):
"""Checks whether the path is valid for Message Descriptor."""
parts = path.split('.')
last = parts.pop()
for name in parts:
field = message_descriptor.fields_by_name[name]
if (field is None or
field.label == FieldDescriptor.LABEL_REPEATED or
field.type != FieldDescriptor.TYPE_MESSAGE):
return False
message_descriptor = field.message_type
return last in message_descriptor.fields_by_name | [
"def",
"_IsValidPath",
"(",
"message_descriptor",
",",
"path",
")",
":",
"parts",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"last",
"=",
"parts",
".",
"pop",
"(",
")",
"for",
"name",
"in",
"parts",
":",
"field",
"=",
"message_descriptor",
".",
"fields_by_name",
"[",
"name",
"]",
"if",
"(",
"field",
"is",
"None",
"or",
"field",
".",
"label",
"==",
"FieldDescriptor",
".",
"LABEL_REPEATED",
"or",
"field",
".",
"type",
"!=",
"FieldDescriptor",
".",
"TYPE_MESSAGE",
")",
":",
"return",
"False",
"message_descriptor",
"=",
"field",
".",
"message_type",
"return",
"last",
"in",
"message_descriptor",
".",
"fields_by_name"
] | Checks whether the path is valid for Message Descriptor. | [
"Checks",
"whether",
"the",
"path",
"is",
"valid",
"for",
"Message",
"Descriptor",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L471-L482 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _CheckFieldMaskMessage | def _CheckFieldMaskMessage(message):
"""Raises ValueError if message is not a FieldMask."""
message_descriptor = message.DESCRIPTOR
if (message_descriptor.name != 'FieldMask' or
message_descriptor.file.name != 'google/protobuf/field_mask.proto'):
raise ValueError('Message {0} is not a FieldMask.'.format(
message_descriptor.full_name)) | python | def _CheckFieldMaskMessage(message):
"""Raises ValueError if message is not a FieldMask."""
message_descriptor = message.DESCRIPTOR
if (message_descriptor.name != 'FieldMask' or
message_descriptor.file.name != 'google/protobuf/field_mask.proto'):
raise ValueError('Message {0} is not a FieldMask.'.format(
message_descriptor.full_name)) | [
"def",
"_CheckFieldMaskMessage",
"(",
"message",
")",
":",
"message_descriptor",
"=",
"message",
".",
"DESCRIPTOR",
"if",
"(",
"message_descriptor",
".",
"name",
"!=",
"'FieldMask'",
"or",
"message_descriptor",
".",
"file",
".",
"name",
"!=",
"'google/protobuf/field_mask.proto'",
")",
":",
"raise",
"ValueError",
"(",
"'Message {0} is not a FieldMask.'",
".",
"format",
"(",
"message_descriptor",
".",
"full_name",
")",
")"
] | Raises ValueError if message is not a FieldMask. | [
"Raises",
"ValueError",
"if",
"message",
"is",
"not",
"a",
"FieldMask",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L485-L491 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _SnakeCaseToCamelCase | def _SnakeCaseToCamelCase(path_name):
"""Converts a path name from snake_case to camelCase."""
result = []
after_underscore = False
for c in path_name:
if c.isupper():
raise Error('Fail to print FieldMask to Json string: Path name '
'{0} must not contain uppercase letters.'.format(path_name))
if after_underscore:
if c.islower():
result.append(c.upper())
after_underscore = False
else:
raise Error('Fail to print FieldMask to Json string: The '
'character after a "_" must be a lowercase letter '
'in path name {0}.'.format(path_name))
elif c == '_':
after_underscore = True
else:
result += c
if after_underscore:
raise Error('Fail to print FieldMask to Json string: Trailing "_" '
'in path name {0}.'.format(path_name))
return ''.join(result) | python | def _SnakeCaseToCamelCase(path_name):
"""Converts a path name from snake_case to camelCase."""
result = []
after_underscore = False
for c in path_name:
if c.isupper():
raise Error('Fail to print FieldMask to Json string: Path name '
'{0} must not contain uppercase letters.'.format(path_name))
if after_underscore:
if c.islower():
result.append(c.upper())
after_underscore = False
else:
raise Error('Fail to print FieldMask to Json string: The '
'character after a "_" must be a lowercase letter '
'in path name {0}.'.format(path_name))
elif c == '_':
after_underscore = True
else:
result += c
if after_underscore:
raise Error('Fail to print FieldMask to Json string: Trailing "_" '
'in path name {0}.'.format(path_name))
return ''.join(result) | [
"def",
"_SnakeCaseToCamelCase",
"(",
"path_name",
")",
":",
"result",
"=",
"[",
"]",
"after_underscore",
"=",
"False",
"for",
"c",
"in",
"path_name",
":",
"if",
"c",
".",
"isupper",
"(",
")",
":",
"raise",
"Error",
"(",
"'Fail to print FieldMask to Json string: Path name '",
"'{0} must not contain uppercase letters.'",
".",
"format",
"(",
"path_name",
")",
")",
"if",
"after_underscore",
":",
"if",
"c",
".",
"islower",
"(",
")",
":",
"result",
".",
"append",
"(",
"c",
".",
"upper",
"(",
")",
")",
"after_underscore",
"=",
"False",
"else",
":",
"raise",
"Error",
"(",
"'Fail to print FieldMask to Json string: The '",
"'character after a \"_\" must be a lowercase letter '",
"'in path name {0}.'",
".",
"format",
"(",
"path_name",
")",
")",
"elif",
"c",
"==",
"'_'",
":",
"after_underscore",
"=",
"True",
"else",
":",
"result",
"+=",
"c",
"if",
"after_underscore",
":",
"raise",
"Error",
"(",
"'Fail to print FieldMask to Json string: Trailing \"_\" '",
"'in path name {0}.'",
".",
"format",
"(",
"path_name",
")",
")",
"return",
"''",
".",
"join",
"(",
"result",
")"
] | Converts a path name from snake_case to camelCase. | [
"Converts",
"a",
"path",
"name",
"from",
"snake_case",
"to",
"camelCase",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L494-L518 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _CamelCaseToSnakeCase | def _CamelCaseToSnakeCase(path_name):
"""Converts a field name from camelCase to snake_case."""
result = []
for c in path_name:
if c == '_':
raise ParseError('Fail to parse FieldMask: Path name '
'{0} must not contain "_"s.'.format(path_name))
if c.isupper():
result += '_'
result += c.lower()
else:
result += c
return ''.join(result) | python | def _CamelCaseToSnakeCase(path_name):
"""Converts a field name from camelCase to snake_case."""
result = []
for c in path_name:
if c == '_':
raise ParseError('Fail to parse FieldMask: Path name '
'{0} must not contain "_"s.'.format(path_name))
if c.isupper():
result += '_'
result += c.lower()
else:
result += c
return ''.join(result) | [
"def",
"_CamelCaseToSnakeCase",
"(",
"path_name",
")",
":",
"result",
"=",
"[",
"]",
"for",
"c",
"in",
"path_name",
":",
"if",
"c",
"==",
"'_'",
":",
"raise",
"ParseError",
"(",
"'Fail to parse FieldMask: Path name '",
"'{0} must not contain \"_\"s.'",
".",
"format",
"(",
"path_name",
")",
")",
"if",
"c",
".",
"isupper",
"(",
")",
":",
"result",
"+=",
"'_'",
"result",
"+=",
"c",
".",
"lower",
"(",
")",
"else",
":",
"result",
"+=",
"c",
"return",
"''",
".",
"join",
"(",
"result",
")"
] | Converts a field name from camelCase to snake_case. | [
"Converts",
"a",
"field",
"name",
"from",
"camelCase",
"to",
"snake_case",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L521-L533 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _MergeMessage | def _MergeMessage(
node, source, destination, replace_message, replace_repeated):
"""Merge all fields specified by a sub-tree from source to destination."""
source_descriptor = source.DESCRIPTOR
for name in node:
child = node[name]
field = source_descriptor.fields_by_name[name]
if field is None:
raise ValueError('Error: Can\'t find field {0} in message {1}.'.format(
name, source_descriptor.full_name))
if child:
# Sub-paths are only allowed for singular message fields.
if (field.label == FieldDescriptor.LABEL_REPEATED or
field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE):
raise ValueError('Error: Field {0} in message {1} is not a singular '
'message field and cannot have sub-fields.'.format(
name, source_descriptor.full_name))
_MergeMessage(
child, getattr(source, name), getattr(destination, name),
replace_message, replace_repeated)
continue
if field.label == FieldDescriptor.LABEL_REPEATED:
if replace_repeated:
destination.ClearField(_StrConvert(name))
repeated_source = getattr(source, name)
repeated_destination = getattr(destination, name)
if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
for item in repeated_source:
repeated_destination.add().MergeFrom(item)
else:
repeated_destination.extend(repeated_source)
else:
if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
if replace_message:
destination.ClearField(_StrConvert(name))
if source.HasField(name):
getattr(destination, name).MergeFrom(getattr(source, name))
else:
setattr(destination, name, getattr(source, name)) | python | def _MergeMessage(
node, source, destination, replace_message, replace_repeated):
"""Merge all fields specified by a sub-tree from source to destination."""
source_descriptor = source.DESCRIPTOR
for name in node:
child = node[name]
field = source_descriptor.fields_by_name[name]
if field is None:
raise ValueError('Error: Can\'t find field {0} in message {1}.'.format(
name, source_descriptor.full_name))
if child:
# Sub-paths are only allowed for singular message fields.
if (field.label == FieldDescriptor.LABEL_REPEATED or
field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE):
raise ValueError('Error: Field {0} in message {1} is not a singular '
'message field and cannot have sub-fields.'.format(
name, source_descriptor.full_name))
_MergeMessage(
child, getattr(source, name), getattr(destination, name),
replace_message, replace_repeated)
continue
if field.label == FieldDescriptor.LABEL_REPEATED:
if replace_repeated:
destination.ClearField(_StrConvert(name))
repeated_source = getattr(source, name)
repeated_destination = getattr(destination, name)
if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
for item in repeated_source:
repeated_destination.add().MergeFrom(item)
else:
repeated_destination.extend(repeated_source)
else:
if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
if replace_message:
destination.ClearField(_StrConvert(name))
if source.HasField(name):
getattr(destination, name).MergeFrom(getattr(source, name))
else:
setattr(destination, name, getattr(source, name)) | [
"def",
"_MergeMessage",
"(",
"node",
",",
"source",
",",
"destination",
",",
"replace_message",
",",
"replace_repeated",
")",
":",
"source_descriptor",
"=",
"source",
".",
"DESCRIPTOR",
"for",
"name",
"in",
"node",
":",
"child",
"=",
"node",
"[",
"name",
"]",
"field",
"=",
"source_descriptor",
".",
"fields_by_name",
"[",
"name",
"]",
"if",
"field",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Error: Can\\'t find field {0} in message {1}.'",
".",
"format",
"(",
"name",
",",
"source_descriptor",
".",
"full_name",
")",
")",
"if",
"child",
":",
"# Sub-paths are only allowed for singular message fields.",
"if",
"(",
"field",
".",
"label",
"==",
"FieldDescriptor",
".",
"LABEL_REPEATED",
"or",
"field",
".",
"cpp_type",
"!=",
"FieldDescriptor",
".",
"CPPTYPE_MESSAGE",
")",
":",
"raise",
"ValueError",
"(",
"'Error: Field {0} in message {1} is not a singular '",
"'message field and cannot have sub-fields.'",
".",
"format",
"(",
"name",
",",
"source_descriptor",
".",
"full_name",
")",
")",
"_MergeMessage",
"(",
"child",
",",
"getattr",
"(",
"source",
",",
"name",
")",
",",
"getattr",
"(",
"destination",
",",
"name",
")",
",",
"replace_message",
",",
"replace_repeated",
")",
"continue",
"if",
"field",
".",
"label",
"==",
"FieldDescriptor",
".",
"LABEL_REPEATED",
":",
"if",
"replace_repeated",
":",
"destination",
".",
"ClearField",
"(",
"_StrConvert",
"(",
"name",
")",
")",
"repeated_source",
"=",
"getattr",
"(",
"source",
",",
"name",
")",
"repeated_destination",
"=",
"getattr",
"(",
"destination",
",",
"name",
")",
"if",
"field",
".",
"cpp_type",
"==",
"FieldDescriptor",
".",
"CPPTYPE_MESSAGE",
":",
"for",
"item",
"in",
"repeated_source",
":",
"repeated_destination",
".",
"add",
"(",
")",
".",
"MergeFrom",
"(",
"item",
")",
"else",
":",
"repeated_destination",
".",
"extend",
"(",
"repeated_source",
")",
"else",
":",
"if",
"field",
".",
"cpp_type",
"==",
"FieldDescriptor",
".",
"CPPTYPE_MESSAGE",
":",
"if",
"replace_message",
":",
"destination",
".",
"ClearField",
"(",
"_StrConvert",
"(",
"name",
")",
")",
"if",
"source",
".",
"HasField",
"(",
"name",
")",
":",
"getattr",
"(",
"destination",
",",
"name",
")",
".",
"MergeFrom",
"(",
"getattr",
"(",
"source",
",",
"name",
")",
")",
"else",
":",
"setattr",
"(",
"destination",
",",
"name",
",",
"getattr",
"(",
"source",
",",
"name",
")",
")"
] | Merge all fields specified by a sub-tree from source to destination. | [
"Merge",
"all",
"fields",
"specified",
"by",
"a",
"sub",
"-",
"tree",
"from",
"source",
"to",
"destination",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L633-L671 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _AddFieldPaths | def _AddFieldPaths(node, prefix, field_mask):
"""Adds the field paths descended from node to field_mask."""
if not node:
field_mask.paths.append(prefix)
return
for name in sorted(node):
if prefix:
child_path = prefix + '.' + name
else:
child_path = name
_AddFieldPaths(node[name], child_path, field_mask) | python | def _AddFieldPaths(node, prefix, field_mask):
"""Adds the field paths descended from node to field_mask."""
if not node:
field_mask.paths.append(prefix)
return
for name in sorted(node):
if prefix:
child_path = prefix + '.' + name
else:
child_path = name
_AddFieldPaths(node[name], child_path, field_mask) | [
"def",
"_AddFieldPaths",
"(",
"node",
",",
"prefix",
",",
"field_mask",
")",
":",
"if",
"not",
"node",
":",
"field_mask",
".",
"paths",
".",
"append",
"(",
"prefix",
")",
"return",
"for",
"name",
"in",
"sorted",
"(",
"node",
")",
":",
"if",
"prefix",
":",
"child_path",
"=",
"prefix",
"+",
"'.'",
"+",
"name",
"else",
":",
"child_path",
"=",
"name",
"_AddFieldPaths",
"(",
"node",
"[",
"name",
"]",
",",
"child_path",
",",
"field_mask",
")"
] | Adds the field paths descended from node to field_mask. | [
"Adds",
"the",
"field",
"paths",
"descended",
"from",
"node",
"to",
"field_mask",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L674-L684 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Any.Pack | def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
"""Packs the specified message into current Any message."""
if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
else:
self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
self.value = msg.SerializeToString() | python | def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
"""Packs the specified message into current Any message."""
if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
else:
self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
self.value = msg.SerializeToString() | [
"def",
"Pack",
"(",
"self",
",",
"msg",
",",
"type_url_prefix",
"=",
"'type.googleapis.com/'",
")",
":",
"if",
"len",
"(",
"type_url_prefix",
")",
"<",
"1",
"or",
"type_url_prefix",
"[",
"-",
"1",
"]",
"!=",
"'/'",
":",
"self",
".",
"type_url",
"=",
"'%s/%s'",
"%",
"(",
"type_url_prefix",
",",
"msg",
".",
"DESCRIPTOR",
".",
"full_name",
")",
"else",
":",
"self",
".",
"type_url",
"=",
"'%s%s'",
"%",
"(",
"type_url_prefix",
",",
"msg",
".",
"DESCRIPTOR",
".",
"full_name",
")",
"self",
".",
"value",
"=",
"msg",
".",
"SerializeToString",
"(",
")"
] | Packs the specified message into current Any message. | [
"Packs",
"the",
"specified",
"message",
"into",
"current",
"Any",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L70-L76 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Any.Unpack | def Unpack(self, msg):
"""Unpacks the current Any message into specified message."""
descriptor = msg.DESCRIPTOR
if not self.Is(descriptor):
return False
msg.ParseFromString(self.value)
return True | python | def Unpack(self, msg):
"""Unpacks the current Any message into specified message."""
descriptor = msg.DESCRIPTOR
if not self.Is(descriptor):
return False
msg.ParseFromString(self.value)
return True | [
"def",
"Unpack",
"(",
"self",
",",
"msg",
")",
":",
"descriptor",
"=",
"msg",
".",
"DESCRIPTOR",
"if",
"not",
"self",
".",
"Is",
"(",
"descriptor",
")",
":",
"return",
"False",
"msg",
".",
"ParseFromString",
"(",
"self",
".",
"value",
")",
"return",
"True"
] | Unpacks the current Any message into specified message. | [
"Unpacks",
"the",
"current",
"Any",
"message",
"into",
"specified",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L78-L84 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Timestamp.ToJsonString | def ToJsonString(self):
"""Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
"""
nanos = self.nanos % _NANOS_PER_SECOND
total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
seconds = total_sec % _SECONDS_PER_DAY
days = (total_sec - seconds) // _SECONDS_PER_DAY
dt = datetime(1970, 1, 1) + timedelta(days, seconds)
result = dt.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 'Z'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03dZ' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06dZ' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09dZ' % nanos | python | def ToJsonString(self):
"""Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
"""
nanos = self.nanos % _NANOS_PER_SECOND
total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
seconds = total_sec % _SECONDS_PER_DAY
days = (total_sec - seconds) // _SECONDS_PER_DAY
dt = datetime(1970, 1, 1) + timedelta(days, seconds)
result = dt.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 'Z'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03dZ' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06dZ' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09dZ' % nanos | [
"def",
"ToJsonString",
"(",
"self",
")",
":",
"nanos",
"=",
"self",
".",
"nanos",
"%",
"_NANOS_PER_SECOND",
"total_sec",
"=",
"self",
".",
"seconds",
"+",
"(",
"self",
".",
"nanos",
"-",
"nanos",
")",
"//",
"_NANOS_PER_SECOND",
"seconds",
"=",
"total_sec",
"%",
"_SECONDS_PER_DAY",
"days",
"=",
"(",
"total_sec",
"-",
"seconds",
")",
"//",
"_SECONDS_PER_DAY",
"dt",
"=",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
"+",
"timedelta",
"(",
"days",
",",
"seconds",
")",
"result",
"=",
"dt",
".",
"isoformat",
"(",
")",
"if",
"(",
"nanos",
"%",
"1e9",
")",
"==",
"0",
":",
"# If there are 0 fractional digits, the fractional",
"# point '.' should be omitted when serializing.",
"return",
"result",
"+",
"'Z'",
"if",
"(",
"nanos",
"%",
"1e6",
")",
"==",
"0",
":",
"# Serialize 3 fractional digits.",
"return",
"result",
"+",
"'.%03dZ'",
"%",
"(",
"nanos",
"/",
"1e6",
")",
"if",
"(",
"nanos",
"%",
"1e3",
")",
"==",
"0",
":",
"# Serialize 6 fractional digits.",
"return",
"result",
"+",
"'.%06dZ'",
"%",
"(",
"nanos",
"/",
"1e3",
")",
"# Serialize 9 fractional digits.",
"return",
"result",
"+",
"'.%09dZ'",
"%",
"nanos"
] | Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z' | [
"Converts",
"Timestamp",
"to",
"RFC",
"3339",
"date",
"string",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L99-L125 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Timestamp.FromJsonString | def FromJsonString(self, value):
"""Parse a RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing problems.
"""
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ParseError(
'Failed to parse timestamp: missing valid timezone offset.')
time_value = value[0:timezone_offset]
# Parse datetime and nanos.
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
td = date_object - datetime(1970, 1, 1)
seconds = td.seconds + td.days * _SECONDS_PER_DAY
if len(nano_value) > 9:
raise ParseError(
'Failed to parse Timestamp: nanos {0} more than '
'9 fractional digits.'.format(nano_value))
if nano_value:
nanos = round(float('0.' + nano_value) * 1e9)
else:
nanos = 0
# Parse timezone offsets.
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ParseError('Failed to parse timestamp: invalid trailing'
' data {0}.'.format(value))
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ParseError(
'Invalid timezone offset value: {0}.'.format(timezone))
if timezone[0] == '+':
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
# Set seconds and nanos
self.seconds = int(seconds)
self.nanos = int(nanos) | python | def FromJsonString(self, value):
"""Parse a RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing problems.
"""
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ParseError(
'Failed to parse timestamp: missing valid timezone offset.')
time_value = value[0:timezone_offset]
# Parse datetime and nanos.
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
td = date_object - datetime(1970, 1, 1)
seconds = td.seconds + td.days * _SECONDS_PER_DAY
if len(nano_value) > 9:
raise ParseError(
'Failed to parse Timestamp: nanos {0} more than '
'9 fractional digits.'.format(nano_value))
if nano_value:
nanos = round(float('0.' + nano_value) * 1e9)
else:
nanos = 0
# Parse timezone offsets.
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ParseError('Failed to parse timestamp: invalid trailing'
' data {0}.'.format(value))
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ParseError(
'Invalid timezone offset value: {0}.'.format(timezone))
if timezone[0] == '+':
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
# Set seconds and nanos
self.seconds = int(seconds)
self.nanos = int(nanos) | [
"def",
"FromJsonString",
"(",
"self",
",",
"value",
")",
":",
"timezone_offset",
"=",
"value",
".",
"find",
"(",
"'Z'",
")",
"if",
"timezone_offset",
"==",
"-",
"1",
":",
"timezone_offset",
"=",
"value",
".",
"find",
"(",
"'+'",
")",
"if",
"timezone_offset",
"==",
"-",
"1",
":",
"timezone_offset",
"=",
"value",
".",
"rfind",
"(",
"'-'",
")",
"if",
"timezone_offset",
"==",
"-",
"1",
":",
"raise",
"ParseError",
"(",
"'Failed to parse timestamp: missing valid timezone offset.'",
")",
"time_value",
"=",
"value",
"[",
"0",
":",
"timezone_offset",
"]",
"# Parse datetime and nanos.",
"point_position",
"=",
"time_value",
".",
"find",
"(",
"'.'",
")",
"if",
"point_position",
"==",
"-",
"1",
":",
"second_value",
"=",
"time_value",
"nano_value",
"=",
"''",
"else",
":",
"second_value",
"=",
"time_value",
"[",
":",
"point_position",
"]",
"nano_value",
"=",
"time_value",
"[",
"point_position",
"+",
"1",
":",
"]",
"date_object",
"=",
"datetime",
".",
"strptime",
"(",
"second_value",
",",
"_TIMESTAMPFOMAT",
")",
"td",
"=",
"date_object",
"-",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
"seconds",
"=",
"td",
".",
"seconds",
"+",
"td",
".",
"days",
"*",
"_SECONDS_PER_DAY",
"if",
"len",
"(",
"nano_value",
")",
">",
"9",
":",
"raise",
"ParseError",
"(",
"'Failed to parse Timestamp: nanos {0} more than '",
"'9 fractional digits.'",
".",
"format",
"(",
"nano_value",
")",
")",
"if",
"nano_value",
":",
"nanos",
"=",
"round",
"(",
"float",
"(",
"'0.'",
"+",
"nano_value",
")",
"*",
"1e9",
")",
"else",
":",
"nanos",
"=",
"0",
"# Parse timezone offsets.",
"if",
"value",
"[",
"timezone_offset",
"]",
"==",
"'Z'",
":",
"if",
"len",
"(",
"value",
")",
"!=",
"timezone_offset",
"+",
"1",
":",
"raise",
"ParseError",
"(",
"'Failed to parse timestamp: invalid trailing'",
"' data {0}.'",
".",
"format",
"(",
"value",
")",
")",
"else",
":",
"timezone",
"=",
"value",
"[",
"timezone_offset",
":",
"]",
"pos",
"=",
"timezone",
".",
"find",
"(",
"':'",
")",
"if",
"pos",
"==",
"-",
"1",
":",
"raise",
"ParseError",
"(",
"'Invalid timezone offset value: {0}.'",
".",
"format",
"(",
"timezone",
")",
")",
"if",
"timezone",
"[",
"0",
"]",
"==",
"'+'",
":",
"seconds",
"-=",
"(",
"int",
"(",
"timezone",
"[",
"1",
":",
"pos",
"]",
")",
"*",
"60",
"+",
"int",
"(",
"timezone",
"[",
"pos",
"+",
"1",
":",
"]",
")",
")",
"*",
"60",
"else",
":",
"seconds",
"+=",
"(",
"int",
"(",
"timezone",
"[",
"1",
":",
"pos",
"]",
")",
"*",
"60",
"+",
"int",
"(",
"timezone",
"[",
"pos",
"+",
"1",
":",
"]",
")",
")",
"*",
"60",
"# Set seconds and nanos",
"self",
".",
"seconds",
"=",
"int",
"(",
"seconds",
")",
"self",
".",
"nanos",
"=",
"int",
"(",
"nanos",
")"
] | Parse a RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing problems. | [
"Parse",
"a",
"RFC",
"3339",
"date",
"string",
"format",
"to",
"Timestamp",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L127-L183 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Timestamp.FromNanoseconds | def FromNanoseconds(self, nanos):
"""Converts nanoseconds since epoch to Timestamp."""
self.seconds = nanos // _NANOS_PER_SECOND
self.nanos = nanos % _NANOS_PER_SECOND | python | def FromNanoseconds(self, nanos):
"""Converts nanoseconds since epoch to Timestamp."""
self.seconds = nanos // _NANOS_PER_SECOND
self.nanos = nanos % _NANOS_PER_SECOND | [
"def",
"FromNanoseconds",
"(",
"self",
",",
"nanos",
")",
":",
"self",
".",
"seconds",
"=",
"nanos",
"//",
"_NANOS_PER_SECOND",
"self",
".",
"nanos",
"=",
"nanos",
"%",
"_NANOS_PER_SECOND"
] | Converts nanoseconds since epoch to Timestamp. | [
"Converts",
"nanoseconds",
"since",
"epoch",
"to",
"Timestamp",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L207-L210 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Timestamp.FromMicroseconds | def FromMicroseconds(self, micros):
"""Converts microseconds since epoch to Timestamp."""
self.seconds = micros // _MICROS_PER_SECOND
self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND | python | def FromMicroseconds(self, micros):
"""Converts microseconds since epoch to Timestamp."""
self.seconds = micros // _MICROS_PER_SECOND
self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND | [
"def",
"FromMicroseconds",
"(",
"self",
",",
"micros",
")",
":",
"self",
".",
"seconds",
"=",
"micros",
"//",
"_MICROS_PER_SECOND",
"self",
".",
"nanos",
"=",
"(",
"micros",
"%",
"_MICROS_PER_SECOND",
")",
"*",
"_NANOS_PER_MICROSECOND"
] | Converts microseconds since epoch to Timestamp. | [
"Converts",
"microseconds",
"since",
"epoch",
"to",
"Timestamp",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L212-L215 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Timestamp.FromMilliseconds | def FromMilliseconds(self, millis):
"""Converts milliseconds since epoch to Timestamp."""
self.seconds = millis // _MILLIS_PER_SECOND
self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND | python | def FromMilliseconds(self, millis):
"""Converts milliseconds since epoch to Timestamp."""
self.seconds = millis // _MILLIS_PER_SECOND
self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND | [
"def",
"FromMilliseconds",
"(",
"self",
",",
"millis",
")",
":",
"self",
".",
"seconds",
"=",
"millis",
"//",
"_MILLIS_PER_SECOND",
"self",
".",
"nanos",
"=",
"(",
"millis",
"%",
"_MILLIS_PER_SECOND",
")",
"*",
"_NANOS_PER_MILLISECOND"
] | Converts milliseconds since epoch to Timestamp. | [
"Converts",
"milliseconds",
"since",
"epoch",
"to",
"Timestamp",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L217-L220 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Timestamp.ToDatetime | def ToDatetime(self):
"""Converts Timestamp to datetime."""
return datetime.utcfromtimestamp(
self.seconds + self.nanos / float(_NANOS_PER_SECOND)) | python | def ToDatetime(self):
"""Converts Timestamp to datetime."""
return datetime.utcfromtimestamp(
self.seconds + self.nanos / float(_NANOS_PER_SECOND)) | [
"def",
"ToDatetime",
"(",
"self",
")",
":",
"return",
"datetime",
".",
"utcfromtimestamp",
"(",
"self",
".",
"seconds",
"+",
"self",
".",
"nanos",
"/",
"float",
"(",
"_NANOS_PER_SECOND",
")",
")"
] | Converts Timestamp to datetime. | [
"Converts",
"Timestamp",
"to",
"datetime",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L227-L230 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Timestamp.FromDatetime | def FromDatetime(self, dt):
"""Converts datetime to Timestamp."""
td = dt - datetime(1970, 1, 1)
self.seconds = td.seconds + td.days * _SECONDS_PER_DAY
self.nanos = td.microseconds * _NANOS_PER_MICROSECOND | python | def FromDatetime(self, dt):
"""Converts datetime to Timestamp."""
td = dt - datetime(1970, 1, 1)
self.seconds = td.seconds + td.days * _SECONDS_PER_DAY
self.nanos = td.microseconds * _NANOS_PER_MICROSECOND | [
"def",
"FromDatetime",
"(",
"self",
",",
"dt",
")",
":",
"td",
"=",
"dt",
"-",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
"self",
".",
"seconds",
"=",
"td",
".",
"seconds",
"+",
"td",
".",
"days",
"*",
"_SECONDS_PER_DAY",
"self",
".",
"nanos",
"=",
"td",
".",
"microseconds",
"*",
"_NANOS_PER_MICROSECOND"
] | Converts datetime to Timestamp. | [
"Converts",
"datetime",
"to",
"Timestamp",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L232-L236 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Duration.ToMicroseconds | def ToMicroseconds(self):
"""Converts a Duration to microseconds."""
micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
return self.seconds * _MICROS_PER_SECOND + micros | python | def ToMicroseconds(self):
"""Converts a Duration to microseconds."""
micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
return self.seconds * _MICROS_PER_SECOND + micros | [
"def",
"ToMicroseconds",
"(",
"self",
")",
":",
"micros",
"=",
"_RoundTowardZero",
"(",
"self",
".",
"nanos",
",",
"_NANOS_PER_MICROSECOND",
")",
"return",
"self",
".",
"seconds",
"*",
"_MICROS_PER_SECOND",
"+",
"micros"
] | Converts a Duration to microseconds. | [
"Converts",
"a",
"Duration",
"to",
"microseconds",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L310-L313 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Duration.ToMilliseconds | def ToMilliseconds(self):
"""Converts a Duration to milliseconds."""
millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
return self.seconds * _MILLIS_PER_SECOND + millis | python | def ToMilliseconds(self):
"""Converts a Duration to milliseconds."""
millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
return self.seconds * _MILLIS_PER_SECOND + millis | [
"def",
"ToMilliseconds",
"(",
"self",
")",
":",
"millis",
"=",
"_RoundTowardZero",
"(",
"self",
".",
"nanos",
",",
"_NANOS_PER_MILLISECOND",
")",
"return",
"self",
".",
"seconds",
"*",
"_MILLIS_PER_SECOND",
"+",
"millis"
] | Converts a Duration to milliseconds. | [
"Converts",
"a",
"Duration",
"to",
"milliseconds",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L315-L318 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Duration.FromMicroseconds | def FromMicroseconds(self, micros):
"""Converts microseconds to Duration."""
self._NormalizeDuration(
micros // _MICROS_PER_SECOND,
(micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) | python | def FromMicroseconds(self, micros):
"""Converts microseconds to Duration."""
self._NormalizeDuration(
micros // _MICROS_PER_SECOND,
(micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) | [
"def",
"FromMicroseconds",
"(",
"self",
",",
"micros",
")",
":",
"self",
".",
"_NormalizeDuration",
"(",
"micros",
"//",
"_MICROS_PER_SECOND",
",",
"(",
"micros",
"%",
"_MICROS_PER_SECOND",
")",
"*",
"_NANOS_PER_MICROSECOND",
")"
] | Converts microseconds to Duration. | [
"Converts",
"microseconds",
"to",
"Duration",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L329-L333 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Duration.FromMilliseconds | def FromMilliseconds(self, millis):
"""Converts milliseconds to Duration."""
self._NormalizeDuration(
millis // _MILLIS_PER_SECOND,
(millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) | python | def FromMilliseconds(self, millis):
"""Converts milliseconds to Duration."""
self._NormalizeDuration(
millis // _MILLIS_PER_SECOND,
(millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) | [
"def",
"FromMilliseconds",
"(",
"self",
",",
"millis",
")",
":",
"self",
".",
"_NormalizeDuration",
"(",
"millis",
"//",
"_MILLIS_PER_SECOND",
",",
"(",
"millis",
"%",
"_MILLIS_PER_SECOND",
")",
"*",
"_NANOS_PER_MILLISECOND",
")"
] | Converts milliseconds to Duration. | [
"Converts",
"milliseconds",
"to",
"Duration",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L335-L339 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Duration.ToTimedelta | def ToTimedelta(self):
"""Converts Duration to timedelta."""
return timedelta(
seconds=self.seconds, microseconds=_RoundTowardZero(
self.nanos, _NANOS_PER_MICROSECOND)) | python | def ToTimedelta(self):
"""Converts Duration to timedelta."""
return timedelta(
seconds=self.seconds, microseconds=_RoundTowardZero(
self.nanos, _NANOS_PER_MICROSECOND)) | [
"def",
"ToTimedelta",
"(",
"self",
")",
":",
"return",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"seconds",
",",
"microseconds",
"=",
"_RoundTowardZero",
"(",
"self",
".",
"nanos",
",",
"_NANOS_PER_MICROSECOND",
")",
")"
] | Converts Duration to timedelta. | [
"Converts",
"Duration",
"to",
"timedelta",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L346-L350 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Duration.FromTimedelta | def FromTimedelta(self, td):
"""Convertd timedelta to Duration."""
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY,
td.microseconds * _NANOS_PER_MICROSECOND) | python | def FromTimedelta(self, td):
"""Convertd timedelta to Duration."""
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY,
td.microseconds * _NANOS_PER_MICROSECOND) | [
"def",
"FromTimedelta",
"(",
"self",
",",
"td",
")",
":",
"self",
".",
"_NormalizeDuration",
"(",
"td",
".",
"seconds",
"+",
"td",
".",
"days",
"*",
"_SECONDS_PER_DAY",
",",
"td",
".",
"microseconds",
"*",
"_NANOS_PER_MICROSECOND",
")"
] | Convertd timedelta to Duration. | [
"Convertd",
"timedelta",
"to",
"Duration",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L352-L355 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | Duration._NormalizeDuration | def _NormalizeDuration(self, seconds, nanos):
"""Set Duration by seconds and nonas."""
# Force nanos to be negative if the duration is negative.
if seconds < 0 and nanos > 0:
seconds += 1
nanos -= _NANOS_PER_SECOND
self.seconds = seconds
self.nanos = nanos | python | def _NormalizeDuration(self, seconds, nanos):
"""Set Duration by seconds and nonas."""
# Force nanos to be negative if the duration is negative.
if seconds < 0 and nanos > 0:
seconds += 1
nanos -= _NANOS_PER_SECOND
self.seconds = seconds
self.nanos = nanos | [
"def",
"_NormalizeDuration",
"(",
"self",
",",
"seconds",
",",
"nanos",
")",
":",
"# Force nanos to be negative if the duration is negative.",
"if",
"seconds",
"<",
"0",
"and",
"nanos",
">",
"0",
":",
"seconds",
"+=",
"1",
"nanos",
"-=",
"_NANOS_PER_SECOND",
"self",
".",
"seconds",
"=",
"seconds",
"self",
".",
"nanos",
"=",
"nanos"
] | Set Duration by seconds and nonas. | [
"Set",
"Duration",
"by",
"seconds",
"and",
"nonas",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L357-L364 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | FieldMask.ToJsonString | def ToJsonString(self):
"""Converts FieldMask to string according to proto3 JSON spec."""
camelcase_paths = []
for path in self.paths:
camelcase_paths.append(_SnakeCaseToCamelCase(path))
return ','.join(camelcase_paths) | python | def ToJsonString(self):
"""Converts FieldMask to string according to proto3 JSON spec."""
camelcase_paths = []
for path in self.paths:
camelcase_paths.append(_SnakeCaseToCamelCase(path))
return ','.join(camelcase_paths) | [
"def",
"ToJsonString",
"(",
"self",
")",
":",
"camelcase_paths",
"=",
"[",
"]",
"for",
"path",
"in",
"self",
".",
"paths",
":",
"camelcase_paths",
".",
"append",
"(",
"_SnakeCaseToCamelCase",
"(",
"path",
")",
")",
"return",
"','",
".",
"join",
"(",
"camelcase_paths",
")"
] | Converts FieldMask to string according to proto3 JSON spec. | [
"Converts",
"FieldMask",
"to",
"string",
"according",
"to",
"proto3",
"JSON",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L396-L401 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | FieldMask.IsValidForDescriptor | def IsValidForDescriptor(self, message_descriptor):
"""Checks whether the FieldMask is valid for Message Descriptor."""
for path in self.paths:
if not _IsValidPath(message_descriptor, path):
return False
return True | python | def IsValidForDescriptor(self, message_descriptor):
"""Checks whether the FieldMask is valid for Message Descriptor."""
for path in self.paths:
if not _IsValidPath(message_descriptor, path):
return False
return True | [
"def",
"IsValidForDescriptor",
"(",
"self",
",",
"message_descriptor",
")",
":",
"for",
"path",
"in",
"self",
".",
"paths",
":",
"if",
"not",
"_IsValidPath",
"(",
"message_descriptor",
",",
"path",
")",
":",
"return",
"False",
"return",
"True"
] | Checks whether the FieldMask is valid for Message Descriptor. | [
"Checks",
"whether",
"the",
"FieldMask",
"is",
"valid",
"for",
"Message",
"Descriptor",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L409-L414 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | FieldMask.AllFieldsFromDescriptor | def AllFieldsFromDescriptor(self, message_descriptor):
"""Gets all direct fields of Message Descriptor to FieldMask."""
self.Clear()
for field in message_descriptor.fields:
self.paths.append(field.name) | python | def AllFieldsFromDescriptor(self, message_descriptor):
"""Gets all direct fields of Message Descriptor to FieldMask."""
self.Clear()
for field in message_descriptor.fields:
self.paths.append(field.name) | [
"def",
"AllFieldsFromDescriptor",
"(",
"self",
",",
"message_descriptor",
")",
":",
"self",
".",
"Clear",
"(",
")",
"for",
"field",
"in",
"message_descriptor",
".",
"fields",
":",
"self",
".",
"paths",
".",
"append",
"(",
"field",
".",
"name",
")"
] | Gets all direct fields of Message Descriptor to FieldMask. | [
"Gets",
"all",
"direct",
"fields",
"of",
"Message",
"Descriptor",
"to",
"FieldMask",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L416-L420 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | FieldMask.Union | def Union(self, mask1, mask2):
"""Merges mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
tree.MergeFromFieldMask(mask2)
tree.ToFieldMask(self) | python | def Union(self, mask1, mask2):
"""Merges mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
tree.MergeFromFieldMask(mask2)
tree.ToFieldMask(self) | [
"def",
"Union",
"(",
"self",
",",
"mask1",
",",
"mask2",
")",
":",
"_CheckFieldMaskMessage",
"(",
"mask1",
")",
"_CheckFieldMaskMessage",
"(",
"mask2",
")",
"tree",
"=",
"_FieldMaskTree",
"(",
"mask1",
")",
"tree",
".",
"MergeFromFieldMask",
"(",
"mask2",
")",
"tree",
".",
"ToFieldMask",
"(",
"self",
")"
] | Merges mask1 and mask2 into this FieldMask. | [
"Merges",
"mask1",
"and",
"mask2",
"into",
"this",
"FieldMask",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L435-L441 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | FieldMask.Intersect | def Intersect(self, mask1, mask2):
"""Intersects mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
intersection = _FieldMaskTree()
for path in mask2.paths:
tree.IntersectPath(path, intersection)
intersection.ToFieldMask(self) | python | def Intersect(self, mask1, mask2):
"""Intersects mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
intersection = _FieldMaskTree()
for path in mask2.paths:
tree.IntersectPath(path, intersection)
intersection.ToFieldMask(self) | [
"def",
"Intersect",
"(",
"self",
",",
"mask1",
",",
"mask2",
")",
":",
"_CheckFieldMaskMessage",
"(",
"mask1",
")",
"_CheckFieldMaskMessage",
"(",
"mask2",
")",
"tree",
"=",
"_FieldMaskTree",
"(",
"mask1",
")",
"intersection",
"=",
"_FieldMaskTree",
"(",
")",
"for",
"path",
"in",
"mask2",
".",
"paths",
":",
"tree",
".",
"IntersectPath",
"(",
"path",
",",
"intersection",
")",
"intersection",
".",
"ToFieldMask",
"(",
"self",
")"
] | Intersects mask1 and mask2 into this FieldMask. | [
"Intersects",
"mask1",
"and",
"mask2",
"into",
"this",
"FieldMask",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L443-L451 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | FieldMask.MergeMessage | def MergeMessage(
self, source, destination,
replace_message_field=False, replace_repeated_field=False):
"""Merges fields specified in FieldMask from source to destination.
Args:
source: Source message.
destination: The destination message to be merged into.
replace_message_field: Replace message field if True. Merge message
field if False.
replace_repeated_field: Replace repeated field if True. Append
elements of repeated field if False.
"""
tree = _FieldMaskTree(self)
tree.MergeMessage(
source, destination, replace_message_field, replace_repeated_field) | python | def MergeMessage(
self, source, destination,
replace_message_field=False, replace_repeated_field=False):
"""Merges fields specified in FieldMask from source to destination.
Args:
source: Source message.
destination: The destination message to be merged into.
replace_message_field: Replace message field if True. Merge message
field if False.
replace_repeated_field: Replace repeated field if True. Append
elements of repeated field if False.
"""
tree = _FieldMaskTree(self)
tree.MergeMessage(
source, destination, replace_message_field, replace_repeated_field) | [
"def",
"MergeMessage",
"(",
"self",
",",
"source",
",",
"destination",
",",
"replace_message_field",
"=",
"False",
",",
"replace_repeated_field",
"=",
"False",
")",
":",
"tree",
"=",
"_FieldMaskTree",
"(",
"self",
")",
"tree",
".",
"MergeMessage",
"(",
"source",
",",
"destination",
",",
"replace_message_field",
",",
"replace_repeated_field",
")"
] | Merges fields specified in FieldMask from source to destination.
Args:
source: Source message.
destination: The destination message to be merged into.
replace_message_field: Replace message field if True. Merge message
field if False.
replace_repeated_field: Replace repeated field if True. Append
elements of repeated field if False. | [
"Merges",
"fields",
"specified",
"in",
"FieldMask",
"from",
"source",
"to",
"destination",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L453-L468 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _FieldMaskTree.AddPath | def AddPath(self, path):
"""Adds a field path into the tree.
If the field path to add is a sub-path of an existing field path
in the tree (i.e., a leaf node), it means the tree already matches
the given path so nothing will be added to the tree. If the path
matches an existing non-leaf node in the tree, that non-leaf node
will be turned into a leaf node with all its children removed because
the path matches all the node's children. Otherwise, a new path will
be added.
Args:
path: The field path to add.
"""
node = self._root
for name in path.split('.'):
if name not in node:
node[name] = {}
elif not node[name]:
# Pre-existing empty node implies we already have this entire tree.
return
node = node[name]
# Remove any sub-trees we might have had.
node.clear() | python | def AddPath(self, path):
"""Adds a field path into the tree.
If the field path to add is a sub-path of an existing field path
in the tree (i.e., a leaf node), it means the tree already matches
the given path so nothing will be added to the tree. If the path
matches an existing non-leaf node in the tree, that non-leaf node
will be turned into a leaf node with all its children removed because
the path matches all the node's children. Otherwise, a new path will
be added.
Args:
path: The field path to add.
"""
node = self._root
for name in path.split('.'):
if name not in node:
node[name] = {}
elif not node[name]:
# Pre-existing empty node implies we already have this entire tree.
return
node = node[name]
# Remove any sub-trees we might have had.
node.clear() | [
"def",
"AddPath",
"(",
"self",
",",
"path",
")",
":",
"node",
"=",
"self",
".",
"_root",
"for",
"name",
"in",
"path",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"name",
"not",
"in",
"node",
":",
"node",
"[",
"name",
"]",
"=",
"{",
"}",
"elif",
"not",
"node",
"[",
"name",
"]",
":",
"# Pre-existing empty node implies we already have this entire tree.",
"return",
"node",
"=",
"node",
"[",
"name",
"]",
"# Remove any sub-trees we might have had.",
"node",
".",
"clear",
"(",
")"
] | Adds a field path into the tree.
If the field path to add is a sub-path of an existing field path
in the tree (i.e., a leaf node), it means the tree already matches
the given path so nothing will be added to the tree. If the path
matches an existing non-leaf node in the tree, that non-leaf node
will be turned into a leaf node with all its children removed because
the path matches all the node's children. Otherwise, a new path will
be added.
Args:
path: The field path to add. | [
"Adds",
"a",
"field",
"path",
"into",
"the",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L560-L583 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _FieldMaskTree.IntersectPath | def IntersectPath(self, path, intersection):
"""Calculates the intersection part of a field path with this tree.
Args:
path: The field path to calculates.
intersection: The out tree to record the intersection part.
"""
node = self._root
for name in path.split('.'):
if name not in node:
return
elif not node[name]:
intersection.AddPath(path)
return
node = node[name]
intersection.AddLeafNodes(path, node) | python | def IntersectPath(self, path, intersection):
"""Calculates the intersection part of a field path with this tree.
Args:
path: The field path to calculates.
intersection: The out tree to record the intersection part.
"""
node = self._root
for name in path.split('.'):
if name not in node:
return
elif not node[name]:
intersection.AddPath(path)
return
node = node[name]
intersection.AddLeafNodes(path, node) | [
"def",
"IntersectPath",
"(",
"self",
",",
"path",
",",
"intersection",
")",
":",
"node",
"=",
"self",
".",
"_root",
"for",
"name",
"in",
"path",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"name",
"not",
"in",
"node",
":",
"return",
"elif",
"not",
"node",
"[",
"name",
"]",
":",
"intersection",
".",
"AddPath",
"(",
"path",
")",
"return",
"node",
"=",
"node",
"[",
"name",
"]",
"intersection",
".",
"AddLeafNodes",
"(",
"path",
",",
"node",
")"
] | Calculates the intersection part of a field path with this tree.
Args:
path: The field path to calculates.
intersection: The out tree to record the intersection part. | [
"Calculates",
"the",
"intersection",
"part",
"of",
"a",
"field",
"path",
"with",
"this",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L590-L605 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _FieldMaskTree.AddLeafNodes | def AddLeafNodes(self, prefix, node):
"""Adds leaf nodes begin with prefix to this tree."""
if not node:
self.AddPath(prefix)
for name in node:
child_path = prefix + '.' + name
self.AddLeafNodes(child_path, node[name]) | python | def AddLeafNodes(self, prefix, node):
"""Adds leaf nodes begin with prefix to this tree."""
if not node:
self.AddPath(prefix)
for name in node:
child_path = prefix + '.' + name
self.AddLeafNodes(child_path, node[name]) | [
"def",
"AddLeafNodes",
"(",
"self",
",",
"prefix",
",",
"node",
")",
":",
"if",
"not",
"node",
":",
"self",
".",
"AddPath",
"(",
"prefix",
")",
"for",
"name",
"in",
"node",
":",
"child_path",
"=",
"prefix",
"+",
"'.'",
"+",
"name",
"self",
".",
"AddLeafNodes",
"(",
"child_path",
",",
"node",
"[",
"name",
"]",
")"
] | Adds leaf nodes begin with prefix to this tree. | [
"Adds",
"leaf",
"nodes",
"begin",
"with",
"prefix",
"to",
"this",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L607-L613 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | _FieldMaskTree.MergeMessage | def MergeMessage(
self, source, destination,
replace_message, replace_repeated):
"""Merge all fields specified by this tree from source to destination."""
_MergeMessage(
self._root, source, destination, replace_message, replace_repeated) | python | def MergeMessage(
self, source, destination,
replace_message, replace_repeated):
"""Merge all fields specified by this tree from source to destination."""
_MergeMessage(
self._root, source, destination, replace_message, replace_repeated) | [
"def",
"MergeMessage",
"(",
"self",
",",
"source",
",",
"destination",
",",
"replace_message",
",",
"replace_repeated",
")",
":",
"_MergeMessage",
"(",
"self",
".",
"_root",
",",
"source",
",",
"destination",
",",
"replace_message",
",",
"replace_repeated",
")"
] | Merge all fields specified by this tree from source to destination. | [
"Merge",
"all",
"fields",
"specified",
"by",
"this",
"tree",
"from",
"source",
"to",
"destination",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L615-L620 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/rc.py | configure | def configure (command = None, condition = None, options = None):
"""
Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command, condition,
and at minimum the rc-type option are given will the command be configured.
This is so that callers don't have to check auto-configuration values
before calling this. And still get the functionality of build failures when
the resource compiler can't be found.
"""
rc_type = feature.get_values('<rc-type>', options)
if rc_type:
assert(len(rc_type) == 1)
rc_type = rc_type[0]
if command and condition and rc_type:
flags('rc.compile.resource', '.RC', condition, command)
flags('rc.compile.resource', '.RC_TYPE', condition, [rc_type.lower()])
flags('rc.compile.resource', 'DEFINES', [], ['<define>'])
flags('rc.compile.resource', 'INCLUDES', [], ['<include>'])
if debug():
print 'notice: using rc compiler ::', condition, '::', command | python | def configure (command = None, condition = None, options = None):
"""
Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command, condition,
and at minimum the rc-type option are given will the command be configured.
This is so that callers don't have to check auto-configuration values
before calling this. And still get the functionality of build failures when
the resource compiler can't be found.
"""
rc_type = feature.get_values('<rc-type>', options)
if rc_type:
assert(len(rc_type) == 1)
rc_type = rc_type[0]
if command and condition and rc_type:
flags('rc.compile.resource', '.RC', condition, command)
flags('rc.compile.resource', '.RC_TYPE', condition, [rc_type.lower()])
flags('rc.compile.resource', 'DEFINES', [], ['<define>'])
flags('rc.compile.resource', 'INCLUDES', [], ['<include>'])
if debug():
print 'notice: using rc compiler ::', condition, '::', command | [
"def",
"configure",
"(",
"command",
"=",
"None",
",",
"condition",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"rc_type",
"=",
"feature",
".",
"get_values",
"(",
"'<rc-type>'",
",",
"options",
")",
"if",
"rc_type",
":",
"assert",
"(",
"len",
"(",
"rc_type",
")",
"==",
"1",
")",
"rc_type",
"=",
"rc_type",
"[",
"0",
"]",
"if",
"command",
"and",
"condition",
"and",
"rc_type",
":",
"flags",
"(",
"'rc.compile.resource'",
",",
"'.RC'",
",",
"condition",
",",
"command",
")",
"flags",
"(",
"'rc.compile.resource'",
",",
"'.RC_TYPE'",
",",
"condition",
",",
"[",
"rc_type",
".",
"lower",
"(",
")",
"]",
")",
"flags",
"(",
"'rc.compile.resource'",
",",
"'DEFINES'",
",",
"[",
"]",
",",
"[",
"'<define>'",
"]",
")",
"flags",
"(",
"'rc.compile.resource'",
",",
"'INCLUDES'",
",",
"[",
"]",
",",
"[",
"'<include>'",
"]",
")",
"if",
"debug",
"(",
")",
":",
"print",
"'notice: using rc compiler ::'",
",",
"condition",
",",
"'::'",
",",
"command"
] | Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command, condition,
and at minimum the rc-type option are given will the command be configured.
This is so that callers don't have to check auto-configuration values
before calling this. And still get the functionality of build failures when
the resource compiler can't be found. | [
"Configures",
"a",
"new",
"resource",
"compilation",
"command",
"specific",
"to",
"a",
"condition",
"usually",
"a",
"toolset",
"selection",
"condition",
".",
"The",
"possible",
"options",
"are",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/rc.py#L50-L75 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_NuSVR.py | convert | def convert(model, feature_names, target):
"""Convert a Nu Support Vector Regression (NuSVR) model to the protobuf spec.
Parameters
----------
model: NuSVR
A trained NuSVR encoder model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _NuSVR)
return _SVR.convert(model, feature_names, target) | python | def convert(model, feature_names, target):
"""Convert a Nu Support Vector Regression (NuSVR) model to the protobuf spec.
Parameters
----------
model: NuSVR
A trained NuSVR encoder model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _NuSVR)
return _SVR.convert(model, feature_names, target) | [
"def",
"convert",
"(",
"model",
",",
"feature_names",
",",
"target",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"_NuSVR",
")",
"return",
"_SVR",
".",
"convert",
"(",
"model",
",",
"feature_names",
",",
"target",
")"
] | Convert a Nu Support Vector Regression (NuSVR) model to the protobuf spec.
Parameters
----------
model: NuSVR
A trained NuSVR encoder model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"Nu",
"Support",
"Vector",
"Regression",
"(",
"NuSVR",
")",
"model",
"to",
"the",
"protobuf",
"spec",
".",
"Parameters",
"----------",
"model",
":",
"NuSVR",
"A",
"trained",
"NuSVR",
"encoder",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_NuSVR.py#L20-L42 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/linear_regression.py | create | def create(dataset, target, features=None, l2_penalty=1e-2, l1_penalty=0.0,
solver='auto', feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
step_size = _DEFAULT_SOLVER_OPTIONS['step_size'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
validation_set = "auto",
verbose=True):
"""
Create a :class:`~turicreate.linear_regression.LinearRegression` to
predict a scalar target variable as a linear function of one or more
features. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
The linear regression module can be used for ridge regression, Lasso, and
elastic net regression (see References for more detail on these methods). By
default, this model has an l2 regularization weight of 0.01.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
l2_penalty : float, optional
Weight on the l2-regularizer of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized linear regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful for
the model. The default weight of 0 prevents any features from being
discarded. See the LASSO regression reference for more detail.
solver : string, optional
Solver to use for training the model. See the references for more detail
on each solver.
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are
all automatically tuned and the default options should function well.
See the solver options guide for setting additional parameters for each
of the solvers.
See the user guide for additional details on how the solver is chosen.
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
step_size : float, optional (fista only)
The starting step size to use for the ``fista`` and ``gd`` solvers. The
default is set to 1.0, this is an aggressive setting. If the first
iteration takes a considerable amount of time, reducing this parameter
may speed up model training.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LinearRegression
A trained model of type
:class:`~turicreate.linear_regression.LinearRegression`.
See Also
--------
LinearRegression, turicreate.boosted_trees_regression.BoostedTreesRegression, turicreate.regression.create
Notes
-----
- Categorical variables are encoded by creating dummy variables. For a
variable with :math:`K` categories, the encoding creates :math:`K-1` dummy
variables, while the first category encountered in the data is used as the
baseline.
- For prediction and evaluation of linear regression models with sparse
dictionary inputs, new keys/columns that were not seen during training
are silently ignored.
- Any 'None' values in the data will result in an error being thrown.
- A constant term is automatically added for the model intercept. This term
is not regularized.
- Standard errors on coefficients are only available when `solver=newton`
or when the default `auto` solver option chooses the newton method and if
the number of examples in the training data is more than the number of
coefficients. If standard errors cannot be estimated, a column of `None`
values are returned.
References
----------
- Hoerl, A.E. and Kennard, R.W. (1970) `Ridge regression: Biased Estimation
for Nonorthogonal Problems
<http://amstat.tandfonline.com/doi/abs/10.1080/00401706.1970.10488634>`_.
Technometrics 12(1) pp.55-67
- Tibshirani, R. (1996) `Regression Shrinkage and Selection via the Lasso <h
ttp://www.jstor.org/discover/10.2307/2346178?uid=3739256&uid=2&uid=4&sid=2
1104169934983>`_. Journal of the Royal Statistical Society. Series B
(Methodological) 58(1) pp.267-288.
- Zhu, C., et al. (1997) `Algorithm 778: L-BFGS-B: Fortran subroutines for
large-scale bound-constrained optimization
<https://dl.acm.org/citation.cfm?id=279236>`_. ACM Transactions on
Mathematical Software 23(4) pp.550-560.
- Barzilai, J. and Borwein, J. `Two-Point Step Size Gradient Methods
<http://imajna.oxfordjournals.org/content/8/1/141.short>`_. IMA Journal of
Numerical Analysis 8(1) pp.141-148.
- Beck, A. and Teboulle, M. (2009) `A Fast Iterative Shrinkage-Thresholding
Algorithm for Linear Inverse Problems
<http://epubs.siam.org/doi/abs/10.1137/080716542>`_. SIAM Journal on
Imaging Sciences 2(1) pp.183-202.
- Zhang, T. (2004) `Solving large scale linear prediction problems using
stochastic gradient descent algorithms
<https://dl.acm.org/citation.cfm?id=1015332>`_. ICML '04: Proceedings of
the twenty-first international conference on Machine learning p.116.
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf`` with a list of columns
[``feature_1`` ... ``feature_K``] denoting features and a target column
``target``, we can create a
:class:`~turicreate.linear_regression.LinearRegression` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
For ridge regression, we can set the ``l2_penalty`` parameter higher (the
default is 0.01). For Lasso regression, we set the l1_penalty higher, and
for elastic net, we set both to be higher.
.. sourcecode:: python
# Ridge regression
>>> model_ridge = turicreate.linear_regression.create(data, 'price', l2_penalty=0.1)
# Lasso
>>> model_lasso = turicreate.linear_regression.create(data, 'price', l2_penalty=0.,
l1_penalty=1.0)
# Elastic net regression
>>> model_enet = turicreate.linear_regression.create(data, 'price', l2_penalty=0.5,
l1_penalty=0.5)
"""
# Regression model names.
model_name = "regression_linear_regression"
solver = solver.lower()
model = _sl.create(dataset, target, model_name, features=features,
validation_set = validation_set,
solver = solver, verbose = verbose,
l2_penalty=l2_penalty, l1_penalty = l1_penalty,
feature_rescaling = feature_rescaling,
convergence_threshold = convergence_threshold,
step_size = step_size,
lbfgs_memory_level = lbfgs_memory_level,
max_iterations = max_iterations)
return LinearRegression(model.__proxy__) | python | def create(dataset, target, features=None, l2_penalty=1e-2, l1_penalty=0.0,
solver='auto', feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
step_size = _DEFAULT_SOLVER_OPTIONS['step_size'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
validation_set = "auto",
verbose=True):
"""
Create a :class:`~turicreate.linear_regression.LinearRegression` to
predict a scalar target variable as a linear function of one or more
features. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
The linear regression module can be used for ridge regression, Lasso, and
elastic net regression (see References for more detail on these methods). By
default, this model has an l2 regularization weight of 0.01.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
l2_penalty : float, optional
Weight on the l2-regularizer of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized linear regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful for
the model. The default weight of 0 prevents any features from being
discarded. See the LASSO regression reference for more detail.
solver : string, optional
Solver to use for training the model. See the references for more detail
on each solver.
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are
all automatically tuned and the default options should function well.
See the solver options guide for setting additional parameters for each
of the solvers.
See the user guide for additional details on how the solver is chosen.
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
step_size : float, optional (fista only)
The starting step size to use for the ``fista`` and ``gd`` solvers. The
default is set to 1.0, this is an aggressive setting. If the first
iteration takes a considerable amount of time, reducing this parameter
may speed up model training.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LinearRegression
A trained model of type
:class:`~turicreate.linear_regression.LinearRegression`.
See Also
--------
LinearRegression, turicreate.boosted_trees_regression.BoostedTreesRegression, turicreate.regression.create
Notes
-----
- Categorical variables are encoded by creating dummy variables. For a
variable with :math:`K` categories, the encoding creates :math:`K-1` dummy
variables, while the first category encountered in the data is used as the
baseline.
- For prediction and evaluation of linear regression models with sparse
dictionary inputs, new keys/columns that were not seen during training
are silently ignored.
- Any 'None' values in the data will result in an error being thrown.
- A constant term is automatically added for the model intercept. This term
is not regularized.
- Standard errors on coefficients are only available when `solver=newton`
or when the default `auto` solver option chooses the newton method and if
the number of examples in the training data is more than the number of
coefficients. If standard errors cannot be estimated, a column of `None`
values are returned.
References
----------
- Hoerl, A.E. and Kennard, R.W. (1970) `Ridge regression: Biased Estimation
for Nonorthogonal Problems
<http://amstat.tandfonline.com/doi/abs/10.1080/00401706.1970.10488634>`_.
Technometrics 12(1) pp.55-67
- Tibshirani, R. (1996) `Regression Shrinkage and Selection via the Lasso <h
ttp://www.jstor.org/discover/10.2307/2346178?uid=3739256&uid=2&uid=4&sid=2
1104169934983>`_. Journal of the Royal Statistical Society. Series B
(Methodological) 58(1) pp.267-288.
- Zhu, C., et al. (1997) `Algorithm 778: L-BFGS-B: Fortran subroutines for
large-scale bound-constrained optimization
<https://dl.acm.org/citation.cfm?id=279236>`_. ACM Transactions on
Mathematical Software 23(4) pp.550-560.
- Barzilai, J. and Borwein, J. `Two-Point Step Size Gradient Methods
<http://imajna.oxfordjournals.org/content/8/1/141.short>`_. IMA Journal of
Numerical Analysis 8(1) pp.141-148.
- Beck, A. and Teboulle, M. (2009) `A Fast Iterative Shrinkage-Thresholding
Algorithm for Linear Inverse Problems
<http://epubs.siam.org/doi/abs/10.1137/080716542>`_. SIAM Journal on
Imaging Sciences 2(1) pp.183-202.
- Zhang, T. (2004) `Solving large scale linear prediction problems using
stochastic gradient descent algorithms
<https://dl.acm.org/citation.cfm?id=1015332>`_. ICML '04: Proceedings of
the twenty-first international conference on Machine learning p.116.
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf`` with a list of columns
[``feature_1`` ... ``feature_K``] denoting features and a target column
``target``, we can create a
:class:`~turicreate.linear_regression.LinearRegression` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
For ridge regression, we can set the ``l2_penalty`` parameter higher (the
default is 0.01). For Lasso regression, we set the l1_penalty higher, and
for elastic net, we set both to be higher.
.. sourcecode:: python
# Ridge regression
>>> model_ridge = turicreate.linear_regression.create(data, 'price', l2_penalty=0.1)
# Lasso
>>> model_lasso = turicreate.linear_regression.create(data, 'price', l2_penalty=0.,
l1_penalty=1.0)
# Elastic net regression
>>> model_enet = turicreate.linear_regression.create(data, 'price', l2_penalty=0.5,
l1_penalty=0.5)
"""
# Regression model names.
model_name = "regression_linear_regression"
solver = solver.lower()
model = _sl.create(dataset, target, model_name, features=features,
validation_set = validation_set,
solver = solver, verbose = verbose,
l2_penalty=l2_penalty, l1_penalty = l1_penalty,
feature_rescaling = feature_rescaling,
convergence_threshold = convergence_threshold,
step_size = step_size,
lbfgs_memory_level = lbfgs_memory_level,
max_iterations = max_iterations)
return LinearRegression(model.__proxy__) | [
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"l2_penalty",
"=",
"1e-2",
",",
"l1_penalty",
"=",
"0.0",
",",
"solver",
"=",
"'auto'",
",",
"feature_rescaling",
"=",
"True",
",",
"convergence_threshold",
"=",
"_DEFAULT_SOLVER_OPTIONS",
"[",
"'convergence_threshold'",
"]",
",",
"step_size",
"=",
"_DEFAULT_SOLVER_OPTIONS",
"[",
"'step_size'",
"]",
",",
"lbfgs_memory_level",
"=",
"_DEFAULT_SOLVER_OPTIONS",
"[",
"'lbfgs_memory_level'",
"]",
",",
"max_iterations",
"=",
"_DEFAULT_SOLVER_OPTIONS",
"[",
"'max_iterations'",
"]",
",",
"validation_set",
"=",
"\"auto\"",
",",
"verbose",
"=",
"True",
")",
":",
"# Regression model names.",
"model_name",
"=",
"\"regression_linear_regression\"",
"solver",
"=",
"solver",
".",
"lower",
"(",
")",
"model",
"=",
"_sl",
".",
"create",
"(",
"dataset",
",",
"target",
",",
"model_name",
",",
"features",
"=",
"features",
",",
"validation_set",
"=",
"validation_set",
",",
"solver",
"=",
"solver",
",",
"verbose",
"=",
"verbose",
",",
"l2_penalty",
"=",
"l2_penalty",
",",
"l1_penalty",
"=",
"l1_penalty",
",",
"feature_rescaling",
"=",
"feature_rescaling",
",",
"convergence_threshold",
"=",
"convergence_threshold",
",",
"step_size",
"=",
"step_size",
",",
"lbfgs_memory_level",
"=",
"lbfgs_memory_level",
",",
"max_iterations",
"=",
"max_iterations",
")",
"return",
"LinearRegression",
"(",
"model",
".",
"__proxy__",
")"
] | Create a :class:`~turicreate.linear_regression.LinearRegression` to
predict a scalar target variable as a linear function of one or more
features. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
The linear regression module can be used for ridge regression, Lasso, and
elastic net regression (see References for more detail on these methods). By
default, this model has an l2 regularization weight of 0.01.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
l2_penalty : float, optional
Weight on the l2-regularizer of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized linear regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful for
the model. The default weight of 0 prevents any features from being
discarded. See the LASSO regression reference for more detail.
solver : string, optional
Solver to use for training the model. See the references for more detail
on each solver.
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are
all automatically tuned and the default options should function well.
See the solver options guide for setting additional parameters for each
of the solvers.
See the user guide for additional details on how the solver is chosen.
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
step_size : float, optional (fista only)
The starting step size to use for the ``fista`` and ``gd`` solvers. The
default is set to 1.0, this is an aggressive setting. If the first
iteration takes a considerable amount of time, reducing this parameter
may speed up model training.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LinearRegression
A trained model of type
:class:`~turicreate.linear_regression.LinearRegression`.
See Also
--------
LinearRegression, turicreate.boosted_trees_regression.BoostedTreesRegression, turicreate.regression.create
Notes
-----
- Categorical variables are encoded by creating dummy variables. For a
variable with :math:`K` categories, the encoding creates :math:`K-1` dummy
variables, while the first category encountered in the data is used as the
baseline.
- For prediction and evaluation of linear regression models with sparse
dictionary inputs, new keys/columns that were not seen during training
are silently ignored.
- Any 'None' values in the data will result in an error being thrown.
- A constant term is automatically added for the model intercept. This term
is not regularized.
- Standard errors on coefficients are only available when `solver=newton`
or when the default `auto` solver option chooses the newton method and if
the number of examples in the training data is more than the number of
coefficients. If standard errors cannot be estimated, a column of `None`
values are returned.
References
----------
- Hoerl, A.E. and Kennard, R.W. (1970) `Ridge regression: Biased Estimation
for Nonorthogonal Problems
<http://amstat.tandfonline.com/doi/abs/10.1080/00401706.1970.10488634>`_.
Technometrics 12(1) pp.55-67
- Tibshirani, R. (1996) `Regression Shrinkage and Selection via the Lasso <h
ttp://www.jstor.org/discover/10.2307/2346178?uid=3739256&uid=2&uid=4&sid=2
1104169934983>`_. Journal of the Royal Statistical Society. Series B
(Methodological) 58(1) pp.267-288.
- Zhu, C., et al. (1997) `Algorithm 778: L-BFGS-B: Fortran subroutines for
large-scale bound-constrained optimization
<https://dl.acm.org/citation.cfm?id=279236>`_. ACM Transactions on
Mathematical Software 23(4) pp.550-560.
- Barzilai, J. and Borwein, J. `Two-Point Step Size Gradient Methods
<http://imajna.oxfordjournals.org/content/8/1/141.short>`_. IMA Journal of
Numerical Analysis 8(1) pp.141-148.
- Beck, A. and Teboulle, M. (2009) `A Fast Iterative Shrinkage-Thresholding
Algorithm for Linear Inverse Problems
<http://epubs.siam.org/doi/abs/10.1137/080716542>`_. SIAM Journal on
Imaging Sciences 2(1) pp.183-202.
- Zhang, T. (2004) `Solving large scale linear prediction problems using
stochastic gradient descent algorithms
<https://dl.acm.org/citation.cfm?id=1015332>`_. ICML '04: Proceedings of
the twenty-first international conference on Machine learning p.116.
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf`` with a list of columns
[``feature_1`` ... ``feature_K``] denoting features and a target column
``target``, we can create a
:class:`~turicreate.linear_regression.LinearRegression` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
For ridge regression, we can set the ``l2_penalty`` parameter higher (the
default is 0.01). For Lasso regression, we set the l1_penalty higher, and
for elastic net, we set both to be higher.
.. sourcecode:: python
# Ridge regression
>>> model_ridge = turicreate.linear_regression.create(data, 'price', l2_penalty=0.1)
# Lasso
>>> model_lasso = turicreate.linear_regression.create(data, 'price', l2_penalty=0.,
l1_penalty=1.0)
# Elastic net regression
>>> model_enet = turicreate.linear_regression.create(data, 'price', l2_penalty=0.5,
l1_penalty=0.5) | [
"Create",
"a",
":",
"class",
":",
"~turicreate",
".",
"linear_regression",
".",
"LinearRegression",
"to",
"predict",
"a",
"scalar",
"target",
"variable",
"as",
"a",
"linear",
"function",
"of",
"one",
"or",
"more",
"features",
".",
"In",
"addition",
"to",
"standard",
"numeric",
"and",
"categorical",
"types",
"features",
"can",
"also",
"be",
"extracted",
"automatically",
"from",
"list",
"-",
"or",
"dictionary",
"-",
"type",
"SFrame",
"columns",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/linear_regression.py#L28-L285 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/linear_regression.py | LinearRegression.export_coreml | def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel")
"""
from turicreate.extensions import _linear_regression_export_as_model_asset
from turicreate.toolkits import _coreml_utils
display_name = "linear regression"
short_description = _coreml_utils._mlmodel_short_description(display_name)
context = {"class": self.__class__.__name__,
"version": _turicreate.__version__,
"short_description": short_description,
'user_defined':{
'turicreate_version': _turicreate.__version__
}
}
_linear_regression_export_as_model_asset(self.__proxy__, filename, context) | python | def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel")
"""
from turicreate.extensions import _linear_regression_export_as_model_asset
from turicreate.toolkits import _coreml_utils
display_name = "linear regression"
short_description = _coreml_utils._mlmodel_short_description(display_name)
context = {"class": self.__class__.__name__,
"version": _turicreate.__version__,
"short_description": short_description,
'user_defined':{
'turicreate_version': _turicreate.__version__
}
}
_linear_regression_export_as_model_asset(self.__proxy__, filename, context) | [
"def",
"export_coreml",
"(",
"self",
",",
"filename",
")",
":",
"from",
"turicreate",
".",
"extensions",
"import",
"_linear_regression_export_as_model_asset",
"from",
"turicreate",
".",
"toolkits",
"import",
"_coreml_utils",
"display_name",
"=",
"\"linear regression\"",
"short_description",
"=",
"_coreml_utils",
".",
"_mlmodel_short_description",
"(",
"display_name",
")",
"context",
"=",
"{",
"\"class\"",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"\"version\"",
":",
"_turicreate",
".",
"__version__",
",",
"\"short_description\"",
":",
"short_description",
",",
"'user_defined'",
":",
"{",
"'turicreate_version'",
":",
"_turicreate",
".",
"__version__",
"}",
"}",
"_linear_regression_export_as_model_asset",
"(",
"self",
".",
"__proxy__",
",",
"filename",
",",
"context",
")"
] | Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel") | [
"Export",
"the",
"model",
"in",
"Core",
"ML",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/linear_regression.py#L427-L451 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/linear_regression.py | LinearRegression.predict | def predict(self, dataset, missing_value_action='auto'):
"""
Return target value predictions for ``dataset``, using the trained
linear regression model. This method can be used to get fitted values
for the model by inputting the training dataset.
Parameters
----------
dataset : SFrame | pandas.Dataframe
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data,
target='price',
features=['bath', 'bedroom', 'size'])
>>> results = model.predict(data)
"""
return super(LinearRegression, self).predict(dataset, missing_value_action=missing_value_action) | python | def predict(self, dataset, missing_value_action='auto'):
"""
Return target value predictions for ``dataset``, using the trained
linear regression model. This method can be used to get fitted values
for the model by inputting the training dataset.
Parameters
----------
dataset : SFrame | pandas.Dataframe
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data,
target='price',
features=['bath', 'bedroom', 'size'])
>>> results = model.predict(data)
"""
return super(LinearRegression, self).predict(dataset, missing_value_action=missing_value_action) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"return",
"super",
"(",
"LinearRegression",
",",
"self",
")",
".",
"predict",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] | Return target value predictions for ``dataset``, using the trained
linear regression model. This method can be used to get fitted values
for the model by inputting the training dataset.
Parameters
----------
dataset : SFrame | pandas.Dataframe
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data,
target='price',
features=['bath', 'bedroom', 'size'])
>>> results = model.predict(data) | [
"Return",
"target",
"value",
"predictions",
"for",
"dataset",
"using",
"the",
"trained",
"linear",
"regression",
"model",
".",
"This",
"method",
"can",
"be",
"used",
"to",
"get",
"fitted",
"values",
"for",
"the",
"model",
"by",
"inputting",
"the",
"training",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/linear_regression.py#L519-L564 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/linear_regression.py | LinearRegression.evaluate | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
r"""Evaluate the model by making target value predictions and comparing
to actual values.
Two metrics are used to evaluate linear regression models. The first
is root-mean-squared error (RMSE) while the second is the absolute
value of the maximum error between the actual and predicted values.
Let :math:`y` and :math:`\hat{y}` denote vectors of length :math:`N`
(number of examples) with actual and predicted values. The RMSE is
defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2}
while the max-error is defined as
.. math::
max-error = \max_{i=1}^N \|\widehat{y}_i - y_i\|
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
Results from model evaluation procedure.
See Also
----------
create, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data,
target='price',
features=['bath', 'bedroom', 'size'])
>>> results = model.evaluate(data)
"""
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'rmse', 'max_error'])
return super(LinearRegression, self).evaluate(dataset, missing_value_action=missing_value_action,
metric=metric) | python | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
r"""Evaluate the model by making target value predictions and comparing
to actual values.
Two metrics are used to evaluate linear regression models. The first
is root-mean-squared error (RMSE) while the second is the absolute
value of the maximum error between the actual and predicted values.
Let :math:`y` and :math:`\hat{y}` denote vectors of length :math:`N`
(number of examples) with actual and predicted values. The RMSE is
defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2}
while the max-error is defined as
.. math::
max-error = \max_{i=1}^N \|\widehat{y}_i - y_i\|
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
Results from model evaluation procedure.
See Also
----------
create, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data,
target='price',
features=['bath', 'bedroom', 'size'])
>>> results = model.evaluate(data)
"""
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'rmse', 'max_error'])
return super(LinearRegression, self).evaluate(dataset, missing_value_action=missing_value_action,
metric=metric) | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"_raise_error_evaluation_metric_is_valid",
"(",
"metric",
",",
"[",
"'auto'",
",",
"'rmse'",
",",
"'max_error'",
"]",
")",
"return",
"super",
"(",
"LinearRegression",
",",
"self",
")",
".",
"evaluate",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
",",
"metric",
"=",
"metric",
")"
] | r"""Evaluate the model by making target value predictions and comparing
to actual values.
Two metrics are used to evaluate linear regression models. The first
is root-mean-squared error (RMSE) while the second is the absolute
value of the maximum error between the actual and predicted values.
Let :math:`y` and :math:`\hat{y}` denote vectors of length :math:`N`
(number of examples) with actual and predicted values. The RMSE is
defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2}
while the max-error is defined as
.. math::
max-error = \max_{i=1}^N \|\widehat{y}_i - y_i\|
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
Results from model evaluation procedure.
See Also
----------
create, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data,
target='price',
features=['bath', 'bedroom', 'size'])
>>> results = model.evaluate(data) | [
"r",
"Evaluate",
"the",
"model",
"by",
"making",
"target",
"value",
"predictions",
"and",
"comparing",
"to",
"actual",
"values",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/linear_regression.py#L567-L635 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py | frame | def frame(data, window_length, hop_length):
"""Convert array into a sequence of successive possibly overlapping frames.
An n-dimensional array of shape (num_samples, ...) is converted into an
(n+1)-D array of shape (num_frames, window_length, ...), where each frame
starts hop_length points after the preceding one.
This is accomplished using stride_tricks, so the original data is not
copied. However, there is no zero-padding, so any incomplete frames at the
end are not included.
Args:
data: np.array of dimension N >= 1.
window_length: Number of samples in each frame.
hop_length: Advance (in samples) between each window.
Returns:
(N+1)-D np.array with as many rows as there are complete frames that can be
extracted.
"""
num_samples = data.shape[0]
num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length))
shape = (num_frames, window_length) + data.shape[1:]
strides = (data.strides[0] * hop_length,) + data.strides
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides) | python | def frame(data, window_length, hop_length):
"""Convert array into a sequence of successive possibly overlapping frames.
An n-dimensional array of shape (num_samples, ...) is converted into an
(n+1)-D array of shape (num_frames, window_length, ...), where each frame
starts hop_length points after the preceding one.
This is accomplished using stride_tricks, so the original data is not
copied. However, there is no zero-padding, so any incomplete frames at the
end are not included.
Args:
data: np.array of dimension N >= 1.
window_length: Number of samples in each frame.
hop_length: Advance (in samples) between each window.
Returns:
(N+1)-D np.array with as many rows as there are complete frames that can be
extracted.
"""
num_samples = data.shape[0]
num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length))
shape = (num_frames, window_length) + data.shape[1:]
strides = (data.strides[0] * hop_length,) + data.strides
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides) | [
"def",
"frame",
"(",
"data",
",",
"window_length",
",",
"hop_length",
")",
":",
"num_samples",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"num_frames",
"=",
"1",
"+",
"int",
"(",
"np",
".",
"floor",
"(",
"(",
"num_samples",
"-",
"window_length",
")",
"/",
"hop_length",
")",
")",
"shape",
"=",
"(",
"num_frames",
",",
"window_length",
")",
"+",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"strides",
"=",
"(",
"data",
".",
"strides",
"[",
"0",
"]",
"*",
"hop_length",
",",
")",
"+",
"data",
".",
"strides",
"return",
"np",
".",
"lib",
".",
"stride_tricks",
".",
"as_strided",
"(",
"data",
",",
"shape",
"=",
"shape",
",",
"strides",
"=",
"strides",
")"
] | Convert array into a sequence of successive possibly overlapping frames.
An n-dimensional array of shape (num_samples, ...) is converted into an
(n+1)-D array of shape (num_frames, window_length, ...), where each frame
starts hop_length points after the preceding one.
This is accomplished using stride_tricks, so the original data is not
copied. However, there is no zero-padding, so any incomplete frames at the
end are not included.
Args:
data: np.array of dimension N >= 1.
window_length: Number of samples in each frame.
hop_length: Advance (in samples) between each window.
Returns:
(N+1)-D np.array with as many rows as there are complete frames that can be
extracted. | [
"Convert",
"array",
"into",
"a",
"sequence",
"of",
"successive",
"possibly",
"overlapping",
"frames",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py#L21-L45 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py | periodic_hann | def periodic_hann(window_length):
"""Calculate a "periodic" Hann window.
The classic Hann window is defined as a raised cosine that starts and
ends on zero, and where every value appears twice, except the middle
point for an odd-length window. Matlab calls this a "symmetric" window
and np.hanning() returns it. However, for Fourier analysis, this
actually represents just over one cycle of a period N-1 cosine, and
thus is not compactly expressed on a length-N Fourier basis. Instead,
it's better to use a raised cosine that ends just before the final
zero value - i.e. a complete cycle of a period-N cosine. Matlab
calls this a "periodic" window. This routine calculates it.
Args:
window_length: The number of points in the returned window.
Returns:
A 1D np.array containing the periodic hann window.
"""
return 0.5 - (0.5 * np.cos(2 * np.pi / window_length *
np.arange(window_length))) | python | def periodic_hann(window_length):
"""Calculate a "periodic" Hann window.
The classic Hann window is defined as a raised cosine that starts and
ends on zero, and where every value appears twice, except the middle
point for an odd-length window. Matlab calls this a "symmetric" window
and np.hanning() returns it. However, for Fourier analysis, this
actually represents just over one cycle of a period N-1 cosine, and
thus is not compactly expressed on a length-N Fourier basis. Instead,
it's better to use a raised cosine that ends just before the final
zero value - i.e. a complete cycle of a period-N cosine. Matlab
calls this a "periodic" window. This routine calculates it.
Args:
window_length: The number of points in the returned window.
Returns:
A 1D np.array containing the periodic hann window.
"""
return 0.5 - (0.5 * np.cos(2 * np.pi / window_length *
np.arange(window_length))) | [
"def",
"periodic_hann",
"(",
"window_length",
")",
":",
"return",
"0.5",
"-",
"(",
"0.5",
"*",
"np",
".",
"cos",
"(",
"2",
"*",
"np",
".",
"pi",
"/",
"window_length",
"*",
"np",
".",
"arange",
"(",
"window_length",
")",
")",
")"
] | Calculate a "periodic" Hann window.
The classic Hann window is defined as a raised cosine that starts and
ends on zero, and where every value appears twice, except the middle
point for an odd-length window. Matlab calls this a "symmetric" window
and np.hanning() returns it. However, for Fourier analysis, this
actually represents just over one cycle of a period N-1 cosine, and
thus is not compactly expressed on a length-N Fourier basis. Instead,
it's better to use a raised cosine that ends just before the final
zero value - i.e. a complete cycle of a period-N cosine. Matlab
calls this a "periodic" window. This routine calculates it.
Args:
window_length: The number of points in the returned window.
Returns:
A 1D np.array containing the periodic hann window. | [
"Calculate",
"a",
"periodic",
"Hann",
"window",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py#L48-L68 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py | stft_magnitude | def stft_magnitude(signal, fft_length,
hop_length=None,
window_length=None):
"""Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D np.array where each row contains the magnitudes of the fft_length/2+1
unique values of the FFT for the corresponding frame of input samples.
"""
frames = frame(signal, window_length, hop_length)
# Apply frame window to each frame. We use a periodic Hann (cosine of period
# window_length) instead of the symmetric Hann of np.hanning (period
# window_length-1).
window = periodic_hann(window_length)
windowed_frames = frames * window
return np.abs(np.fft.rfft(windowed_frames, int(fft_length))) | python | def stft_magnitude(signal, fft_length,
hop_length=None,
window_length=None):
"""Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D np.array where each row contains the magnitudes of the fft_length/2+1
unique values of the FFT for the corresponding frame of input samples.
"""
frames = frame(signal, window_length, hop_length)
# Apply frame window to each frame. We use a periodic Hann (cosine of period
# window_length) instead of the symmetric Hann of np.hanning (period
# window_length-1).
window = periodic_hann(window_length)
windowed_frames = frames * window
return np.abs(np.fft.rfft(windowed_frames, int(fft_length))) | [
"def",
"stft_magnitude",
"(",
"signal",
",",
"fft_length",
",",
"hop_length",
"=",
"None",
",",
"window_length",
"=",
"None",
")",
":",
"frames",
"=",
"frame",
"(",
"signal",
",",
"window_length",
",",
"hop_length",
")",
"# Apply frame window to each frame. We use a periodic Hann (cosine of period",
"# window_length) instead of the symmetric Hann of np.hanning (period",
"# window_length-1).",
"window",
"=",
"periodic_hann",
"(",
"window_length",
")",
"windowed_frames",
"=",
"frames",
"*",
"window",
"return",
"np",
".",
"abs",
"(",
"np",
".",
"fft",
".",
"rfft",
"(",
"windowed_frames",
",",
"int",
"(",
"fft_length",
")",
")",
")"
] | Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D np.array where each row contains the magnitudes of the fft_length/2+1
unique values of the FFT for the corresponding frame of input samples. | [
"Calculate",
"the",
"short",
"-",
"time",
"Fourier",
"transform",
"magnitude",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py#L71-L92 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py | spectrogram_to_mel_matrix | def spectrogram_to_mel_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
audio_sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0):
"""Return a matrix that can post-multiply spectrogram rows to make mel.
Returns a np.array matrix A that can be used to post-multiply a matrix S of
spectrogram values (STFT magnitudes) arranged as frames x bins to generate a
"mel spectrogram" M of frames x num_mel_bins. M = S A.
The classic HTK algorithm exploits the complementarity of adjacent mel bands
to multiply each FFT bin by only one mel weight, then add it, with positive
and negative signs, to the two adjacent mel bands to which that bin
contributes. Here, by expressing this operation as a matrix multiply, we go
from num_fft multiplies per frame (plus around 2*num_fft adds) to around
num_fft^2 multiplies and adds. However, because these are all presumably
accomplished in a single call to np.dot(), it's not clear which approach is
faster in Python. The matrix multiplication has the attraction of being more
general and flexible, and much easier to read.
Args:
num_mel_bins: How many bands in the resulting mel spectrum. This is
the number of columns in the output matrix.
num_spectrogram_bins: How many bins there are in the source spectrogram
data, which is understood to be fft_size/2 + 1, i.e. the spectrogram
only contains the nonredundant FFT bins.
audio_sample_rate: Samples per second of the audio at the input to the
spectrogram. We need this to figure out the actual frequencies for
each spectrogram bin, which dictates how they are mapped into mel.
lower_edge_hertz: Lower bound on the frequencies to be included in the mel
spectrum. This corresponds to the lower edge of the lowest triangular
band.
upper_edge_hertz: The desired top edge of the highest frequency band.
Returns:
An np.array with shape (num_spectrogram_bins, num_mel_bins).
Raises:
ValueError: if frequency edges are incorrectly ordered or out of range.
"""
nyquist_hertz = audio_sample_rate / 2.
if lower_edge_hertz < 0.0:
raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > nyquist_hertz:
raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" %
(upper_edge_hertz, nyquist_hertz))
spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)
spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)
# The i'th mel band (starting from i=1) has center frequency
# band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge
# band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in
# the band_edges_mel arrays.
band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),
hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)
# Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins
# of spectrogram values.
mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))
for i in range(num_mel_bins):
lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the *mel* domain, not hertz.
lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /
(center_mel - lower_edge_mel))
upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /
(upper_edge_mel - center_mel))
# .. then intersect them with each other and zero.
mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope,
upper_slope))
# HTK excludes the spectrogram DC bin; make sure it always gets a zero
# coefficient.
mel_weights_matrix[0, :] = 0.0
return mel_weights_matrix | python | def spectrogram_to_mel_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
audio_sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0):
"""Return a matrix that can post-multiply spectrogram rows to make mel.
Returns a np.array matrix A that can be used to post-multiply a matrix S of
spectrogram values (STFT magnitudes) arranged as frames x bins to generate a
"mel spectrogram" M of frames x num_mel_bins. M = S A.
The classic HTK algorithm exploits the complementarity of adjacent mel bands
to multiply each FFT bin by only one mel weight, then add it, with positive
and negative signs, to the two adjacent mel bands to which that bin
contributes. Here, by expressing this operation as a matrix multiply, we go
from num_fft multiplies per frame (plus around 2*num_fft adds) to around
num_fft^2 multiplies and adds. However, because these are all presumably
accomplished in a single call to np.dot(), it's not clear which approach is
faster in Python. The matrix multiplication has the attraction of being more
general and flexible, and much easier to read.
Args:
num_mel_bins: How many bands in the resulting mel spectrum. This is
the number of columns in the output matrix.
num_spectrogram_bins: How many bins there are in the source spectrogram
data, which is understood to be fft_size/2 + 1, i.e. the spectrogram
only contains the nonredundant FFT bins.
audio_sample_rate: Samples per second of the audio at the input to the
spectrogram. We need this to figure out the actual frequencies for
each spectrogram bin, which dictates how they are mapped into mel.
lower_edge_hertz: Lower bound on the frequencies to be included in the mel
spectrum. This corresponds to the lower edge of the lowest triangular
band.
upper_edge_hertz: The desired top edge of the highest frequency band.
Returns:
An np.array with shape (num_spectrogram_bins, num_mel_bins).
Raises:
ValueError: if frequency edges are incorrectly ordered or out of range.
"""
nyquist_hertz = audio_sample_rate / 2.
if lower_edge_hertz < 0.0:
raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > nyquist_hertz:
raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" %
(upper_edge_hertz, nyquist_hertz))
spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)
spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)
# The i'th mel band (starting from i=1) has center frequency
# band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge
# band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in
# the band_edges_mel arrays.
band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),
hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)
# Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins
# of spectrogram values.
mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))
for i in range(num_mel_bins):
lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the *mel* domain, not hertz.
lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /
(center_mel - lower_edge_mel))
upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /
(upper_edge_mel - center_mel))
# .. then intersect them with each other and zero.
mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope,
upper_slope))
# HTK excludes the spectrogram DC bin; make sure it always gets a zero
# coefficient.
mel_weights_matrix[0, :] = 0.0
return mel_weights_matrix | [
"def",
"spectrogram_to_mel_matrix",
"(",
"num_mel_bins",
"=",
"20",
",",
"num_spectrogram_bins",
"=",
"129",
",",
"audio_sample_rate",
"=",
"8000",
",",
"lower_edge_hertz",
"=",
"125.0",
",",
"upper_edge_hertz",
"=",
"3800.0",
")",
":",
"nyquist_hertz",
"=",
"audio_sample_rate",
"/",
"2.",
"if",
"lower_edge_hertz",
"<",
"0.0",
":",
"raise",
"ValueError",
"(",
"\"lower_edge_hertz %.1f must be >= 0\"",
"%",
"lower_edge_hertz",
")",
"if",
"lower_edge_hertz",
">=",
"upper_edge_hertz",
":",
"raise",
"ValueError",
"(",
"\"lower_edge_hertz %.1f >= upper_edge_hertz %.1f\"",
"%",
"(",
"lower_edge_hertz",
",",
"upper_edge_hertz",
")",
")",
"if",
"upper_edge_hertz",
">",
"nyquist_hertz",
":",
"raise",
"ValueError",
"(",
"\"upper_edge_hertz %.1f is greater than Nyquist %.1f\"",
"%",
"(",
"upper_edge_hertz",
",",
"nyquist_hertz",
")",
")",
"spectrogram_bins_hertz",
"=",
"np",
".",
"linspace",
"(",
"0.0",
",",
"nyquist_hertz",
",",
"num_spectrogram_bins",
")",
"spectrogram_bins_mel",
"=",
"hertz_to_mel",
"(",
"spectrogram_bins_hertz",
")",
"# The i'th mel band (starting from i=1) has center frequency",
"# band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge",
"# band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in",
"# the band_edges_mel arrays.",
"band_edges_mel",
"=",
"np",
".",
"linspace",
"(",
"hertz_to_mel",
"(",
"lower_edge_hertz",
")",
",",
"hertz_to_mel",
"(",
"upper_edge_hertz",
")",
",",
"num_mel_bins",
"+",
"2",
")",
"# Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins",
"# of spectrogram values.",
"mel_weights_matrix",
"=",
"np",
".",
"empty",
"(",
"(",
"num_spectrogram_bins",
",",
"num_mel_bins",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_mel_bins",
")",
":",
"lower_edge_mel",
",",
"center_mel",
",",
"upper_edge_mel",
"=",
"band_edges_mel",
"[",
"i",
":",
"i",
"+",
"3",
"]",
"# Calculate lower and upper slopes for every spectrogram bin.",
"# Line segments are linear in the *mel* domain, not hertz.",
"lower_slope",
"=",
"(",
"(",
"spectrogram_bins_mel",
"-",
"lower_edge_mel",
")",
"/",
"(",
"center_mel",
"-",
"lower_edge_mel",
")",
")",
"upper_slope",
"=",
"(",
"(",
"upper_edge_mel",
"-",
"spectrogram_bins_mel",
")",
"/",
"(",
"upper_edge_mel",
"-",
"center_mel",
")",
")",
"# .. then intersect them with each other and zero.",
"mel_weights_matrix",
"[",
":",
",",
"i",
"]",
"=",
"np",
".",
"maximum",
"(",
"0.0",
",",
"np",
".",
"minimum",
"(",
"lower_slope",
",",
"upper_slope",
")",
")",
"# HTK excludes the spectrogram DC bin; make sure it always gets a zero",
"# coefficient.",
"mel_weights_matrix",
"[",
"0",
",",
":",
"]",
"=",
"0.0",
"return",
"mel_weights_matrix"
] | Return a matrix that can post-multiply spectrogram rows to make mel.
Returns a np.array matrix A that can be used to post-multiply a matrix S of
spectrogram values (STFT magnitudes) arranged as frames x bins to generate a
"mel spectrogram" M of frames x num_mel_bins. M = S A.
The classic HTK algorithm exploits the complementarity of adjacent mel bands
to multiply each FFT bin by only one mel weight, then add it, with positive
and negative signs, to the two adjacent mel bands to which that bin
contributes. Here, by expressing this operation as a matrix multiply, we go
from num_fft multiplies per frame (plus around 2*num_fft adds) to around
num_fft^2 multiplies and adds. However, because these are all presumably
accomplished in a single call to np.dot(), it's not clear which approach is
faster in Python. The matrix multiplication has the attraction of being more
general and flexible, and much easier to read.
Args:
num_mel_bins: How many bands in the resulting mel spectrum. This is
the number of columns in the output matrix.
num_spectrogram_bins: How many bins there are in the source spectrogram
data, which is understood to be fft_size/2 + 1, i.e. the spectrogram
only contains the nonredundant FFT bins.
audio_sample_rate: Samples per second of the audio at the input to the
spectrogram. We need this to figure out the actual frequencies for
each spectrogram bin, which dictates how they are mapped into mel.
lower_edge_hertz: Lower bound on the frequencies to be included in the mel
spectrum. This corresponds to the lower edge of the lowest triangular
band.
upper_edge_hertz: The desired top edge of the highest frequency band.
Returns:
An np.array with shape (num_spectrogram_bins, num_mel_bins).
Raises:
ValueError: if frequency edges are incorrectly ordered or out of range. | [
"Return",
"a",
"matrix",
"that",
"can",
"post",
"-",
"multiply",
"spectrogram",
"rows",
"to",
"make",
"mel",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py#L114-L189 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py | log_mel_spectrogram | def log_mel_spectrogram(data,
audio_sample_rate=8000,
log_offset=0.0,
window_length_secs=0.025,
hop_length_secs=0.010,
**kwargs):
"""Convert waveform to a log magnitude mel-frequency spectrogram.
Args:
data: 1D np.array of waveform data.
audio_sample_rate: The sampling rate of data.
log_offset: Add this to values when taking log to avoid -Infs.
window_length_secs: Duration of each window to analyze.
hop_length_secs: Advance between successive analysis windows.
**kwargs: Additional arguments to pass to spectrogram_to_mel_matrix.
Returns:
2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank
magnitudes for successive frames.
"""
window_length_samples = int(round(audio_sample_rate * window_length_secs))
hop_length_samples = int(round(audio_sample_rate * hop_length_secs))
fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
spectrogram = stft_magnitude(
data,
fft_length=fft_length,
hop_length=hop_length_samples,
window_length=window_length_samples)
mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix(
num_spectrogram_bins=spectrogram.shape[1],
audio_sample_rate=audio_sample_rate, **kwargs))
return np.log(mel_spectrogram + log_offset) | python | def log_mel_spectrogram(data,
audio_sample_rate=8000,
log_offset=0.0,
window_length_secs=0.025,
hop_length_secs=0.010,
**kwargs):
"""Convert waveform to a log magnitude mel-frequency spectrogram.
Args:
data: 1D np.array of waveform data.
audio_sample_rate: The sampling rate of data.
log_offset: Add this to values when taking log to avoid -Infs.
window_length_secs: Duration of each window to analyze.
hop_length_secs: Advance between successive analysis windows.
**kwargs: Additional arguments to pass to spectrogram_to_mel_matrix.
Returns:
2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank
magnitudes for successive frames.
"""
window_length_samples = int(round(audio_sample_rate * window_length_secs))
hop_length_samples = int(round(audio_sample_rate * hop_length_secs))
fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
spectrogram = stft_magnitude(
data,
fft_length=fft_length,
hop_length=hop_length_samples,
window_length=window_length_samples)
mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix(
num_spectrogram_bins=spectrogram.shape[1],
audio_sample_rate=audio_sample_rate, **kwargs))
return np.log(mel_spectrogram + log_offset) | [
"def",
"log_mel_spectrogram",
"(",
"data",
",",
"audio_sample_rate",
"=",
"8000",
",",
"log_offset",
"=",
"0.0",
",",
"window_length_secs",
"=",
"0.025",
",",
"hop_length_secs",
"=",
"0.010",
",",
"*",
"*",
"kwargs",
")",
":",
"window_length_samples",
"=",
"int",
"(",
"round",
"(",
"audio_sample_rate",
"*",
"window_length_secs",
")",
")",
"hop_length_samples",
"=",
"int",
"(",
"round",
"(",
"audio_sample_rate",
"*",
"hop_length_secs",
")",
")",
"fft_length",
"=",
"2",
"**",
"int",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"log",
"(",
"window_length_samples",
")",
"/",
"np",
".",
"log",
"(",
"2.0",
")",
")",
")",
"spectrogram",
"=",
"stft_magnitude",
"(",
"data",
",",
"fft_length",
"=",
"fft_length",
",",
"hop_length",
"=",
"hop_length_samples",
",",
"window_length",
"=",
"window_length_samples",
")",
"mel_spectrogram",
"=",
"np",
".",
"dot",
"(",
"spectrogram",
",",
"spectrogram_to_mel_matrix",
"(",
"num_spectrogram_bins",
"=",
"spectrogram",
".",
"shape",
"[",
"1",
"]",
",",
"audio_sample_rate",
"=",
"audio_sample_rate",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"np",
".",
"log",
"(",
"mel_spectrogram",
"+",
"log_offset",
")"
] | Convert waveform to a log magnitude mel-frequency spectrogram.
Args:
data: 1D np.array of waveform data.
audio_sample_rate: The sampling rate of data.
log_offset: Add this to values when taking log to avoid -Infs.
window_length_secs: Duration of each window to analyze.
hop_length_secs: Advance between successive analysis windows.
**kwargs: Additional arguments to pass to spectrogram_to_mel_matrix.
Returns:
2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank
magnitudes for successive frames. | [
"Convert",
"waveform",
"to",
"a",
"log",
"magnitude",
"mel",
"-",
"frequency",
"spectrogram",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/mel_features.py#L192-L223 | train |
apple/turicreate | src/unity/python/turicreate/util/_sframe_generation.py | generate_random_sframe | def generate_random_sframe(num_rows, column_codes, random_seed = 0):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
"""
from ..extensions import _generate_random_sframe
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
X = _generate_random_sframe(num_rows, column_codes, random_seed, False, 0)
X.__materialize__()
return X | python | def generate_random_sframe(num_rows, column_codes, random_seed = 0):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
"""
from ..extensions import _generate_random_sframe
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
X = _generate_random_sframe(num_rows, column_codes, random_seed, False, 0)
X.__materialize__()
return X | [
"def",
"generate_random_sframe",
"(",
"num_rows",
",",
"column_codes",
",",
"random_seed",
"=",
"0",
")",
":",
"from",
".",
".",
"extensions",
"import",
"_generate_random_sframe",
"assert",
"isinstance",
"(",
"column_codes",
",",
"str",
")",
"assert",
"isinstance",
"(",
"num_rows",
",",
"int",
")",
"assert",
"isinstance",
"(",
"random_seed",
",",
"int",
")",
"X",
"=",
"_generate_random_sframe",
"(",
"num_rows",
",",
"column_codes",
",",
"random_seed",
",",
"False",
",",
"0",
")",
"X",
".",
"__materialize__",
"(",
")",
"return",
"X"
] | Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors. | [
"Creates",
"a",
"random",
"SFrame",
"with",
"num_rows",
"rows",
"and",
"randomly",
"generated",
"column",
"types",
"determined",
"by",
"column_codes",
".",
"The",
"output",
"SFrame",
"is",
"deterministic",
"based",
"on",
"random_seed",
".",
"column_types",
"is",
"a",
"string",
"with",
"each",
"character",
"denoting",
"one",
"type",
"of",
"column",
"with",
"the",
"output",
"SFrame",
"having",
"one",
"column",
"for",
"each",
"character",
"in",
"the",
"string",
".",
"The",
"legend",
"is",
"as",
"follows",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_sframe_generation.py#L13-L71 | train |
apple/turicreate | src/unity/python/turicreate/util/_sframe_generation.py | generate_random_regression_sframe | def generate_random_regression_sframe(num_rows, column_codes, random_seed = 0, target_noise_level = 0.25):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
Target Generation
-----------------
the target value is a linear
combination of the features chosen for each row plus uniform noise.
- For each numeric and vector columns, each value, with the range
scaled to [-0.5, 0.5] (so r and R type values affect the target just
as much as n an N), is added to the target value. NaNs are ignored.
- For each categorical or string values, it is hash-mapped to a lookup
table of 512 randomly chosen values, each in [-0.5, 0.5], and the
result is added to the target.
- For dictionary columns, the keys are treated as adding a categorical
value and the values are treated as adding a numeric value.
At the end, a uniform random value is added to the target in the
range [(max_target - min_target) * noise_level], where max_target
and min_target are the maximum and minimum target values generated
by the above process.
The final target values are then scaled to [0, 1].
"""
from ..extensions import _generate_random_sframe
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
X = _generate_random_sframe(num_rows, column_codes, random_seed, True, target_noise_level)
X.__materialize__()
return X | python | def generate_random_regression_sframe(num_rows, column_codes, random_seed = 0, target_noise_level = 0.25):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
Target Generation
-----------------
the target value is a linear
combination of the features chosen for each row plus uniform noise.
- For each numeric and vector columns, each value, with the range
scaled to [-0.5, 0.5] (so r and R type values affect the target just
as much as n an N), is added to the target value. NaNs are ignored.
- For each categorical or string values, it is hash-mapped to a lookup
table of 512 randomly chosen values, each in [-0.5, 0.5], and the
result is added to the target.
- For dictionary columns, the keys are treated as adding a categorical
value and the values are treated as adding a numeric value.
At the end, a uniform random value is added to the target in the
range [(max_target - min_target) * noise_level], where max_target
and min_target are the maximum and minimum target values generated
by the above process.
The final target values are then scaled to [0, 1].
"""
from ..extensions import _generate_random_sframe
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
X = _generate_random_sframe(num_rows, column_codes, random_seed, True, target_noise_level)
X.__materialize__()
return X | [
"def",
"generate_random_regression_sframe",
"(",
"num_rows",
",",
"column_codes",
",",
"random_seed",
"=",
"0",
",",
"target_noise_level",
"=",
"0.25",
")",
":",
"from",
".",
".",
"extensions",
"import",
"_generate_random_sframe",
"assert",
"isinstance",
"(",
"column_codes",
",",
"str",
")",
"assert",
"isinstance",
"(",
"num_rows",
",",
"int",
")",
"assert",
"isinstance",
"(",
"random_seed",
",",
"int",
")",
"X",
"=",
"_generate_random_sframe",
"(",
"num_rows",
",",
"column_codes",
",",
"random_seed",
",",
"True",
",",
"target_noise_level",
")",
"X",
".",
"__materialize__",
"(",
")",
"return",
"X"
] | Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
Target Generation
-----------------
the target value is a linear
combination of the features chosen for each row plus uniform noise.
- For each numeric and vector columns, each value, with the range
scaled to [-0.5, 0.5] (so r and R type values affect the target just
as much as n an N), is added to the target value. NaNs are ignored.
- For each categorical or string values, it is hash-mapped to a lookup
table of 512 randomly chosen values, each in [-0.5, 0.5], and the
result is added to the target.
- For dictionary columns, the keys are treated as adding a categorical
value and the values are treated as adding a numeric value.
At the end, a uniform random value is added to the target in the
range [(max_target - min_target) * noise_level], where max_target
and min_target are the maximum and minimum target values generated
by the above process.
The final target values are then scaled to [0, 1]. | [
"Creates",
"a",
"random",
"SFrame",
"with",
"num_rows",
"rows",
"and",
"randomly",
"generated",
"column",
"types",
"determined",
"by",
"column_codes",
".",
"The",
"output",
"SFrame",
"is",
"deterministic",
"based",
"on",
"random_seed",
".",
"In",
"addition",
"a",
"target",
"column",
"is",
"generated",
"with",
"values",
"dependent",
"on",
"the",
"randomly",
"generated",
"features",
"in",
"a",
"given",
"row",
".",
"column_types",
"is",
"a",
"string",
"with",
"each",
"character",
"denoting",
"one",
"type",
"of",
"column",
"with",
"the",
"output",
"SFrame",
"having",
"one",
"column",
"for",
"each",
"character",
"in",
"the",
"string",
".",
"The",
"legend",
"is",
"as",
"follows",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_sframe_generation.py#L73-L157 | train |
apple/turicreate | src/unity/python/turicreate/util/_sframe_generation.py | generate_random_classification_sframe | def generate_random_classification_sframe(num_rows, column_codes, num_classes,
misclassification_spread = 0.25,
num_extra_class_bins = None,
random_seed = 0):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
Target Generation
-----------------
The target column, called "target", is an integer value that
represents the binning of the output of a noisy linear function of
the chosen random variables into `num_classes + num_extra_class_bins`
bins, shuffled, and then each bin is mapped to a class. This
means that some non-linearity is present if num_extra_class_bins > 0.
The default value for num_extra_class_bins is 2*num_classes.
The `misclassification_probability` controls the spread of the
binning -- if misclassification_spread equals 0.25, then a random
variable of 0.25 * bin_width is added to the numeric prediction of
the class, meaning the actual class may be mispredicted.
"""
from ..extensions import _generate_random_classification_sframe
if num_classes < 2:
raise ValueError("num_classes must be >= 2.")
if num_extra_class_bins is None:
num_extra_class_bins = 2*num_classes
if num_extra_class_bins < 0:
raise ValueError("num_extra_class_bins must be >= 0.")
if misclassification_spread < 0:
raise ValueError("misclassification_spread must be >= 0.")
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
assert isinstance(num_classes, int)
assert isinstance(num_extra_class_bins, int)
X = _generate_random_classification_sframe(
num_rows, column_codes, random_seed,
num_classes, num_extra_class_bins, misclassification_spread)
X.__materialize__()
return X | python | def generate_random_classification_sframe(num_rows, column_codes, num_classes,
misclassification_spread = 0.25,
num_extra_class_bins = None,
random_seed = 0):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
Target Generation
-----------------
The target column, called "target", is an integer value that
represents the binning of the output of a noisy linear function of
the chosen random variables into `num_classes + num_extra_class_bins`
bins, shuffled, and then each bin is mapped to a class. This
means that some non-linearity is present if num_extra_class_bins > 0.
The default value for num_extra_class_bins is 2*num_classes.
The `misclassification_probability` controls the spread of the
binning -- if misclassification_spread equals 0.25, then a random
variable of 0.25 * bin_width is added to the numeric prediction of
the class, meaning the actual class may be mispredicted.
"""
from ..extensions import _generate_random_classification_sframe
if num_classes < 2:
raise ValueError("num_classes must be >= 2.")
if num_extra_class_bins is None:
num_extra_class_bins = 2*num_classes
if num_extra_class_bins < 0:
raise ValueError("num_extra_class_bins must be >= 0.")
if misclassification_spread < 0:
raise ValueError("misclassification_spread must be >= 0.")
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
assert isinstance(num_classes, int)
assert isinstance(num_extra_class_bins, int)
X = _generate_random_classification_sframe(
num_rows, column_codes, random_seed,
num_classes, num_extra_class_bins, misclassification_spread)
X.__materialize__()
return X | [
"def",
"generate_random_classification_sframe",
"(",
"num_rows",
",",
"column_codes",
",",
"num_classes",
",",
"misclassification_spread",
"=",
"0.25",
",",
"num_extra_class_bins",
"=",
"None",
",",
"random_seed",
"=",
"0",
")",
":",
"from",
".",
".",
"extensions",
"import",
"_generate_random_classification_sframe",
"if",
"num_classes",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"num_classes must be >= 2.\"",
")",
"if",
"num_extra_class_bins",
"is",
"None",
":",
"num_extra_class_bins",
"=",
"2",
"*",
"num_classes",
"if",
"num_extra_class_bins",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"num_extra_class_bins must be >= 0.\"",
")",
"if",
"misclassification_spread",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"misclassification_spread must be >= 0.\"",
")",
"assert",
"isinstance",
"(",
"column_codes",
",",
"str",
")",
"assert",
"isinstance",
"(",
"num_rows",
",",
"int",
")",
"assert",
"isinstance",
"(",
"random_seed",
",",
"int",
")",
"assert",
"isinstance",
"(",
"num_classes",
",",
"int",
")",
"assert",
"isinstance",
"(",
"num_extra_class_bins",
",",
"int",
")",
"X",
"=",
"_generate_random_classification_sframe",
"(",
"num_rows",
",",
"column_codes",
",",
"random_seed",
",",
"num_classes",
",",
"num_extra_class_bins",
",",
"misclassification_spread",
")",
"X",
".",
"__materialize__",
"(",
")",
"return",
"X"
] | Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
Target Generation
-----------------
The target column, called "target", is an integer value that
represents the binning of the output of a noisy linear function of
the chosen random variables into `num_classes + num_extra_class_bins`
bins, shuffled, and then each bin is mapped to a class. This
means that some non-linearity is present if num_extra_class_bins > 0.
The default value for num_extra_class_bins is 2*num_classes.
The `misclassification_probability` controls the spread of the
binning -- if misclassification_spread equals 0.25, then a random
variable of 0.25 * bin_width is added to the numeric prediction of
the class, meaning the actual class may be mispredicted. | [
"Creates",
"a",
"random",
"SFrame",
"with",
"num_rows",
"rows",
"and",
"randomly",
"generated",
"column",
"types",
"determined",
"by",
"column_codes",
".",
"The",
"output",
"SFrame",
"is",
"deterministic",
"based",
"on",
"random_seed",
".",
"In",
"addition",
"a",
"target",
"column",
"is",
"generated",
"with",
"values",
"dependent",
"on",
"the",
"randomly",
"generated",
"features",
"in",
"a",
"given",
"row",
".",
"column_types",
"is",
"a",
"string",
"with",
"each",
"character",
"denoting",
"one",
"type",
"of",
"column",
"with",
"the",
"output",
"SFrame",
"having",
"one",
"column",
"for",
"each",
"character",
"in",
"the",
"string",
".",
"The",
"legend",
"is",
"as",
"follows",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_sframe_generation.py#L159-L255 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/_infer_shapes_nn_mlmodel.py | infer_shapes | def infer_shapes(nn_spec, input_spec, input_shape_dict = None):
"""
Input:
spec : mlmodel spec
input_shape_dict: dictionary of string --> tuple
string: input name
tuple: input shape as a 5 length tuple in order (Seq, Batch, C, H, W)
If input_shape_dict is not provided, input shapes are inferred from the input description in the mlmodel.
Since the description in the specification only contains values of C,H,W; Seq and Batch dimensions are set to 1.
Output:
shape_dict: dictionary containing all the blobs in the neural network and their shapes, expressed as length 5 tuples,
to be interpreted in order (Seq, Batch, C, H, W).
"""
shape_dict = {}
if input_shape_dict:
for key, value in input_shape_dict.items():
assert len(value) == 5, 'Shape of the input must be of length 5'
shape_dict[key] = value
# construct input_shape_dict from the model description
else:
for inp in input_spec:
input_name = inp.name
C = H = W = 1
if inp.type.WhichOneof('Type') == 'imageType':
W = int(inp.type.imageType.width)
H = int(inp.type.imageType.height)
colorspace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Name(inp.type.imageType.colorSpace)
if colorspace == 'GRAYSCALE':
C = 1
elif colorspace == 'RGB' or colorspace == 'BGR':
C = 3
else:
raise ValueError('Input %s : Invalid Colorspace' %(input_name))
elif inp.type.WhichOneof('Type') == 'multiArrayType':
array_shape = inp.type.multiArrayType.shape
if len(array_shape) == 1:
C = array_shape[0]
elif len(array_shape) == 3:
C, H, W = map(int, array_shape)
else:
raise ValueError("Input %s : Multi array must be of length 1 or 3" %(input_name))
else:
raise ValueError("Input %s : Input type must be image or multi-array" %(input_name))
shape_dict[input_name] = (1, 1, C, H, W)
layers = nn_spec.layers
for i, layer in enumerate(layers):
for inp in layer.input:
assert inp in shape_dict, ('Input %s shape not cannot be determined' %(inp))
layer_type = layer.WhichOneof('layer')
if layer_type == 'custom':
break
layer_translator = _get_translator_function(layer_type)
layer_translator(layer, shape_dict)
return shape_dict | python | def infer_shapes(nn_spec, input_spec, input_shape_dict = None):
"""
Input:
spec : mlmodel spec
input_shape_dict: dictionary of string --> tuple
string: input name
tuple: input shape as a 5 length tuple in order (Seq, Batch, C, H, W)
If input_shape_dict is not provided, input shapes are inferred from the input description in the mlmodel.
Since the description in the specification only contains values of C,H,W; Seq and Batch dimensions are set to 1.
Output:
shape_dict: dictionary containing all the blobs in the neural network and their shapes, expressed as length 5 tuples,
to be interpreted in order (Seq, Batch, C, H, W).
"""
shape_dict = {}
if input_shape_dict:
for key, value in input_shape_dict.items():
assert len(value) == 5, 'Shape of the input must be of length 5'
shape_dict[key] = value
# construct input_shape_dict from the model description
else:
for inp in input_spec:
input_name = inp.name
C = H = W = 1
if inp.type.WhichOneof('Type') == 'imageType':
W = int(inp.type.imageType.width)
H = int(inp.type.imageType.height)
colorspace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Name(inp.type.imageType.colorSpace)
if colorspace == 'GRAYSCALE':
C = 1
elif colorspace == 'RGB' or colorspace == 'BGR':
C = 3
else:
raise ValueError('Input %s : Invalid Colorspace' %(input_name))
elif inp.type.WhichOneof('Type') == 'multiArrayType':
array_shape = inp.type.multiArrayType.shape
if len(array_shape) == 1:
C = array_shape[0]
elif len(array_shape) == 3:
C, H, W = map(int, array_shape)
else:
raise ValueError("Input %s : Multi array must be of length 1 or 3" %(input_name))
else:
raise ValueError("Input %s : Input type must be image or multi-array" %(input_name))
shape_dict[input_name] = (1, 1, C, H, W)
layers = nn_spec.layers
for i, layer in enumerate(layers):
for inp in layer.input:
assert inp in shape_dict, ('Input %s shape not cannot be determined' %(inp))
layer_type = layer.WhichOneof('layer')
if layer_type == 'custom':
break
layer_translator = _get_translator_function(layer_type)
layer_translator(layer, shape_dict)
return shape_dict | [
"def",
"infer_shapes",
"(",
"nn_spec",
",",
"input_spec",
",",
"input_shape_dict",
"=",
"None",
")",
":",
"shape_dict",
"=",
"{",
"}",
"if",
"input_shape_dict",
":",
"for",
"key",
",",
"value",
"in",
"input_shape_dict",
".",
"items",
"(",
")",
":",
"assert",
"len",
"(",
"value",
")",
"==",
"5",
",",
"'Shape of the input must be of length 5'",
"shape_dict",
"[",
"key",
"]",
"=",
"value",
"# construct input_shape_dict from the model description",
"else",
":",
"for",
"inp",
"in",
"input_spec",
":",
"input_name",
"=",
"inp",
".",
"name",
"C",
"=",
"H",
"=",
"W",
"=",
"1",
"if",
"inp",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"==",
"'imageType'",
":",
"W",
"=",
"int",
"(",
"inp",
".",
"type",
".",
"imageType",
".",
"width",
")",
"H",
"=",
"int",
"(",
"inp",
".",
"type",
".",
"imageType",
".",
"height",
")",
"colorspace",
"=",
"_FeatureTypes_pb2",
".",
"ImageFeatureType",
".",
"ColorSpace",
".",
"Name",
"(",
"inp",
".",
"type",
".",
"imageType",
".",
"colorSpace",
")",
"if",
"colorspace",
"==",
"'GRAYSCALE'",
":",
"C",
"=",
"1",
"elif",
"colorspace",
"==",
"'RGB'",
"or",
"colorspace",
"==",
"'BGR'",
":",
"C",
"=",
"3",
"else",
":",
"raise",
"ValueError",
"(",
"'Input %s : Invalid Colorspace'",
"%",
"(",
"input_name",
")",
")",
"elif",
"inp",
".",
"type",
".",
"WhichOneof",
"(",
"'Type'",
")",
"==",
"'multiArrayType'",
":",
"array_shape",
"=",
"inp",
".",
"type",
".",
"multiArrayType",
".",
"shape",
"if",
"len",
"(",
"array_shape",
")",
"==",
"1",
":",
"C",
"=",
"array_shape",
"[",
"0",
"]",
"elif",
"len",
"(",
"array_shape",
")",
"==",
"3",
":",
"C",
",",
"H",
",",
"W",
"=",
"map",
"(",
"int",
",",
"array_shape",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Input %s : Multi array must be of length 1 or 3\"",
"%",
"(",
"input_name",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Input %s : Input type must be image or multi-array\"",
"%",
"(",
"input_name",
")",
")",
"shape_dict",
"[",
"input_name",
"]",
"=",
"(",
"1",
",",
"1",
",",
"C",
",",
"H",
",",
"W",
")",
"layers",
"=",
"nn_spec",
".",
"layers",
"for",
"i",
",",
"layer",
"in",
"enumerate",
"(",
"layers",
")",
":",
"for",
"inp",
"in",
"layer",
".",
"input",
":",
"assert",
"inp",
"in",
"shape_dict",
",",
"(",
"'Input %s shape not cannot be determined'",
"%",
"(",
"inp",
")",
")",
"layer_type",
"=",
"layer",
".",
"WhichOneof",
"(",
"'layer'",
")",
"if",
"layer_type",
"==",
"'custom'",
":",
"break",
"layer_translator",
"=",
"_get_translator_function",
"(",
"layer_type",
")",
"layer_translator",
"(",
"layer",
",",
"shape_dict",
")",
"return",
"shape_dict"
] | Input:
spec : mlmodel spec
input_shape_dict: dictionary of string --> tuple
string: input name
tuple: input shape as a 5 length tuple in order (Seq, Batch, C, H, W)
If input_shape_dict is not provided, input shapes are inferred from the input description in the mlmodel.
Since the description in the specification only contains values of C,H,W; Seq and Batch dimensions are set to 1.
Output:
shape_dict: dictionary containing all the blobs in the neural network and their shapes, expressed as length 5 tuples,
to be interpreted in order (Seq, Batch, C, H, W). | [
"Input",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/_infer_shapes_nn_mlmodel.py#L402-L466 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/_libsvm_converter.py | convert | def convert(libsvm_model, feature_names, target, input_length, probability):
"""Convert a svm model to the protobuf spec.
This currently supports:
* C-SVC
* nu-SVC
* Epsilon-SVR
* nu-SVR
Parameters
----------
model_path: libsvm_model
Libsvm representation of the model.
feature_names : [str] | str
Names of each of the features.
target: str
Name of the predicted class column.
probability: str
Name of the class probability column. Only used for C-SVC and nu-SVC.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
import svm as libsvm
from ...proto import SVM_pb2
from ...proto import Model_pb2
from ...proto import FeatureTypes_pb2
from ...models import MLModel
svm_type_enum = libsvm_model.param.svm_type
# Create the spec
export_spec = Model_pb2.Model()
export_spec.specificationVersion = SPECIFICATION_VERSION
if(svm_type_enum == libsvm.EPSILON_SVR or svm_type_enum == libsvm.NU_SVR):
svm = export_spec.supportVectorRegressor
else:
svm = export_spec.supportVectorClassifier
# Set the features names
inferred_length = _infer_min_num_features(libsvm_model)
if isinstance(feature_names, str):
# input will be a single array
if input_length == 'auto':
print("[WARNING] Infering an input length of %d. If this is not correct,"
" use the 'input_length' parameter." % inferred_length)
input_length = inferred_length
elif inferred_length > input_length:
raise ValueError("An input length of %d was given, but the model requires an"
" input of at least %d." % (input_length, inferred_length))
input = export_spec.description.input.add()
input.name = feature_names
input.type.multiArrayType.shape.append(input_length)
input.type.multiArrayType.dataType = Model_pb2.ArrayFeatureType.DOUBLE
else:
# input will be a series of doubles
if inferred_length > len(feature_names):
raise ValueError("%d feature names were given, but the model requires at"
" least %d features." % (len(feature_names), inferred_length))
for cur_input_name in feature_names:
input = export_spec.description.input.add()
input.name = cur_input_name
input.type.doubleType.MergeFromString(b'')
# Set target
output = export_spec.description.output.add()
output.name = target
# Set the interface types
if(svm_type_enum == libsvm.EPSILON_SVR or svm_type_enum == libsvm.NU_SVR):
export_spec.description.predictedFeatureName = target
output.type.doubleType.MergeFromString(b'')
nr_class = 2
elif(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
export_spec.description.predictedFeatureName = target
output.type.int64Type.MergeFromString(b'')
nr_class = len(libsvm_model.get_labels())
for i in range(nr_class):
svm.numberOfSupportVectorsPerClass.append(libsvm_model.nSV[i])
svm.int64ClassLabels.vector.append(libsvm_model.label[i])
if probability and bool(libsvm_model.probA):
output = export_spec.description.output.add()
output.name = probability
output.type.dictionaryType.MergeFromString(b'')
output.type.dictionaryType.int64KeyType.MergeFromString(b'')
export_spec.description.predictedProbabilitiesName = probability
else:
raise ValueError('Only the following SVM types are supported: C_SVC, NU_SVC, EPSILON_SVR, NU_SVR')
if(libsvm_model.param.kernel_type == libsvm.LINEAR):
svm.kernel.linearKernel.MergeFromString(b'') # Hack to set kernel to an empty type
elif(libsvm_model.param.kernel_type == libsvm.RBF):
svm.kernel.rbfKernel.gamma = libsvm_model.param.gamma
elif(libsvm_model.param.kernel_type == libsvm.POLY):
svm.kernel.polyKernel.degree = libsvm_model.param.degree
svm.kernel.polyKernel.c = libsvm_model.param.coef0
svm.kernel.polyKernel.gamma = libsvm_model.param.gamma
elif(libsvm_model.param.kernel_type == libsvm.SIGMOID):
svm.kernel.sigmoidKernel.c = libsvm_model.param.coef0
svm.kernel.sigmoidKernel.gamma = libsvm_model.param.gamma
else:
raise ValueError('Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.')
# set rho
# also set probA/ProbB only for SVC
if(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
num_class_pairs = nr_class * (nr_class-1)//2
for i in range(num_class_pairs):
svm.rho.append(libsvm_model.rho[i])
if(bool(libsvm_model.probA) and bool(libsvm_model.probB)):
for i in range(num_class_pairs):
svm.probA.append(libsvm_model.probA[i])
svm.probB.append(libsvm_model.probB[i])
else:
svm.rho = libsvm_model.rho[0]
# set coefficents
if(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
for _ in range(nr_class - 1):
svm.coefficients.add()
for i in range(libsvm_model.l):
for j in range(nr_class - 1):
svm.coefficients[j].alpha.append(libsvm_model.sv_coef[j][i])
else:
for i in range(libsvm_model.l):
svm.coefficients.alpha.append(libsvm_model.sv_coef[0][i])
# set support vectors
for i in range(libsvm_model.l):
j = 0
cur_support_vector = svm.sparseSupportVectors.vectors.add()
while libsvm_model.SV[i][j].index != -1:
cur_node = cur_support_vector.nodes.add()
cur_node.index = libsvm_model.SV[i][j].index
cur_node.value = libsvm_model.SV[i][j].value
j += 1
return MLModel(export_spec) | python | def convert(libsvm_model, feature_names, target, input_length, probability):
"""Convert a svm model to the protobuf spec.
This currently supports:
* C-SVC
* nu-SVC
* Epsilon-SVR
* nu-SVR
Parameters
----------
model_path: libsvm_model
Libsvm representation of the model.
feature_names : [str] | str
Names of each of the features.
target: str
Name of the predicted class column.
probability: str
Name of the class probability column. Only used for C-SVC and nu-SVC.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
import svm as libsvm
from ...proto import SVM_pb2
from ...proto import Model_pb2
from ...proto import FeatureTypes_pb2
from ...models import MLModel
svm_type_enum = libsvm_model.param.svm_type
# Create the spec
export_spec = Model_pb2.Model()
export_spec.specificationVersion = SPECIFICATION_VERSION
if(svm_type_enum == libsvm.EPSILON_SVR or svm_type_enum == libsvm.NU_SVR):
svm = export_spec.supportVectorRegressor
else:
svm = export_spec.supportVectorClassifier
# Set the features names
inferred_length = _infer_min_num_features(libsvm_model)
if isinstance(feature_names, str):
# input will be a single array
if input_length == 'auto':
print("[WARNING] Infering an input length of %d. If this is not correct,"
" use the 'input_length' parameter." % inferred_length)
input_length = inferred_length
elif inferred_length > input_length:
raise ValueError("An input length of %d was given, but the model requires an"
" input of at least %d." % (input_length, inferred_length))
input = export_spec.description.input.add()
input.name = feature_names
input.type.multiArrayType.shape.append(input_length)
input.type.multiArrayType.dataType = Model_pb2.ArrayFeatureType.DOUBLE
else:
# input will be a series of doubles
if inferred_length > len(feature_names):
raise ValueError("%d feature names were given, but the model requires at"
" least %d features." % (len(feature_names), inferred_length))
for cur_input_name in feature_names:
input = export_spec.description.input.add()
input.name = cur_input_name
input.type.doubleType.MergeFromString(b'')
# Set target
output = export_spec.description.output.add()
output.name = target
# Set the interface types
if(svm_type_enum == libsvm.EPSILON_SVR or svm_type_enum == libsvm.NU_SVR):
export_spec.description.predictedFeatureName = target
output.type.doubleType.MergeFromString(b'')
nr_class = 2
elif(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
export_spec.description.predictedFeatureName = target
output.type.int64Type.MergeFromString(b'')
nr_class = len(libsvm_model.get_labels())
for i in range(nr_class):
svm.numberOfSupportVectorsPerClass.append(libsvm_model.nSV[i])
svm.int64ClassLabels.vector.append(libsvm_model.label[i])
if probability and bool(libsvm_model.probA):
output = export_spec.description.output.add()
output.name = probability
output.type.dictionaryType.MergeFromString(b'')
output.type.dictionaryType.int64KeyType.MergeFromString(b'')
export_spec.description.predictedProbabilitiesName = probability
else:
raise ValueError('Only the following SVM types are supported: C_SVC, NU_SVC, EPSILON_SVR, NU_SVR')
if(libsvm_model.param.kernel_type == libsvm.LINEAR):
svm.kernel.linearKernel.MergeFromString(b'') # Hack to set kernel to an empty type
elif(libsvm_model.param.kernel_type == libsvm.RBF):
svm.kernel.rbfKernel.gamma = libsvm_model.param.gamma
elif(libsvm_model.param.kernel_type == libsvm.POLY):
svm.kernel.polyKernel.degree = libsvm_model.param.degree
svm.kernel.polyKernel.c = libsvm_model.param.coef0
svm.kernel.polyKernel.gamma = libsvm_model.param.gamma
elif(libsvm_model.param.kernel_type == libsvm.SIGMOID):
svm.kernel.sigmoidKernel.c = libsvm_model.param.coef0
svm.kernel.sigmoidKernel.gamma = libsvm_model.param.gamma
else:
raise ValueError('Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.')
# set rho
# also set probA/ProbB only for SVC
if(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
num_class_pairs = nr_class * (nr_class-1)//2
for i in range(num_class_pairs):
svm.rho.append(libsvm_model.rho[i])
if(bool(libsvm_model.probA) and bool(libsvm_model.probB)):
for i in range(num_class_pairs):
svm.probA.append(libsvm_model.probA[i])
svm.probB.append(libsvm_model.probB[i])
else:
svm.rho = libsvm_model.rho[0]
# set coefficents
if(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
for _ in range(nr_class - 1):
svm.coefficients.add()
for i in range(libsvm_model.l):
for j in range(nr_class - 1):
svm.coefficients[j].alpha.append(libsvm_model.sv_coef[j][i])
else:
for i in range(libsvm_model.l):
svm.coefficients.alpha.append(libsvm_model.sv_coef[0][i])
# set support vectors
for i in range(libsvm_model.l):
j = 0
cur_support_vector = svm.sparseSupportVectors.vectors.add()
while libsvm_model.SV[i][j].index != -1:
cur_node = cur_support_vector.nodes.add()
cur_node.index = libsvm_model.SV[i][j].index
cur_node.value = libsvm_model.SV[i][j].value
j += 1
return MLModel(export_spec) | [
"def",
"convert",
"(",
"libsvm_model",
",",
"feature_names",
",",
"target",
",",
"input_length",
",",
"probability",
")",
":",
"if",
"not",
"(",
"HAS_LIBSVM",
")",
":",
"raise",
"RuntimeError",
"(",
"'libsvm not found. libsvm conversion API is disabled.'",
")",
"import",
"svm",
"as",
"libsvm",
"from",
".",
".",
".",
"proto",
"import",
"SVM_pb2",
"from",
".",
".",
".",
"proto",
"import",
"Model_pb2",
"from",
".",
".",
".",
"proto",
"import",
"FeatureTypes_pb2",
"from",
".",
".",
".",
"models",
"import",
"MLModel",
"svm_type_enum",
"=",
"libsvm_model",
".",
"param",
".",
"svm_type",
"# Create the spec",
"export_spec",
"=",
"Model_pb2",
".",
"Model",
"(",
")",
"export_spec",
".",
"specificationVersion",
"=",
"SPECIFICATION_VERSION",
"if",
"(",
"svm_type_enum",
"==",
"libsvm",
".",
"EPSILON_SVR",
"or",
"svm_type_enum",
"==",
"libsvm",
".",
"NU_SVR",
")",
":",
"svm",
"=",
"export_spec",
".",
"supportVectorRegressor",
"else",
":",
"svm",
"=",
"export_spec",
".",
"supportVectorClassifier",
"# Set the features names",
"inferred_length",
"=",
"_infer_min_num_features",
"(",
"libsvm_model",
")",
"if",
"isinstance",
"(",
"feature_names",
",",
"str",
")",
":",
"# input will be a single array",
"if",
"input_length",
"==",
"'auto'",
":",
"print",
"(",
"\"[WARNING] Infering an input length of %d. If this is not correct,\"",
"\" use the 'input_length' parameter.\"",
"%",
"inferred_length",
")",
"input_length",
"=",
"inferred_length",
"elif",
"inferred_length",
">",
"input_length",
":",
"raise",
"ValueError",
"(",
"\"An input length of %d was given, but the model requires an\"",
"\" input of at least %d.\"",
"%",
"(",
"input_length",
",",
"inferred_length",
")",
")",
"input",
"=",
"export_spec",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"input",
".",
"name",
"=",
"feature_names",
"input",
".",
"type",
".",
"multiArrayType",
".",
"shape",
".",
"append",
"(",
"input_length",
")",
"input",
".",
"type",
".",
"multiArrayType",
".",
"dataType",
"=",
"Model_pb2",
".",
"ArrayFeatureType",
".",
"DOUBLE",
"else",
":",
"# input will be a series of doubles",
"if",
"inferred_length",
">",
"len",
"(",
"feature_names",
")",
":",
"raise",
"ValueError",
"(",
"\"%d feature names were given, but the model requires at\"",
"\" least %d features.\"",
"%",
"(",
"len",
"(",
"feature_names",
")",
",",
"inferred_length",
")",
")",
"for",
"cur_input_name",
"in",
"feature_names",
":",
"input",
"=",
"export_spec",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"input",
".",
"name",
"=",
"cur_input_name",
"input",
".",
"type",
".",
"doubleType",
".",
"MergeFromString",
"(",
"b''",
")",
"# Set target",
"output",
"=",
"export_spec",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"output",
".",
"name",
"=",
"target",
"# Set the interface types",
"if",
"(",
"svm_type_enum",
"==",
"libsvm",
".",
"EPSILON_SVR",
"or",
"svm_type_enum",
"==",
"libsvm",
".",
"NU_SVR",
")",
":",
"export_spec",
".",
"description",
".",
"predictedFeatureName",
"=",
"target",
"output",
".",
"type",
".",
"doubleType",
".",
"MergeFromString",
"(",
"b''",
")",
"nr_class",
"=",
"2",
"elif",
"(",
"svm_type_enum",
"==",
"libsvm",
".",
"C_SVC",
"or",
"svm_type_enum",
"==",
"libsvm",
".",
"NU_SVC",
")",
":",
"export_spec",
".",
"description",
".",
"predictedFeatureName",
"=",
"target",
"output",
".",
"type",
".",
"int64Type",
".",
"MergeFromString",
"(",
"b''",
")",
"nr_class",
"=",
"len",
"(",
"libsvm_model",
".",
"get_labels",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"nr_class",
")",
":",
"svm",
".",
"numberOfSupportVectorsPerClass",
".",
"append",
"(",
"libsvm_model",
".",
"nSV",
"[",
"i",
"]",
")",
"svm",
".",
"int64ClassLabels",
".",
"vector",
".",
"append",
"(",
"libsvm_model",
".",
"label",
"[",
"i",
"]",
")",
"if",
"probability",
"and",
"bool",
"(",
"libsvm_model",
".",
"probA",
")",
":",
"output",
"=",
"export_spec",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"output",
".",
"name",
"=",
"probability",
"output",
".",
"type",
".",
"dictionaryType",
".",
"MergeFromString",
"(",
"b''",
")",
"output",
".",
"type",
".",
"dictionaryType",
".",
"int64KeyType",
".",
"MergeFromString",
"(",
"b''",
")",
"export_spec",
".",
"description",
".",
"predictedProbabilitiesName",
"=",
"probability",
"else",
":",
"raise",
"ValueError",
"(",
"'Only the following SVM types are supported: C_SVC, NU_SVC, EPSILON_SVR, NU_SVR'",
")",
"if",
"(",
"libsvm_model",
".",
"param",
".",
"kernel_type",
"==",
"libsvm",
".",
"LINEAR",
")",
":",
"svm",
".",
"kernel",
".",
"linearKernel",
".",
"MergeFromString",
"(",
"b''",
")",
"# Hack to set kernel to an empty type",
"elif",
"(",
"libsvm_model",
".",
"param",
".",
"kernel_type",
"==",
"libsvm",
".",
"RBF",
")",
":",
"svm",
".",
"kernel",
".",
"rbfKernel",
".",
"gamma",
"=",
"libsvm_model",
".",
"param",
".",
"gamma",
"elif",
"(",
"libsvm_model",
".",
"param",
".",
"kernel_type",
"==",
"libsvm",
".",
"POLY",
")",
":",
"svm",
".",
"kernel",
".",
"polyKernel",
".",
"degree",
"=",
"libsvm_model",
".",
"param",
".",
"degree",
"svm",
".",
"kernel",
".",
"polyKernel",
".",
"c",
"=",
"libsvm_model",
".",
"param",
".",
"coef0",
"svm",
".",
"kernel",
".",
"polyKernel",
".",
"gamma",
"=",
"libsvm_model",
".",
"param",
".",
"gamma",
"elif",
"(",
"libsvm_model",
".",
"param",
".",
"kernel_type",
"==",
"libsvm",
".",
"SIGMOID",
")",
":",
"svm",
".",
"kernel",
".",
"sigmoidKernel",
".",
"c",
"=",
"libsvm_model",
".",
"param",
".",
"coef0",
"svm",
".",
"kernel",
".",
"sigmoidKernel",
".",
"gamma",
"=",
"libsvm_model",
".",
"param",
".",
"gamma",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.'",
")",
"# set rho",
"# also set probA/ProbB only for SVC",
"if",
"(",
"svm_type_enum",
"==",
"libsvm",
".",
"C_SVC",
"or",
"svm_type_enum",
"==",
"libsvm",
".",
"NU_SVC",
")",
":",
"num_class_pairs",
"=",
"nr_class",
"*",
"(",
"nr_class",
"-",
"1",
")",
"//",
"2",
"for",
"i",
"in",
"range",
"(",
"num_class_pairs",
")",
":",
"svm",
".",
"rho",
".",
"append",
"(",
"libsvm_model",
".",
"rho",
"[",
"i",
"]",
")",
"if",
"(",
"bool",
"(",
"libsvm_model",
".",
"probA",
")",
"and",
"bool",
"(",
"libsvm_model",
".",
"probB",
")",
")",
":",
"for",
"i",
"in",
"range",
"(",
"num_class_pairs",
")",
":",
"svm",
".",
"probA",
".",
"append",
"(",
"libsvm_model",
".",
"probA",
"[",
"i",
"]",
")",
"svm",
".",
"probB",
".",
"append",
"(",
"libsvm_model",
".",
"probB",
"[",
"i",
"]",
")",
"else",
":",
"svm",
".",
"rho",
"=",
"libsvm_model",
".",
"rho",
"[",
"0",
"]",
"# set coefficents",
"if",
"(",
"svm_type_enum",
"==",
"libsvm",
".",
"C_SVC",
"or",
"svm_type_enum",
"==",
"libsvm",
".",
"NU_SVC",
")",
":",
"for",
"_",
"in",
"range",
"(",
"nr_class",
"-",
"1",
")",
":",
"svm",
".",
"coefficients",
".",
"add",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"libsvm_model",
".",
"l",
")",
":",
"for",
"j",
"in",
"range",
"(",
"nr_class",
"-",
"1",
")",
":",
"svm",
".",
"coefficients",
"[",
"j",
"]",
".",
"alpha",
".",
"append",
"(",
"libsvm_model",
".",
"sv_coef",
"[",
"j",
"]",
"[",
"i",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"libsvm_model",
".",
"l",
")",
":",
"svm",
".",
"coefficients",
".",
"alpha",
".",
"append",
"(",
"libsvm_model",
".",
"sv_coef",
"[",
"0",
"]",
"[",
"i",
"]",
")",
"# set support vectors",
"for",
"i",
"in",
"range",
"(",
"libsvm_model",
".",
"l",
")",
":",
"j",
"=",
"0",
"cur_support_vector",
"=",
"svm",
".",
"sparseSupportVectors",
".",
"vectors",
".",
"add",
"(",
")",
"while",
"libsvm_model",
".",
"SV",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"index",
"!=",
"-",
"1",
":",
"cur_node",
"=",
"cur_support_vector",
".",
"nodes",
".",
"add",
"(",
")",
"cur_node",
".",
"index",
"=",
"libsvm_model",
".",
"SV",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"index",
"cur_node",
".",
"value",
"=",
"libsvm_model",
".",
"SV",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"value",
"j",
"+=",
"1",
"return",
"MLModel",
"(",
"export_spec",
")"
] | Convert a svm model to the protobuf spec.
This currently supports:
* C-SVC
* nu-SVC
* Epsilon-SVR
* nu-SVR
Parameters
----------
model_path: libsvm_model
Libsvm representation of the model.
feature_names : [str] | str
Names of each of the features.
target: str
Name of the predicted class column.
probability: str
Name of the class probability column. Only used for C-SVC and nu-SVC.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"svm",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/_libsvm_converter.py#L23-L176 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py | create | def create(dataset, session_id, target, features=None, prediction_window=100,
validation_set='auto', max_iterations=10, batch_size=32, verbose=True):
"""
Create an :class:`ActivityClassifier` model.
Parameters
----------
dataset : SFrame
Input data which consists of `sessions` of data where each session is
a sequence of data. The data must be in `stacked` format, grouped by
session. Within each session, the data is assumed to be sorted
temporally. Columns in `features` will be used to train a model that
will make a prediction using labels in the `target` column.
session_id : string
Name of the column that contains a unique ID for each session.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Name of the columns containing the input features that will be used
for classification. If set to `None`, all columns except `session_id`
and `target` will be used.
prediction_window : int, optional
Number of time units between predictions. For example, if your input
data is sampled at 100Hz, and the `prediction_window` is set to 100,
then this model will make a prediction every 1 second.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance to
prevent the model from overfitting to the training data.
For each row of the progress table, accuracy is measured over the
provided training dataset and the `validation_set`. The format of this
SFrame must be the same as the training set.
When set to 'auto', a validation set is automatically sampled from the
training data (if the training data has > 100 sessions). If
validation_set is set to None, then all the data will be used for
training.
max_iterations : int , optional
Maximum number of iterations/epochs made over the data during the
training phase.
batch_size : int, optional
Number of sequence chunks used per training step. Must be greater than
the number of GPUs in use.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ActivityClassifier
A trained :class:`ActivityClassifier` model.
Examples
--------
.. sourcecode:: python
>>> import turicreate as tc
# Training on dummy data
>>> data = tc.SFrame({
... 'accelerometer_x': [0.1, 0.2, 0.3, 0.4, 0.5] * 10,
... 'accelerometer_y': [0.5, 0.4, 0.3, 0.2, 0.1] * 10,
... 'accelerometer_z': [0.01, 0.01, 0.02, 0.02, 0.01] * 10,
... 'session_id': [0, 0, 0] * 10 + [1, 1] * 10,
... 'activity': ['walk', 'run', 'run'] * 10 + ['swim', 'swim'] * 10
... })
# Create an activity classifier
>>> model = tc.activity_classifier.create(data,
... session_id='session_id', target='activity',
... features=['accelerometer_x', 'accelerometer_y', 'accelerometer_z'])
# Make predictions (as probability vector, or class)
>>> predictions = model.predict(data)
>>> predictions = model.predict(data, output_type='probability_vector')
# Get both predictions and classes together
>>> predictions = model.classify(data)
# Get topk predictions (instead of only top-1) if your labels have more
# 2 classes
>>> predictions = model.predict_topk(data, k = 3)
# Evaluate the model
>>> results = model.evaluate(data)
See Also
--------
ActivityClassifier, util.random_split_by_session
"""
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
from .._mxnet import _mxnet_utils
from ._mx_model_architecture import _net_params
from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter
from ._sframe_sequence_iterator import prep_data as _prep_data
from ._mx_model_architecture import _define_model_mxnet, _fit_model_mxnet
from ._mps_model_architecture import _define_model_mps, _fit_model_mps
from .._mps_utils import (use_mps as _use_mps,
mps_device_name as _mps_device_name,
ac_weights_mps_to_mxnet as _ac_weights_mps_to_mxnet)
if not isinstance(target, str):
raise _ToolkitError('target must be of type str')
if not isinstance(session_id, str):
raise _ToolkitError('session_id must be of type str')
_tkutl._raise_error_if_sframe_empty(dataset, 'dataset')
_tkutl._numeric_param_check_range('prediction_window', prediction_window, 1, 400)
_tkutl._numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
if features is None:
features = _fe_tkutl.get_column_names(dataset,
interpret_as_excluded=True,
column_names=[session_id, target])
if not hasattr(features, '__iter__'):
raise TypeError("Input 'features' must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError("Invalid feature %s: Feature names must be of type str." % x)
if len(features) == 0:
raise TypeError("Input 'features' must contain at least one column name.")
start_time = _time.time()
dataset = _tkutl._toolkits_select_columns(dataset, features + [session_id, target])
_tkutl._raise_error_if_sarray_not_expected_dtype(dataset[target], target, [str, int])
_tkutl._raise_error_if_sarray_not_expected_dtype(dataset[session_id], session_id, [str, int])
if isinstance(validation_set, str) and validation_set == 'auto':
# Computing the number of unique sessions in this way is relatively
# expensive. Ideally we'd incorporate this logic into the C++ code that
# chunks the raw data by prediction window.
# TODO: https://github.com/apple/turicreate/issues/991
unique_sessions = _SFrame({'session': dataset[session_id].unique()})
if len(unique_sessions) < _MIN_NUM_SESSIONS_FOR_SPLIT:
print ("The dataset has less than the minimum of", _MIN_NUM_SESSIONS_FOR_SPLIT, "sessions required for train-validation split. Continuing without validation set")
validation_set = None
else:
dataset, validation_set = _random_split_by_session(dataset, session_id)
# Encode the target column to numerical values
use_target = target is not None
dataset, target_map = _encode_target(dataset, target)
predictions_in_chunk = 20
chunked_data, num_sessions = _prep_data(dataset, features, session_id, prediction_window,
predictions_in_chunk, target=target, verbose=verbose)
# Decide whether to use MPS GPU, MXnet GPU or CPU
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)
use_mps = _use_mps() and num_mxnet_gpus == 0
if verbose:
if use_mps:
print('Using GPU to create model ({})'.format(_mps_device_name()))
elif num_mxnet_gpus == 1:
print('Using GPU to create model (CUDA)')
elif num_mxnet_gpus > 1:
print('Using {} GPUs to create model (CUDA)'.format(num_mxnet_gpus))
else:
print('Using CPU to create model')
# Create data iterators
user_provided_batch_size = batch_size
batch_size = max(batch_size, num_mxnet_gpus, 1)
use_mx_data_batch = not use_mps
data_iter = _SFrameSequenceIter(chunked_data, len(features),
prediction_window, predictions_in_chunk,
batch_size, use_target=use_target, mx_output=use_mx_data_batch)
if validation_set is not None:
_tkutl._raise_error_if_not_sframe(validation_set, 'validation_set')
_tkutl._raise_error_if_sframe_empty(validation_set, 'validation_set')
validation_set = _tkutl._toolkits_select_columns(
validation_set, features + [session_id, target])
validation_set = validation_set.filter_by(list(target_map.keys()), target)
validation_set, mapping = _encode_target(validation_set, target, target_map)
chunked_validation_set, _ = _prep_data(validation_set, features, session_id, prediction_window,
predictions_in_chunk, target=target, verbose=False)
valid_iter = _SFrameSequenceIter(chunked_validation_set, len(features),
prediction_window, predictions_in_chunk,
batch_size, use_target=use_target, mx_output=use_mx_data_batch)
else:
valid_iter = None
# Define model architecture
context = _mxnet_utils.get_mxnet_context(max_devices=num_sessions)
# Always create MXnet models, as the pred_model is later saved to the state
# If MPS is used - the loss_model will be overwritten
loss_model, pred_model = _define_model_mxnet(len(target_map), prediction_window,
predictions_in_chunk, context)
if use_mps:
loss_model = _define_model_mps(batch_size, len(features), len(target_map),
prediction_window, predictions_in_chunk, is_prediction_model=False)
log = _fit_model_mps(loss_model, data_iter, valid_iter, max_iterations, verbose)
else:
# Train the model using Mxnet
log = _fit_model_mxnet(loss_model, data_iter, valid_iter,
max_iterations, num_mxnet_gpus, verbose)
# Set up prediction model
pred_model.bind(data_shapes=data_iter.provide_data, label_shapes=None,
for_training=False)
if use_mps:
mps_params = loss_model.export()
arg_params, aux_params = _ac_weights_mps_to_mxnet(mps_params, _net_params['lstm_h'])
else:
arg_params, aux_params = loss_model.get_params()
pred_model.init_params(arg_params=arg_params, aux_params=aux_params)
# Save the model
state = {
'_pred_model': pred_model,
'verbose': verbose,
'training_time': _time.time() - start_time,
'target': target,
'classes': sorted(target_map.keys()),
'features': features,
'session_id': session_id,
'prediction_window': prediction_window,
'max_iterations': max_iterations,
'num_examples': len(dataset),
'num_sessions': num_sessions,
'num_classes': len(target_map),
'num_features': len(features),
'training_accuracy': log['train_acc'],
'training_log_loss': log['train_loss'],
'_target_id_map': target_map,
'_id_target_map': {v: k for k, v in target_map.items()},
'_predictions_in_chunk': predictions_in_chunk,
'_recalibrated_batch_size': data_iter.batch_size,
'batch_size' : user_provided_batch_size
}
if validation_set is not None:
state['valid_accuracy'] = log['valid_acc']
state['valid_log_loss'] = log['valid_loss']
model = ActivityClassifier(state)
return model | python | def create(dataset, session_id, target, features=None, prediction_window=100,
validation_set='auto', max_iterations=10, batch_size=32, verbose=True):
"""
Create an :class:`ActivityClassifier` model.
Parameters
----------
dataset : SFrame
Input data which consists of `sessions` of data where each session is
a sequence of data. The data must be in `stacked` format, grouped by
session. Within each session, the data is assumed to be sorted
temporally. Columns in `features` will be used to train a model that
will make a prediction using labels in the `target` column.
session_id : string
Name of the column that contains a unique ID for each session.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Name of the columns containing the input features that will be used
for classification. If set to `None`, all columns except `session_id`
and `target` will be used.
prediction_window : int, optional
Number of time units between predictions. For example, if your input
data is sampled at 100Hz, and the `prediction_window` is set to 100,
then this model will make a prediction every 1 second.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance to
prevent the model from overfitting to the training data.
For each row of the progress table, accuracy is measured over the
provided training dataset and the `validation_set`. The format of this
SFrame must be the same as the training set.
When set to 'auto', a validation set is automatically sampled from the
training data (if the training data has > 100 sessions). If
validation_set is set to None, then all the data will be used for
training.
max_iterations : int , optional
Maximum number of iterations/epochs made over the data during the
training phase.
batch_size : int, optional
Number of sequence chunks used per training step. Must be greater than
the number of GPUs in use.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ActivityClassifier
A trained :class:`ActivityClassifier` model.
Examples
--------
.. sourcecode:: python
>>> import turicreate as tc
# Training on dummy data
>>> data = tc.SFrame({
... 'accelerometer_x': [0.1, 0.2, 0.3, 0.4, 0.5] * 10,
... 'accelerometer_y': [0.5, 0.4, 0.3, 0.2, 0.1] * 10,
... 'accelerometer_z': [0.01, 0.01, 0.02, 0.02, 0.01] * 10,
... 'session_id': [0, 0, 0] * 10 + [1, 1] * 10,
... 'activity': ['walk', 'run', 'run'] * 10 + ['swim', 'swim'] * 10
... })
# Create an activity classifier
>>> model = tc.activity_classifier.create(data,
... session_id='session_id', target='activity',
... features=['accelerometer_x', 'accelerometer_y', 'accelerometer_z'])
# Make predictions (as probability vector, or class)
>>> predictions = model.predict(data)
>>> predictions = model.predict(data, output_type='probability_vector')
# Get both predictions and classes together
>>> predictions = model.classify(data)
# Get topk predictions (instead of only top-1) if your labels have more
# 2 classes
>>> predictions = model.predict_topk(data, k = 3)
# Evaluate the model
>>> results = model.evaluate(data)
See Also
--------
ActivityClassifier, util.random_split_by_session
"""
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
from .._mxnet import _mxnet_utils
from ._mx_model_architecture import _net_params
from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter
from ._sframe_sequence_iterator import prep_data as _prep_data
from ._mx_model_architecture import _define_model_mxnet, _fit_model_mxnet
from ._mps_model_architecture import _define_model_mps, _fit_model_mps
from .._mps_utils import (use_mps as _use_mps,
mps_device_name as _mps_device_name,
ac_weights_mps_to_mxnet as _ac_weights_mps_to_mxnet)
if not isinstance(target, str):
raise _ToolkitError('target must be of type str')
if not isinstance(session_id, str):
raise _ToolkitError('session_id must be of type str')
_tkutl._raise_error_if_sframe_empty(dataset, 'dataset')
_tkutl._numeric_param_check_range('prediction_window', prediction_window, 1, 400)
_tkutl._numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
if features is None:
features = _fe_tkutl.get_column_names(dataset,
interpret_as_excluded=True,
column_names=[session_id, target])
if not hasattr(features, '__iter__'):
raise TypeError("Input 'features' must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError("Invalid feature %s: Feature names must be of type str." % x)
if len(features) == 0:
raise TypeError("Input 'features' must contain at least one column name.")
start_time = _time.time()
dataset = _tkutl._toolkits_select_columns(dataset, features + [session_id, target])
_tkutl._raise_error_if_sarray_not_expected_dtype(dataset[target], target, [str, int])
_tkutl._raise_error_if_sarray_not_expected_dtype(dataset[session_id], session_id, [str, int])
if isinstance(validation_set, str) and validation_set == 'auto':
# Computing the number of unique sessions in this way is relatively
# expensive. Ideally we'd incorporate this logic into the C++ code that
# chunks the raw data by prediction window.
# TODO: https://github.com/apple/turicreate/issues/991
unique_sessions = _SFrame({'session': dataset[session_id].unique()})
if len(unique_sessions) < _MIN_NUM_SESSIONS_FOR_SPLIT:
print ("The dataset has less than the minimum of", _MIN_NUM_SESSIONS_FOR_SPLIT, "sessions required for train-validation split. Continuing without validation set")
validation_set = None
else:
dataset, validation_set = _random_split_by_session(dataset, session_id)
# Encode the target column to numerical values
use_target = target is not None
dataset, target_map = _encode_target(dataset, target)
predictions_in_chunk = 20
chunked_data, num_sessions = _prep_data(dataset, features, session_id, prediction_window,
predictions_in_chunk, target=target, verbose=verbose)
# Decide whether to use MPS GPU, MXnet GPU or CPU
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)
use_mps = _use_mps() and num_mxnet_gpus == 0
if verbose:
if use_mps:
print('Using GPU to create model ({})'.format(_mps_device_name()))
elif num_mxnet_gpus == 1:
print('Using GPU to create model (CUDA)')
elif num_mxnet_gpus > 1:
print('Using {} GPUs to create model (CUDA)'.format(num_mxnet_gpus))
else:
print('Using CPU to create model')
# Create data iterators
user_provided_batch_size = batch_size
batch_size = max(batch_size, num_mxnet_gpus, 1)
use_mx_data_batch = not use_mps
data_iter = _SFrameSequenceIter(chunked_data, len(features),
prediction_window, predictions_in_chunk,
batch_size, use_target=use_target, mx_output=use_mx_data_batch)
if validation_set is not None:
_tkutl._raise_error_if_not_sframe(validation_set, 'validation_set')
_tkutl._raise_error_if_sframe_empty(validation_set, 'validation_set')
validation_set = _tkutl._toolkits_select_columns(
validation_set, features + [session_id, target])
validation_set = validation_set.filter_by(list(target_map.keys()), target)
validation_set, mapping = _encode_target(validation_set, target, target_map)
chunked_validation_set, _ = _prep_data(validation_set, features, session_id, prediction_window,
predictions_in_chunk, target=target, verbose=False)
valid_iter = _SFrameSequenceIter(chunked_validation_set, len(features),
prediction_window, predictions_in_chunk,
batch_size, use_target=use_target, mx_output=use_mx_data_batch)
else:
valid_iter = None
# Define model architecture
context = _mxnet_utils.get_mxnet_context(max_devices=num_sessions)
# Always create MXnet models, as the pred_model is later saved to the state
# If MPS is used - the loss_model will be overwritten
loss_model, pred_model = _define_model_mxnet(len(target_map), prediction_window,
predictions_in_chunk, context)
if use_mps:
loss_model = _define_model_mps(batch_size, len(features), len(target_map),
prediction_window, predictions_in_chunk, is_prediction_model=False)
log = _fit_model_mps(loss_model, data_iter, valid_iter, max_iterations, verbose)
else:
# Train the model using Mxnet
log = _fit_model_mxnet(loss_model, data_iter, valid_iter,
max_iterations, num_mxnet_gpus, verbose)
# Set up prediction model
pred_model.bind(data_shapes=data_iter.provide_data, label_shapes=None,
for_training=False)
if use_mps:
mps_params = loss_model.export()
arg_params, aux_params = _ac_weights_mps_to_mxnet(mps_params, _net_params['lstm_h'])
else:
arg_params, aux_params = loss_model.get_params()
pred_model.init_params(arg_params=arg_params, aux_params=aux_params)
# Save the model
state = {
'_pred_model': pred_model,
'verbose': verbose,
'training_time': _time.time() - start_time,
'target': target,
'classes': sorted(target_map.keys()),
'features': features,
'session_id': session_id,
'prediction_window': prediction_window,
'max_iterations': max_iterations,
'num_examples': len(dataset),
'num_sessions': num_sessions,
'num_classes': len(target_map),
'num_features': len(features),
'training_accuracy': log['train_acc'],
'training_log_loss': log['train_loss'],
'_target_id_map': target_map,
'_id_target_map': {v: k for k, v in target_map.items()},
'_predictions_in_chunk': predictions_in_chunk,
'_recalibrated_batch_size': data_iter.batch_size,
'batch_size' : user_provided_batch_size
}
if validation_set is not None:
state['valid_accuracy'] = log['valid_acc']
state['valid_log_loss'] = log['valid_loss']
model = ActivityClassifier(state)
return model | [
"def",
"create",
"(",
"dataset",
",",
"session_id",
",",
"target",
",",
"features",
"=",
"None",
",",
"prediction_window",
"=",
"100",
",",
"validation_set",
"=",
"'auto'",
",",
"max_iterations",
"=",
"10",
",",
"batch_size",
"=",
"32",
",",
"verbose",
"=",
"True",
")",
":",
"_tkutl",
".",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"from",
".",
"_mx_model_architecture",
"import",
"_net_params",
"from",
".",
"_sframe_sequence_iterator",
"import",
"SFrameSequenceIter",
"as",
"_SFrameSequenceIter",
"from",
".",
"_sframe_sequence_iterator",
"import",
"prep_data",
"as",
"_prep_data",
"from",
".",
"_mx_model_architecture",
"import",
"_define_model_mxnet",
",",
"_fit_model_mxnet",
"from",
".",
"_mps_model_architecture",
"import",
"_define_model_mps",
",",
"_fit_model_mps",
"from",
".",
".",
"_mps_utils",
"import",
"(",
"use_mps",
"as",
"_use_mps",
",",
"mps_device_name",
"as",
"_mps_device_name",
",",
"ac_weights_mps_to_mxnet",
"as",
"_ac_weights_mps_to_mxnet",
")",
"if",
"not",
"isinstance",
"(",
"target",
",",
"str",
")",
":",
"raise",
"_ToolkitError",
"(",
"'target must be of type str'",
")",
"if",
"not",
"isinstance",
"(",
"session_id",
",",
"str",
")",
":",
"raise",
"_ToolkitError",
"(",
"'session_id must be of type str'",
")",
"_tkutl",
".",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"'dataset'",
")",
"_tkutl",
".",
"_numeric_param_check_range",
"(",
"'prediction_window'",
",",
"prediction_window",
",",
"1",
",",
"400",
")",
"_tkutl",
".",
"_numeric_param_check_range",
"(",
"'max_iterations'",
",",
"max_iterations",
",",
"0",
",",
"_six",
".",
"MAXSIZE",
")",
"if",
"features",
"is",
"None",
":",
"features",
"=",
"_fe_tkutl",
".",
"get_column_names",
"(",
"dataset",
",",
"interpret_as_excluded",
"=",
"True",
",",
"column_names",
"=",
"[",
"session_id",
",",
"target",
"]",
")",
"if",
"not",
"hasattr",
"(",
"features",
",",
"'__iter__'",
")",
":",
"raise",
"TypeError",
"(",
"\"Input 'features' must be a list.\"",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"str",
")",
"for",
"x",
"in",
"features",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid feature %s: Feature names must be of type str.\"",
"%",
"x",
")",
"if",
"len",
"(",
"features",
")",
"==",
"0",
":",
"raise",
"TypeError",
"(",
"\"Input 'features' must contain at least one column name.\"",
")",
"start_time",
"=",
"_time",
".",
"time",
"(",
")",
"dataset",
"=",
"_tkutl",
".",
"_toolkits_select_columns",
"(",
"dataset",
",",
"features",
"+",
"[",
"session_id",
",",
"target",
"]",
")",
"_tkutl",
".",
"_raise_error_if_sarray_not_expected_dtype",
"(",
"dataset",
"[",
"target",
"]",
",",
"target",
",",
"[",
"str",
",",
"int",
"]",
")",
"_tkutl",
".",
"_raise_error_if_sarray_not_expected_dtype",
"(",
"dataset",
"[",
"session_id",
"]",
",",
"session_id",
",",
"[",
"str",
",",
"int",
"]",
")",
"if",
"isinstance",
"(",
"validation_set",
",",
"str",
")",
"and",
"validation_set",
"==",
"'auto'",
":",
"# Computing the number of unique sessions in this way is relatively",
"# expensive. Ideally we'd incorporate this logic into the C++ code that",
"# chunks the raw data by prediction window.",
"# TODO: https://github.com/apple/turicreate/issues/991",
"unique_sessions",
"=",
"_SFrame",
"(",
"{",
"'session'",
":",
"dataset",
"[",
"session_id",
"]",
".",
"unique",
"(",
")",
"}",
")",
"if",
"len",
"(",
"unique_sessions",
")",
"<",
"_MIN_NUM_SESSIONS_FOR_SPLIT",
":",
"print",
"(",
"\"The dataset has less than the minimum of\"",
",",
"_MIN_NUM_SESSIONS_FOR_SPLIT",
",",
"\"sessions required for train-validation split. Continuing without validation set\"",
")",
"validation_set",
"=",
"None",
"else",
":",
"dataset",
",",
"validation_set",
"=",
"_random_split_by_session",
"(",
"dataset",
",",
"session_id",
")",
"# Encode the target column to numerical values",
"use_target",
"=",
"target",
"is",
"not",
"None",
"dataset",
",",
"target_map",
"=",
"_encode_target",
"(",
"dataset",
",",
"target",
")",
"predictions_in_chunk",
"=",
"20",
"chunked_data",
",",
"num_sessions",
"=",
"_prep_data",
"(",
"dataset",
",",
"features",
",",
"session_id",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"target",
"=",
"target",
",",
"verbose",
"=",
"verbose",
")",
"# Decide whether to use MPS GPU, MXnet GPU or CPU",
"num_mxnet_gpus",
"=",
"_mxnet_utils",
".",
"get_num_gpus_in_use",
"(",
"max_devices",
"=",
"num_sessions",
")",
"use_mps",
"=",
"_use_mps",
"(",
")",
"and",
"num_mxnet_gpus",
"==",
"0",
"if",
"verbose",
":",
"if",
"use_mps",
":",
"print",
"(",
"'Using GPU to create model ({})'",
".",
"format",
"(",
"_mps_device_name",
"(",
")",
")",
")",
"elif",
"num_mxnet_gpus",
"==",
"1",
":",
"print",
"(",
"'Using GPU to create model (CUDA)'",
")",
"elif",
"num_mxnet_gpus",
">",
"1",
":",
"print",
"(",
"'Using {} GPUs to create model (CUDA)'",
".",
"format",
"(",
"num_mxnet_gpus",
")",
")",
"else",
":",
"print",
"(",
"'Using CPU to create model'",
")",
"# Create data iterators",
"user_provided_batch_size",
"=",
"batch_size",
"batch_size",
"=",
"max",
"(",
"batch_size",
",",
"num_mxnet_gpus",
",",
"1",
")",
"use_mx_data_batch",
"=",
"not",
"use_mps",
"data_iter",
"=",
"_SFrameSequenceIter",
"(",
"chunked_data",
",",
"len",
"(",
"features",
")",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"batch_size",
",",
"use_target",
"=",
"use_target",
",",
"mx_output",
"=",
"use_mx_data_batch",
")",
"if",
"validation_set",
"is",
"not",
"None",
":",
"_tkutl",
".",
"_raise_error_if_not_sframe",
"(",
"validation_set",
",",
"'validation_set'",
")",
"_tkutl",
".",
"_raise_error_if_sframe_empty",
"(",
"validation_set",
",",
"'validation_set'",
")",
"validation_set",
"=",
"_tkutl",
".",
"_toolkits_select_columns",
"(",
"validation_set",
",",
"features",
"+",
"[",
"session_id",
",",
"target",
"]",
")",
"validation_set",
"=",
"validation_set",
".",
"filter_by",
"(",
"list",
"(",
"target_map",
".",
"keys",
"(",
")",
")",
",",
"target",
")",
"validation_set",
",",
"mapping",
"=",
"_encode_target",
"(",
"validation_set",
",",
"target",
",",
"target_map",
")",
"chunked_validation_set",
",",
"_",
"=",
"_prep_data",
"(",
"validation_set",
",",
"features",
",",
"session_id",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"target",
"=",
"target",
",",
"verbose",
"=",
"False",
")",
"valid_iter",
"=",
"_SFrameSequenceIter",
"(",
"chunked_validation_set",
",",
"len",
"(",
"features",
")",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"batch_size",
",",
"use_target",
"=",
"use_target",
",",
"mx_output",
"=",
"use_mx_data_batch",
")",
"else",
":",
"valid_iter",
"=",
"None",
"# Define model architecture",
"context",
"=",
"_mxnet_utils",
".",
"get_mxnet_context",
"(",
"max_devices",
"=",
"num_sessions",
")",
"# Always create MXnet models, as the pred_model is later saved to the state",
"# If MPS is used - the loss_model will be overwritten",
"loss_model",
",",
"pred_model",
"=",
"_define_model_mxnet",
"(",
"len",
"(",
"target_map",
")",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"context",
")",
"if",
"use_mps",
":",
"loss_model",
"=",
"_define_model_mps",
"(",
"batch_size",
",",
"len",
"(",
"features",
")",
",",
"len",
"(",
"target_map",
")",
",",
"prediction_window",
",",
"predictions_in_chunk",
",",
"is_prediction_model",
"=",
"False",
")",
"log",
"=",
"_fit_model_mps",
"(",
"loss_model",
",",
"data_iter",
",",
"valid_iter",
",",
"max_iterations",
",",
"verbose",
")",
"else",
":",
"# Train the model using Mxnet",
"log",
"=",
"_fit_model_mxnet",
"(",
"loss_model",
",",
"data_iter",
",",
"valid_iter",
",",
"max_iterations",
",",
"num_mxnet_gpus",
",",
"verbose",
")",
"# Set up prediction model",
"pred_model",
".",
"bind",
"(",
"data_shapes",
"=",
"data_iter",
".",
"provide_data",
",",
"label_shapes",
"=",
"None",
",",
"for_training",
"=",
"False",
")",
"if",
"use_mps",
":",
"mps_params",
"=",
"loss_model",
".",
"export",
"(",
")",
"arg_params",
",",
"aux_params",
"=",
"_ac_weights_mps_to_mxnet",
"(",
"mps_params",
",",
"_net_params",
"[",
"'lstm_h'",
"]",
")",
"else",
":",
"arg_params",
",",
"aux_params",
"=",
"loss_model",
".",
"get_params",
"(",
")",
"pred_model",
".",
"init_params",
"(",
"arg_params",
"=",
"arg_params",
",",
"aux_params",
"=",
"aux_params",
")",
"# Save the model",
"state",
"=",
"{",
"'_pred_model'",
":",
"pred_model",
",",
"'verbose'",
":",
"verbose",
",",
"'training_time'",
":",
"_time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
"'target'",
":",
"target",
",",
"'classes'",
":",
"sorted",
"(",
"target_map",
".",
"keys",
"(",
")",
")",
",",
"'features'",
":",
"features",
",",
"'session_id'",
":",
"session_id",
",",
"'prediction_window'",
":",
"prediction_window",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'num_examples'",
":",
"len",
"(",
"dataset",
")",
",",
"'num_sessions'",
":",
"num_sessions",
",",
"'num_classes'",
":",
"len",
"(",
"target_map",
")",
",",
"'num_features'",
":",
"len",
"(",
"features",
")",
",",
"'training_accuracy'",
":",
"log",
"[",
"'train_acc'",
"]",
",",
"'training_log_loss'",
":",
"log",
"[",
"'train_loss'",
"]",
",",
"'_target_id_map'",
":",
"target_map",
",",
"'_id_target_map'",
":",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"target_map",
".",
"items",
"(",
")",
"}",
",",
"'_predictions_in_chunk'",
":",
"predictions_in_chunk",
",",
"'_recalibrated_batch_size'",
":",
"data_iter",
".",
"batch_size",
",",
"'batch_size'",
":",
"user_provided_batch_size",
"}",
"if",
"validation_set",
"is",
"not",
"None",
":",
"state",
"[",
"'valid_accuracy'",
"]",
"=",
"log",
"[",
"'valid_acc'",
"]",
"state",
"[",
"'valid_log_loss'",
"]",
"=",
"log",
"[",
"'valid_loss'",
"]",
"model",
"=",
"ActivityClassifier",
"(",
"state",
")",
"return",
"model"
] | Create an :class:`ActivityClassifier` model.
Parameters
----------
dataset : SFrame
Input data which consists of `sessions` of data where each session is
a sequence of data. The data must be in `stacked` format, grouped by
session. Within each session, the data is assumed to be sorted
temporally. Columns in `features` will be used to train a model that
will make a prediction using labels in the `target` column.
session_id : string
Name of the column that contains a unique ID for each session.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Name of the columns containing the input features that will be used
for classification. If set to `None`, all columns except `session_id`
and `target` will be used.
prediction_window : int, optional
Number of time units between predictions. For example, if your input
data is sampled at 100Hz, and the `prediction_window` is set to 100,
then this model will make a prediction every 1 second.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance to
prevent the model from overfitting to the training data.
For each row of the progress table, accuracy is measured over the
provided training dataset and the `validation_set`. The format of this
SFrame must be the same as the training set.
When set to 'auto', a validation set is automatically sampled from the
training data (if the training data has > 100 sessions). If
validation_set is set to None, then all the data will be used for
training.
max_iterations : int , optional
Maximum number of iterations/epochs made over the data during the
training phase.
batch_size : int, optional
Number of sequence chunks used per training step. Must be greater than
the number of GPUs in use.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ActivityClassifier
A trained :class:`ActivityClassifier` model.
Examples
--------
.. sourcecode:: python
>>> import turicreate as tc
# Training on dummy data
>>> data = tc.SFrame({
... 'accelerometer_x': [0.1, 0.2, 0.3, 0.4, 0.5] * 10,
... 'accelerometer_y': [0.5, 0.4, 0.3, 0.2, 0.1] * 10,
... 'accelerometer_z': [0.01, 0.01, 0.02, 0.02, 0.01] * 10,
... 'session_id': [0, 0, 0] * 10 + [1, 1] * 10,
... 'activity': ['walk', 'run', 'run'] * 10 + ['swim', 'swim'] * 10
... })
# Create an activity classifier
>>> model = tc.activity_classifier.create(data,
... session_id='session_id', target='activity',
... features=['accelerometer_x', 'accelerometer_y', 'accelerometer_z'])
# Make predictions (as probability vector, or class)
>>> predictions = model.predict(data)
>>> predictions = model.predict(data, output_type='probability_vector')
# Get both predictions and classes together
>>> predictions = model.classify(data)
# Get topk predictions (instead of only top-1) if your labels have more
# 2 classes
>>> predictions = model.predict_topk(data, k = 3)
# Evaluate the model
>>> results = model.evaluate(data)
See Also
--------
ActivityClassifier, util.random_split_by_session | [
"Create",
"an",
":",
"class",
":",
"ActivityClassifier",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py#L33-L286 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py | _encode_target | def _encode_target(data, target, mapping=None):
""" Encode targets to integers in [0, num_classes - 1] """
if mapping is None:
mapping = {t: i for i, t in enumerate(sorted(data[target].unique()))}
data[target] = data[target].apply(lambda t: mapping[t])
return data, mapping | python | def _encode_target(data, target, mapping=None):
""" Encode targets to integers in [0, num_classes - 1] """
if mapping is None:
mapping = {t: i for i, t in enumerate(sorted(data[target].unique()))}
data[target] = data[target].apply(lambda t: mapping[t])
return data, mapping | [
"def",
"_encode_target",
"(",
"data",
",",
"target",
",",
"mapping",
"=",
"None",
")",
":",
"if",
"mapping",
"is",
"None",
":",
"mapping",
"=",
"{",
"t",
":",
"i",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"sorted",
"(",
"data",
"[",
"target",
"]",
".",
"unique",
"(",
")",
")",
")",
"}",
"data",
"[",
"target",
"]",
"=",
"data",
"[",
"target",
"]",
".",
"apply",
"(",
"lambda",
"t",
":",
"mapping",
"[",
"t",
"]",
")",
"return",
"data",
",",
"mapping"
] | Encode targets to integers in [0, num_classes - 1] | [
"Encode",
"targets",
"to",
"integers",
"in",
"[",
"0",
"num_classes",
"-",
"1",
"]"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py#L289-L295 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py | ActivityClassifier.export_coreml | def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel")
"""
import coremltools as _cmt
import mxnet as _mx
from ._mx_model_architecture import _net_params
prob_name = self.target + 'Probability'
label_name = self.target
input_features = [
('features', _cmt.models.datatypes.Array(*(1, self.prediction_window, self.num_features)))
]
output_features = [
(prob_name, _cmt.models.datatypes.Array(*(self.num_classes,)))
]
model_params = self._pred_model.get_params()
weights = {k: v.asnumpy() for k, v in model_params[0].items()}
weights = _mx.rnn.LSTMCell(num_hidden=_net_params['lstm_h']).unpack_weights(weights)
moving_weights = {k: v.asnumpy() for k, v in model_params[1].items()}
builder = _cmt.models.neural_network.NeuralNetworkBuilder(
input_features,
output_features,
mode='classifier'
)
# Conv
# (1,1,W,C) -> (1,C,1,W)
builder.add_permute(name='permute_layer', dim=(0, 3, 1, 2),
input_name='features', output_name='conv_in')
W = _np.expand_dims(weights['conv_weight'], axis=0).transpose((2, 3, 1, 0))
builder.add_convolution(name='conv_layer',
kernel_channels=self.num_features,
output_channels=_net_params['conv_h'],
height=1, width=self.prediction_window,
stride_height=1, stride_width=self.prediction_window,
border_mode='valid', groups=1,
W=W, b=weights['conv_bias'], has_bias=True,
input_name='conv_in', output_name='relu0_in')
builder.add_activation(name='relu_layer0', non_linearity='RELU',
input_name='relu0_in', output_name='lstm_in')
# LSTM
builder.add_optionals([('lstm_h_in', _net_params['lstm_h']),
('lstm_c_in', _net_params['lstm_h'])],
[('lstm_h_out', _net_params['lstm_h']),
('lstm_c_out', _net_params['lstm_h'])])
W_x = [weights['lstm_i2h_i_weight'], weights['lstm_i2h_f_weight'],
weights['lstm_i2h_o_weight'], weights['lstm_i2h_c_weight']]
W_h = [weights['lstm_h2h_i_weight'], weights['lstm_h2h_f_weight'],
weights['lstm_h2h_o_weight'], weights['lstm_h2h_c_weight']]
bias = [weights['lstm_h2h_i_bias'], weights['lstm_h2h_f_bias'],
weights['lstm_h2h_o_bias'], weights['lstm_h2h_c_bias']]
builder.add_unilstm(name='lstm_layer',
W_h=W_h, W_x=W_x, b=bias,
input_size=_net_params['conv_h'],
hidden_size=_net_params['lstm_h'],
input_names=['lstm_in', 'lstm_h_in', 'lstm_c_in'],
output_names=['dense0_in', 'lstm_h_out', 'lstm_c_out'],
inner_activation='SIGMOID')
# Dense
builder.add_inner_product(name='dense_layer',
W=weights['dense0_weight'], b=weights['dense0_bias'],
input_channels=_net_params['lstm_h'],
output_channels=_net_params['dense_h'],
has_bias=True,
input_name='dense0_in',
output_name='bn_in')
builder.add_batchnorm(name='bn_layer',
channels=_net_params['dense_h'],
gamma=weights['bn_gamma'], beta=weights['bn_beta'],
mean=moving_weights['bn_moving_mean'],
variance=moving_weights['bn_moving_var'],
input_name='bn_in', output_name='relu1_in',
epsilon=0.001)
builder.add_activation(name='relu_layer1', non_linearity='RELU',
input_name='relu1_in', output_name='dense1_in')
# Softmax
builder.add_inner_product(name='dense_layer1',
W=weights['dense1_weight'], b=weights['dense1_bias'],
has_bias=True,
input_channels=_net_params['dense_h'],
output_channels=self.num_classes,
input_name='dense1_in', output_name='softmax_in')
builder.add_softmax(name=prob_name,
input_name='softmax_in',
output_name=prob_name)
labels = list(map(str, sorted(self._target_id_map.keys())))
builder.set_class_labels(labels)
mlmodel = _cmt.models.MLModel(builder.spec)
model_type = 'activity classifier'
mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
# Add useful information to the mlmodel
features_str = ', '.join(self.features)
mlmodel.input_description['features'] = u'Window \xd7 [%s]' % features_str
mlmodel.input_description['lstm_h_in'] = 'LSTM hidden state input'
mlmodel.input_description['lstm_c_in'] = 'LSTM cell state input'
mlmodel.output_description[prob_name] = 'Activity prediction probabilities'
mlmodel.output_description['classLabel'] = 'Class label of top prediction'
mlmodel.output_description['lstm_h_out'] = 'LSTM hidden state output'
mlmodel.output_description['lstm_c_out'] = 'LSTM cell state output'
_coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {
'prediction_window': str(self.prediction_window),
'session_id': self.session_id,
'target': self.target,
'features': ','.join(self.features),
'max_iterations': str(self.max_iterations),
}, version=ActivityClassifier._PYTHON_ACTIVITY_CLASSIFIER_VERSION)
spec = mlmodel.get_spec()
_cmt.models.utils.rename_feature(spec, 'classLabel', label_name)
_cmt.models.utils.rename_feature(spec, 'lstm_h_in', 'hiddenIn')
_cmt.models.utils.rename_feature(spec, 'lstm_c_in', 'cellIn')
_cmt.models.utils.rename_feature(spec, 'lstm_h_out', 'hiddenOut')
_cmt.models.utils.rename_feature(spec, 'lstm_c_out', 'cellOut')
_cmt.utils.save_spec(spec, filename) | python | def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel")
"""
import coremltools as _cmt
import mxnet as _mx
from ._mx_model_architecture import _net_params
prob_name = self.target + 'Probability'
label_name = self.target
input_features = [
('features', _cmt.models.datatypes.Array(*(1, self.prediction_window, self.num_features)))
]
output_features = [
(prob_name, _cmt.models.datatypes.Array(*(self.num_classes,)))
]
model_params = self._pred_model.get_params()
weights = {k: v.asnumpy() for k, v in model_params[0].items()}
weights = _mx.rnn.LSTMCell(num_hidden=_net_params['lstm_h']).unpack_weights(weights)
moving_weights = {k: v.asnumpy() for k, v in model_params[1].items()}
builder = _cmt.models.neural_network.NeuralNetworkBuilder(
input_features,
output_features,
mode='classifier'
)
# Conv
# (1,1,W,C) -> (1,C,1,W)
builder.add_permute(name='permute_layer', dim=(0, 3, 1, 2),
input_name='features', output_name='conv_in')
W = _np.expand_dims(weights['conv_weight'], axis=0).transpose((2, 3, 1, 0))
builder.add_convolution(name='conv_layer',
kernel_channels=self.num_features,
output_channels=_net_params['conv_h'],
height=1, width=self.prediction_window,
stride_height=1, stride_width=self.prediction_window,
border_mode='valid', groups=1,
W=W, b=weights['conv_bias'], has_bias=True,
input_name='conv_in', output_name='relu0_in')
builder.add_activation(name='relu_layer0', non_linearity='RELU',
input_name='relu0_in', output_name='lstm_in')
# LSTM
builder.add_optionals([('lstm_h_in', _net_params['lstm_h']),
('lstm_c_in', _net_params['lstm_h'])],
[('lstm_h_out', _net_params['lstm_h']),
('lstm_c_out', _net_params['lstm_h'])])
W_x = [weights['lstm_i2h_i_weight'], weights['lstm_i2h_f_weight'],
weights['lstm_i2h_o_weight'], weights['lstm_i2h_c_weight']]
W_h = [weights['lstm_h2h_i_weight'], weights['lstm_h2h_f_weight'],
weights['lstm_h2h_o_weight'], weights['lstm_h2h_c_weight']]
bias = [weights['lstm_h2h_i_bias'], weights['lstm_h2h_f_bias'],
weights['lstm_h2h_o_bias'], weights['lstm_h2h_c_bias']]
builder.add_unilstm(name='lstm_layer',
W_h=W_h, W_x=W_x, b=bias,
input_size=_net_params['conv_h'],
hidden_size=_net_params['lstm_h'],
input_names=['lstm_in', 'lstm_h_in', 'lstm_c_in'],
output_names=['dense0_in', 'lstm_h_out', 'lstm_c_out'],
inner_activation='SIGMOID')
# Dense
builder.add_inner_product(name='dense_layer',
W=weights['dense0_weight'], b=weights['dense0_bias'],
input_channels=_net_params['lstm_h'],
output_channels=_net_params['dense_h'],
has_bias=True,
input_name='dense0_in',
output_name='bn_in')
builder.add_batchnorm(name='bn_layer',
channels=_net_params['dense_h'],
gamma=weights['bn_gamma'], beta=weights['bn_beta'],
mean=moving_weights['bn_moving_mean'],
variance=moving_weights['bn_moving_var'],
input_name='bn_in', output_name='relu1_in',
epsilon=0.001)
builder.add_activation(name='relu_layer1', non_linearity='RELU',
input_name='relu1_in', output_name='dense1_in')
# Softmax
builder.add_inner_product(name='dense_layer1',
W=weights['dense1_weight'], b=weights['dense1_bias'],
has_bias=True,
input_channels=_net_params['dense_h'],
output_channels=self.num_classes,
input_name='dense1_in', output_name='softmax_in')
builder.add_softmax(name=prob_name,
input_name='softmax_in',
output_name=prob_name)
labels = list(map(str, sorted(self._target_id_map.keys())))
builder.set_class_labels(labels)
mlmodel = _cmt.models.MLModel(builder.spec)
model_type = 'activity classifier'
mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
# Add useful information to the mlmodel
features_str = ', '.join(self.features)
mlmodel.input_description['features'] = u'Window \xd7 [%s]' % features_str
mlmodel.input_description['lstm_h_in'] = 'LSTM hidden state input'
mlmodel.input_description['lstm_c_in'] = 'LSTM cell state input'
mlmodel.output_description[prob_name] = 'Activity prediction probabilities'
mlmodel.output_description['classLabel'] = 'Class label of top prediction'
mlmodel.output_description['lstm_h_out'] = 'LSTM hidden state output'
mlmodel.output_description['lstm_c_out'] = 'LSTM cell state output'
_coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {
'prediction_window': str(self.prediction_window),
'session_id': self.session_id,
'target': self.target,
'features': ','.join(self.features),
'max_iterations': str(self.max_iterations),
}, version=ActivityClassifier._PYTHON_ACTIVITY_CLASSIFIER_VERSION)
spec = mlmodel.get_spec()
_cmt.models.utils.rename_feature(spec, 'classLabel', label_name)
_cmt.models.utils.rename_feature(spec, 'lstm_h_in', 'hiddenIn')
_cmt.models.utils.rename_feature(spec, 'lstm_c_in', 'cellIn')
_cmt.models.utils.rename_feature(spec, 'lstm_h_out', 'hiddenOut')
_cmt.models.utils.rename_feature(spec, 'lstm_c_out', 'cellOut')
_cmt.utils.save_spec(spec, filename) | [
"def",
"export_coreml",
"(",
"self",
",",
"filename",
")",
":",
"import",
"coremltools",
"as",
"_cmt",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
"_mx_model_architecture",
"import",
"_net_params",
"prob_name",
"=",
"self",
".",
"target",
"+",
"'Probability'",
"label_name",
"=",
"self",
".",
"target",
"input_features",
"=",
"[",
"(",
"'features'",
",",
"_cmt",
".",
"models",
".",
"datatypes",
".",
"Array",
"(",
"*",
"(",
"1",
",",
"self",
".",
"prediction_window",
",",
"self",
".",
"num_features",
")",
")",
")",
"]",
"output_features",
"=",
"[",
"(",
"prob_name",
",",
"_cmt",
".",
"models",
".",
"datatypes",
".",
"Array",
"(",
"*",
"(",
"self",
".",
"num_classes",
",",
")",
")",
")",
"]",
"model_params",
"=",
"self",
".",
"_pred_model",
".",
"get_params",
"(",
")",
"weights",
"=",
"{",
"k",
":",
"v",
".",
"asnumpy",
"(",
")",
"for",
"k",
",",
"v",
"in",
"model_params",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"weights",
"=",
"_mx",
".",
"rnn",
".",
"LSTMCell",
"(",
"num_hidden",
"=",
"_net_params",
"[",
"'lstm_h'",
"]",
")",
".",
"unpack_weights",
"(",
"weights",
")",
"moving_weights",
"=",
"{",
"k",
":",
"v",
".",
"asnumpy",
"(",
")",
"for",
"k",
",",
"v",
"in",
"model_params",
"[",
"1",
"]",
".",
"items",
"(",
")",
"}",
"builder",
"=",
"_cmt",
".",
"models",
".",
"neural_network",
".",
"NeuralNetworkBuilder",
"(",
"input_features",
",",
"output_features",
",",
"mode",
"=",
"'classifier'",
")",
"# Conv",
"# (1,1,W,C) -> (1,C,1,W)",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"'permute_layer'",
",",
"dim",
"=",
"(",
"0",
",",
"3",
",",
"1",
",",
"2",
")",
",",
"input_name",
"=",
"'features'",
",",
"output_name",
"=",
"'conv_in'",
")",
"W",
"=",
"_np",
".",
"expand_dims",
"(",
"weights",
"[",
"'conv_weight'",
"]",
",",
"axis",
"=",
"0",
")",
".",
"transpose",
"(",
"(",
"2",
",",
"3",
",",
"1",
",",
"0",
")",
")",
"builder",
".",
"add_convolution",
"(",
"name",
"=",
"'conv_layer'",
",",
"kernel_channels",
"=",
"self",
".",
"num_features",
",",
"output_channels",
"=",
"_net_params",
"[",
"'conv_h'",
"]",
",",
"height",
"=",
"1",
",",
"width",
"=",
"self",
".",
"prediction_window",
",",
"stride_height",
"=",
"1",
",",
"stride_width",
"=",
"self",
".",
"prediction_window",
",",
"border_mode",
"=",
"'valid'",
",",
"groups",
"=",
"1",
",",
"W",
"=",
"W",
",",
"b",
"=",
"weights",
"[",
"'conv_bias'",
"]",
",",
"has_bias",
"=",
"True",
",",
"input_name",
"=",
"'conv_in'",
",",
"output_name",
"=",
"'relu0_in'",
")",
"builder",
".",
"add_activation",
"(",
"name",
"=",
"'relu_layer0'",
",",
"non_linearity",
"=",
"'RELU'",
",",
"input_name",
"=",
"'relu0_in'",
",",
"output_name",
"=",
"'lstm_in'",
")",
"# LSTM",
"builder",
".",
"add_optionals",
"(",
"[",
"(",
"'lstm_h_in'",
",",
"_net_params",
"[",
"'lstm_h'",
"]",
")",
",",
"(",
"'lstm_c_in'",
",",
"_net_params",
"[",
"'lstm_h'",
"]",
")",
"]",
",",
"[",
"(",
"'lstm_h_out'",
",",
"_net_params",
"[",
"'lstm_h'",
"]",
")",
",",
"(",
"'lstm_c_out'",
",",
"_net_params",
"[",
"'lstm_h'",
"]",
")",
"]",
")",
"W_x",
"=",
"[",
"weights",
"[",
"'lstm_i2h_i_weight'",
"]",
",",
"weights",
"[",
"'lstm_i2h_f_weight'",
"]",
",",
"weights",
"[",
"'lstm_i2h_o_weight'",
"]",
",",
"weights",
"[",
"'lstm_i2h_c_weight'",
"]",
"]",
"W_h",
"=",
"[",
"weights",
"[",
"'lstm_h2h_i_weight'",
"]",
",",
"weights",
"[",
"'lstm_h2h_f_weight'",
"]",
",",
"weights",
"[",
"'lstm_h2h_o_weight'",
"]",
",",
"weights",
"[",
"'lstm_h2h_c_weight'",
"]",
"]",
"bias",
"=",
"[",
"weights",
"[",
"'lstm_h2h_i_bias'",
"]",
",",
"weights",
"[",
"'lstm_h2h_f_bias'",
"]",
",",
"weights",
"[",
"'lstm_h2h_o_bias'",
"]",
",",
"weights",
"[",
"'lstm_h2h_c_bias'",
"]",
"]",
"builder",
".",
"add_unilstm",
"(",
"name",
"=",
"'lstm_layer'",
",",
"W_h",
"=",
"W_h",
",",
"W_x",
"=",
"W_x",
",",
"b",
"=",
"bias",
",",
"input_size",
"=",
"_net_params",
"[",
"'conv_h'",
"]",
",",
"hidden_size",
"=",
"_net_params",
"[",
"'lstm_h'",
"]",
",",
"input_names",
"=",
"[",
"'lstm_in'",
",",
"'lstm_h_in'",
",",
"'lstm_c_in'",
"]",
",",
"output_names",
"=",
"[",
"'dense0_in'",
",",
"'lstm_h_out'",
",",
"'lstm_c_out'",
"]",
",",
"inner_activation",
"=",
"'SIGMOID'",
")",
"# Dense",
"builder",
".",
"add_inner_product",
"(",
"name",
"=",
"'dense_layer'",
",",
"W",
"=",
"weights",
"[",
"'dense0_weight'",
"]",
",",
"b",
"=",
"weights",
"[",
"'dense0_bias'",
"]",
",",
"input_channels",
"=",
"_net_params",
"[",
"'lstm_h'",
"]",
",",
"output_channels",
"=",
"_net_params",
"[",
"'dense_h'",
"]",
",",
"has_bias",
"=",
"True",
",",
"input_name",
"=",
"'dense0_in'",
",",
"output_name",
"=",
"'bn_in'",
")",
"builder",
".",
"add_batchnorm",
"(",
"name",
"=",
"'bn_layer'",
",",
"channels",
"=",
"_net_params",
"[",
"'dense_h'",
"]",
",",
"gamma",
"=",
"weights",
"[",
"'bn_gamma'",
"]",
",",
"beta",
"=",
"weights",
"[",
"'bn_beta'",
"]",
",",
"mean",
"=",
"moving_weights",
"[",
"'bn_moving_mean'",
"]",
",",
"variance",
"=",
"moving_weights",
"[",
"'bn_moving_var'",
"]",
",",
"input_name",
"=",
"'bn_in'",
",",
"output_name",
"=",
"'relu1_in'",
",",
"epsilon",
"=",
"0.001",
")",
"builder",
".",
"add_activation",
"(",
"name",
"=",
"'relu_layer1'",
",",
"non_linearity",
"=",
"'RELU'",
",",
"input_name",
"=",
"'relu1_in'",
",",
"output_name",
"=",
"'dense1_in'",
")",
"# Softmax",
"builder",
".",
"add_inner_product",
"(",
"name",
"=",
"'dense_layer1'",
",",
"W",
"=",
"weights",
"[",
"'dense1_weight'",
"]",
",",
"b",
"=",
"weights",
"[",
"'dense1_bias'",
"]",
",",
"has_bias",
"=",
"True",
",",
"input_channels",
"=",
"_net_params",
"[",
"'dense_h'",
"]",
",",
"output_channels",
"=",
"self",
".",
"num_classes",
",",
"input_name",
"=",
"'dense1_in'",
",",
"output_name",
"=",
"'softmax_in'",
")",
"builder",
".",
"add_softmax",
"(",
"name",
"=",
"prob_name",
",",
"input_name",
"=",
"'softmax_in'",
",",
"output_name",
"=",
"prob_name",
")",
"labels",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"sorted",
"(",
"self",
".",
"_target_id_map",
".",
"keys",
"(",
")",
")",
")",
")",
"builder",
".",
"set_class_labels",
"(",
"labels",
")",
"mlmodel",
"=",
"_cmt",
".",
"models",
".",
"MLModel",
"(",
"builder",
".",
"spec",
")",
"model_type",
"=",
"'activity classifier'",
"mlmodel",
".",
"short_description",
"=",
"_coreml_utils",
".",
"_mlmodel_short_description",
"(",
"model_type",
")",
"# Add useful information to the mlmodel",
"features_str",
"=",
"', '",
".",
"join",
"(",
"self",
".",
"features",
")",
"mlmodel",
".",
"input_description",
"[",
"'features'",
"]",
"=",
"u'Window \\xd7 [%s]'",
"%",
"features_str",
"mlmodel",
".",
"input_description",
"[",
"'lstm_h_in'",
"]",
"=",
"'LSTM hidden state input'",
"mlmodel",
".",
"input_description",
"[",
"'lstm_c_in'",
"]",
"=",
"'LSTM cell state input'",
"mlmodel",
".",
"output_description",
"[",
"prob_name",
"]",
"=",
"'Activity prediction probabilities'",
"mlmodel",
".",
"output_description",
"[",
"'classLabel'",
"]",
"=",
"'Class label of top prediction'",
"mlmodel",
".",
"output_description",
"[",
"'lstm_h_out'",
"]",
"=",
"'LSTM hidden state output'",
"mlmodel",
".",
"output_description",
"[",
"'lstm_c_out'",
"]",
"=",
"'LSTM cell state output'",
"_coreml_utils",
".",
"_set_model_metadata",
"(",
"mlmodel",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"{",
"'prediction_window'",
":",
"str",
"(",
"self",
".",
"prediction_window",
")",
",",
"'session_id'",
":",
"self",
".",
"session_id",
",",
"'target'",
":",
"self",
".",
"target",
",",
"'features'",
":",
"','",
".",
"join",
"(",
"self",
".",
"features",
")",
",",
"'max_iterations'",
":",
"str",
"(",
"self",
".",
"max_iterations",
")",
",",
"}",
",",
"version",
"=",
"ActivityClassifier",
".",
"_PYTHON_ACTIVITY_CLASSIFIER_VERSION",
")",
"spec",
"=",
"mlmodel",
".",
"get_spec",
"(",
")",
"_cmt",
".",
"models",
".",
"utils",
".",
"rename_feature",
"(",
"spec",
",",
"'classLabel'",
",",
"label_name",
")",
"_cmt",
".",
"models",
".",
"utils",
".",
"rename_feature",
"(",
"spec",
",",
"'lstm_h_in'",
",",
"'hiddenIn'",
")",
"_cmt",
".",
"models",
".",
"utils",
".",
"rename_feature",
"(",
"spec",
",",
"'lstm_c_in'",
",",
"'cellIn'",
")",
"_cmt",
".",
"models",
".",
"utils",
".",
"rename_feature",
"(",
"spec",
",",
"'lstm_h_out'",
",",
"'hiddenOut'",
")",
"_cmt",
".",
"models",
".",
"utils",
".",
"rename_feature",
"(",
"spec",
",",
"'lstm_c_out'",
",",
"'cellOut'",
")",
"_cmt",
".",
"utils",
".",
"save_spec",
"(",
"spec",
",",
"filename",
")"
] | Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel") | [
"Export",
"the",
"model",
"in",
"Core",
"ML",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py#L351-L485 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py | ActivityClassifier.predict | def predict(self, dataset, output_type='class', output_frequency='per_row'):
"""
Return predictions for ``dataset``, using the trained activity classifier.
Predictions can be generated as class labels, or as a probability
vector with probabilities for each class.
The activity classifier generates a single prediction for each
``prediction_window`` rows in ``dataset``, per ``session_id``. Thus the
number of predictions is smaller than the length of ``dataset``. By
default each prediction is replicated by ``prediction_window`` to return
a prediction for each row of ``dataset``. Use ``output_frequency`` to
get the unreplicated predictions.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'class', 'probability_vector'}, optional
Form of each prediction which is one of:
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. This returns the class with maximum
probability.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
- 'per_row': Convenience option to make sure the number of
predictions match the number of rows in the dataset. Each
prediction from the model is repeated ``prediction_window``
times during that window.
Returns
-------
out : SArray | SFrame
If ``output_frequency`` is 'per_row' return an SArray with predictions
for each row in ``dataset``.
If ``output_frequency`` is 'per_window' return an SFrame with
predictions for ``prediction_window`` rows in ``dataset``.
See Also
----------
create, evaluate, classify
Examples
--------
.. sourcecode:: python
# One prediction per row
>>> probability_predictions = model.predict(
... data, output_type='probability_vector', output_frequency='per_row')[:4]
>>> probability_predictions
dtype: array
Rows: 4
[array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086])]
# One prediction per window
>>> class_predictions = model.predict(
... data, output_type='class', output_frequency='per_window')
>>> class_predictions
+---------------+------------+-----+
| prediction_id | session_id |class|
+---------------+------------+-----+
| 0 | 3 | 5 |
| 1 | 3 | 5 |
| 2 | 3 | 5 |
| 3 | 3 | 5 |
| 4 | 3 | 5 |
| 5 | 3 | 5 |
| 6 | 3 | 5 |
| 7 | 3 | 4 |
| 8 | 3 | 4 |
| 9 | 3 | 4 |
| ... | ... | ... |
+---------------+------------+-----+
"""
_tkutl._raise_error_if_not_sframe(dataset, 'dataset')
_tkutl._check_categorical_option_type(
'output_frequency', output_frequency, ['per_window', 'per_row'])
_tkutl._check_categorical_option_type(
'output_type', output_type, ['probability_vector', 'class'])
from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter
from ._sframe_sequence_iterator import prep_data as _prep_data
from ._sframe_sequence_iterator import _ceil_dev
from ._mx_model_architecture import _net_params
from ._mps_model_architecture import _define_model_mps, _predict_mps
from .._mps_utils import (use_mps as _use_mps,
ac_weights_mxnet_to_mps as _ac_weights_mxnet_to_mps,)
from .._mxnet import _mxnet_utils
prediction_window = self.prediction_window
chunked_dataset, num_sessions = _prep_data(dataset, self.features, self.session_id, prediction_window,
self._predictions_in_chunk, verbose=False)
# Decide whether to use MPS GPU, MXnet GPU or CPU
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)
use_mps = _use_mps() and num_mxnet_gpus == 0
data_iter = _SFrameSequenceIter(chunked_dataset, len(self.features),
prediction_window, self._predictions_in_chunk,
self._recalibrated_batch_size, use_pad=True, mx_output=not use_mps)
if use_mps:
arg_params, aux_params = self._pred_model.get_params()
mps_params = _ac_weights_mxnet_to_mps(arg_params, aux_params, _net_params['lstm_h'])
mps_pred_model = _define_model_mps(self.batch_size, len(self.features), len(self._target_id_map),
prediction_window, self._predictions_in_chunk, is_prediction_model=True)
mps_pred_model.load(mps_params)
preds = _predict_mps(mps_pred_model, data_iter)
else:
preds = self._pred_model.predict(data_iter).asnumpy()
chunked_data = data_iter.dataset
if output_frequency == 'per_row':
# Replicate each prediction times prediction_window
preds = preds.repeat(prediction_window, axis=1)
# Remove predictions for padded rows
unpadded_len = chunked_data['chunk_len'].to_numpy()
preds = [p[:unpadded_len[i]] for i, p in enumerate(preds)]
# Reshape from (num_of_chunks, chunk_size, num_of_classes)
# to (ceil(length / prediction_window), num_of_classes)
# chunk_size is DIFFERENT between chunks - since padding was removed.
out = _np.concatenate(preds)
out = out.reshape((-1, len(self._target_id_map)))
out = _SArray(out)
if output_type == 'class':
id_target_map = self._id_target_map
out = out.apply(lambda c: id_target_map[_np.argmax(c)])
elif output_frequency == 'per_window':
# Calculate the number of expected predictions and
# remove predictions for padded data
unpadded_len = chunked_data['chunk_len'].apply(
lambda l: _ceil_dev(l, prediction_window)).to_numpy()
preds = [list(p[:unpadded_len[i]]) for i, p in enumerate(preds)]
out = _SFrame({
self.session_id: chunked_data['session_id'],
'preds': _SArray(preds, dtype=list)
}).stack('preds', new_column_name='probability_vector')
# Calculate the prediction index per session
out = out.add_row_number(column_name='prediction_id')
start_sess_idx = out.groupby(
self.session_id, {'start_idx': _agg.MIN('prediction_id')})
start_sess_idx = start_sess_idx.unstack(
[self.session_id, 'start_idx'], new_column_name='idx')['idx'][0]
if output_type == 'class':
id_target_map = self._id_target_map
out['probability_vector'] = out['probability_vector'].apply(
lambda c: id_target_map[_np.argmax(c)])
out = out.rename({'probability_vector': 'class'})
return out | python | def predict(self, dataset, output_type='class', output_frequency='per_row'):
"""
Return predictions for ``dataset``, using the trained activity classifier.
Predictions can be generated as class labels, or as a probability
vector with probabilities for each class.
The activity classifier generates a single prediction for each
``prediction_window`` rows in ``dataset``, per ``session_id``. Thus the
number of predictions is smaller than the length of ``dataset``. By
default each prediction is replicated by ``prediction_window`` to return
a prediction for each row of ``dataset``. Use ``output_frequency`` to
get the unreplicated predictions.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'class', 'probability_vector'}, optional
Form of each prediction which is one of:
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. This returns the class with maximum
probability.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
- 'per_row': Convenience option to make sure the number of
predictions match the number of rows in the dataset. Each
prediction from the model is repeated ``prediction_window``
times during that window.
Returns
-------
out : SArray | SFrame
If ``output_frequency`` is 'per_row' return an SArray with predictions
for each row in ``dataset``.
If ``output_frequency`` is 'per_window' return an SFrame with
predictions for ``prediction_window`` rows in ``dataset``.
See Also
----------
create, evaluate, classify
Examples
--------
.. sourcecode:: python
# One prediction per row
>>> probability_predictions = model.predict(
... data, output_type='probability_vector', output_frequency='per_row')[:4]
>>> probability_predictions
dtype: array
Rows: 4
[array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086])]
# One prediction per window
>>> class_predictions = model.predict(
... data, output_type='class', output_frequency='per_window')
>>> class_predictions
+---------------+------------+-----+
| prediction_id | session_id |class|
+---------------+------------+-----+
| 0 | 3 | 5 |
| 1 | 3 | 5 |
| 2 | 3 | 5 |
| 3 | 3 | 5 |
| 4 | 3 | 5 |
| 5 | 3 | 5 |
| 6 | 3 | 5 |
| 7 | 3 | 4 |
| 8 | 3 | 4 |
| 9 | 3 | 4 |
| ... | ... | ... |
+---------------+------------+-----+
"""
_tkutl._raise_error_if_not_sframe(dataset, 'dataset')
_tkutl._check_categorical_option_type(
'output_frequency', output_frequency, ['per_window', 'per_row'])
_tkutl._check_categorical_option_type(
'output_type', output_type, ['probability_vector', 'class'])
from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter
from ._sframe_sequence_iterator import prep_data as _prep_data
from ._sframe_sequence_iterator import _ceil_dev
from ._mx_model_architecture import _net_params
from ._mps_model_architecture import _define_model_mps, _predict_mps
from .._mps_utils import (use_mps as _use_mps,
ac_weights_mxnet_to_mps as _ac_weights_mxnet_to_mps,)
from .._mxnet import _mxnet_utils
prediction_window = self.prediction_window
chunked_dataset, num_sessions = _prep_data(dataset, self.features, self.session_id, prediction_window,
self._predictions_in_chunk, verbose=False)
# Decide whether to use MPS GPU, MXnet GPU or CPU
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)
use_mps = _use_mps() and num_mxnet_gpus == 0
data_iter = _SFrameSequenceIter(chunked_dataset, len(self.features),
prediction_window, self._predictions_in_chunk,
self._recalibrated_batch_size, use_pad=True, mx_output=not use_mps)
if use_mps:
arg_params, aux_params = self._pred_model.get_params()
mps_params = _ac_weights_mxnet_to_mps(arg_params, aux_params, _net_params['lstm_h'])
mps_pred_model = _define_model_mps(self.batch_size, len(self.features), len(self._target_id_map),
prediction_window, self._predictions_in_chunk, is_prediction_model=True)
mps_pred_model.load(mps_params)
preds = _predict_mps(mps_pred_model, data_iter)
else:
preds = self._pred_model.predict(data_iter).asnumpy()
chunked_data = data_iter.dataset
if output_frequency == 'per_row':
# Replicate each prediction times prediction_window
preds = preds.repeat(prediction_window, axis=1)
# Remove predictions for padded rows
unpadded_len = chunked_data['chunk_len'].to_numpy()
preds = [p[:unpadded_len[i]] for i, p in enumerate(preds)]
# Reshape from (num_of_chunks, chunk_size, num_of_classes)
# to (ceil(length / prediction_window), num_of_classes)
# chunk_size is DIFFERENT between chunks - since padding was removed.
out = _np.concatenate(preds)
out = out.reshape((-1, len(self._target_id_map)))
out = _SArray(out)
if output_type == 'class':
id_target_map = self._id_target_map
out = out.apply(lambda c: id_target_map[_np.argmax(c)])
elif output_frequency == 'per_window':
# Calculate the number of expected predictions and
# remove predictions for padded data
unpadded_len = chunked_data['chunk_len'].apply(
lambda l: _ceil_dev(l, prediction_window)).to_numpy()
preds = [list(p[:unpadded_len[i]]) for i, p in enumerate(preds)]
out = _SFrame({
self.session_id: chunked_data['session_id'],
'preds': _SArray(preds, dtype=list)
}).stack('preds', new_column_name='probability_vector')
# Calculate the prediction index per session
out = out.add_row_number(column_name='prediction_id')
start_sess_idx = out.groupby(
self.session_id, {'start_idx': _agg.MIN('prediction_id')})
start_sess_idx = start_sess_idx.unstack(
[self.session_id, 'start_idx'], new_column_name='idx')['idx'][0]
if output_type == 'class':
id_target_map = self._id_target_map
out['probability_vector'] = out['probability_vector'].apply(
lambda c: id_target_map[_np.argmax(c)])
out = out.rename({'probability_vector': 'class'})
return out | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"'class'",
",",
"output_frequency",
"=",
"'per_row'",
")",
":",
"_tkutl",
".",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"'dataset'",
")",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'output_frequency'",
",",
"output_frequency",
",",
"[",
"'per_window'",
",",
"'per_row'",
"]",
")",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'output_type'",
",",
"output_type",
",",
"[",
"'probability_vector'",
",",
"'class'",
"]",
")",
"from",
".",
"_sframe_sequence_iterator",
"import",
"SFrameSequenceIter",
"as",
"_SFrameSequenceIter",
"from",
".",
"_sframe_sequence_iterator",
"import",
"prep_data",
"as",
"_prep_data",
"from",
".",
"_sframe_sequence_iterator",
"import",
"_ceil_dev",
"from",
".",
"_mx_model_architecture",
"import",
"_net_params",
"from",
".",
"_mps_model_architecture",
"import",
"_define_model_mps",
",",
"_predict_mps",
"from",
".",
".",
"_mps_utils",
"import",
"(",
"use_mps",
"as",
"_use_mps",
",",
"ac_weights_mxnet_to_mps",
"as",
"_ac_weights_mxnet_to_mps",
",",
")",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"prediction_window",
"=",
"self",
".",
"prediction_window",
"chunked_dataset",
",",
"num_sessions",
"=",
"_prep_data",
"(",
"dataset",
",",
"self",
".",
"features",
",",
"self",
".",
"session_id",
",",
"prediction_window",
",",
"self",
".",
"_predictions_in_chunk",
",",
"verbose",
"=",
"False",
")",
"# Decide whether to use MPS GPU, MXnet GPU or CPU",
"num_mxnet_gpus",
"=",
"_mxnet_utils",
".",
"get_num_gpus_in_use",
"(",
"max_devices",
"=",
"num_sessions",
")",
"use_mps",
"=",
"_use_mps",
"(",
")",
"and",
"num_mxnet_gpus",
"==",
"0",
"data_iter",
"=",
"_SFrameSequenceIter",
"(",
"chunked_dataset",
",",
"len",
"(",
"self",
".",
"features",
")",
",",
"prediction_window",
",",
"self",
".",
"_predictions_in_chunk",
",",
"self",
".",
"_recalibrated_batch_size",
",",
"use_pad",
"=",
"True",
",",
"mx_output",
"=",
"not",
"use_mps",
")",
"if",
"use_mps",
":",
"arg_params",
",",
"aux_params",
"=",
"self",
".",
"_pred_model",
".",
"get_params",
"(",
")",
"mps_params",
"=",
"_ac_weights_mxnet_to_mps",
"(",
"arg_params",
",",
"aux_params",
",",
"_net_params",
"[",
"'lstm_h'",
"]",
")",
"mps_pred_model",
"=",
"_define_model_mps",
"(",
"self",
".",
"batch_size",
",",
"len",
"(",
"self",
".",
"features",
")",
",",
"len",
"(",
"self",
".",
"_target_id_map",
")",
",",
"prediction_window",
",",
"self",
".",
"_predictions_in_chunk",
",",
"is_prediction_model",
"=",
"True",
")",
"mps_pred_model",
".",
"load",
"(",
"mps_params",
")",
"preds",
"=",
"_predict_mps",
"(",
"mps_pred_model",
",",
"data_iter",
")",
"else",
":",
"preds",
"=",
"self",
".",
"_pred_model",
".",
"predict",
"(",
"data_iter",
")",
".",
"asnumpy",
"(",
")",
"chunked_data",
"=",
"data_iter",
".",
"dataset",
"if",
"output_frequency",
"==",
"'per_row'",
":",
"# Replicate each prediction times prediction_window",
"preds",
"=",
"preds",
".",
"repeat",
"(",
"prediction_window",
",",
"axis",
"=",
"1",
")",
"# Remove predictions for padded rows",
"unpadded_len",
"=",
"chunked_data",
"[",
"'chunk_len'",
"]",
".",
"to_numpy",
"(",
")",
"preds",
"=",
"[",
"p",
"[",
":",
"unpadded_len",
"[",
"i",
"]",
"]",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"preds",
")",
"]",
"# Reshape from (num_of_chunks, chunk_size, num_of_classes)",
"# to (ceil(length / prediction_window), num_of_classes)",
"# chunk_size is DIFFERENT between chunks - since padding was removed.",
"out",
"=",
"_np",
".",
"concatenate",
"(",
"preds",
")",
"out",
"=",
"out",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"len",
"(",
"self",
".",
"_target_id_map",
")",
")",
")",
"out",
"=",
"_SArray",
"(",
"out",
")",
"if",
"output_type",
"==",
"'class'",
":",
"id_target_map",
"=",
"self",
".",
"_id_target_map",
"out",
"=",
"out",
".",
"apply",
"(",
"lambda",
"c",
":",
"id_target_map",
"[",
"_np",
".",
"argmax",
"(",
"c",
")",
"]",
")",
"elif",
"output_frequency",
"==",
"'per_window'",
":",
"# Calculate the number of expected predictions and",
"# remove predictions for padded data",
"unpadded_len",
"=",
"chunked_data",
"[",
"'chunk_len'",
"]",
".",
"apply",
"(",
"lambda",
"l",
":",
"_ceil_dev",
"(",
"l",
",",
"prediction_window",
")",
")",
".",
"to_numpy",
"(",
")",
"preds",
"=",
"[",
"list",
"(",
"p",
"[",
":",
"unpadded_len",
"[",
"i",
"]",
"]",
")",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"preds",
")",
"]",
"out",
"=",
"_SFrame",
"(",
"{",
"self",
".",
"session_id",
":",
"chunked_data",
"[",
"'session_id'",
"]",
",",
"'preds'",
":",
"_SArray",
"(",
"preds",
",",
"dtype",
"=",
"list",
")",
"}",
")",
".",
"stack",
"(",
"'preds'",
",",
"new_column_name",
"=",
"'probability_vector'",
")",
"# Calculate the prediction index per session",
"out",
"=",
"out",
".",
"add_row_number",
"(",
"column_name",
"=",
"'prediction_id'",
")",
"start_sess_idx",
"=",
"out",
".",
"groupby",
"(",
"self",
".",
"session_id",
",",
"{",
"'start_idx'",
":",
"_agg",
".",
"MIN",
"(",
"'prediction_id'",
")",
"}",
")",
"start_sess_idx",
"=",
"start_sess_idx",
".",
"unstack",
"(",
"[",
"self",
".",
"session_id",
",",
"'start_idx'",
"]",
",",
"new_column_name",
"=",
"'idx'",
")",
"[",
"'idx'",
"]",
"[",
"0",
"]",
"if",
"output_type",
"==",
"'class'",
":",
"id_target_map",
"=",
"self",
".",
"_id_target_map",
"out",
"[",
"'probability_vector'",
"]",
"=",
"out",
"[",
"'probability_vector'",
"]",
".",
"apply",
"(",
"lambda",
"c",
":",
"id_target_map",
"[",
"_np",
".",
"argmax",
"(",
"c",
")",
"]",
")",
"out",
"=",
"out",
".",
"rename",
"(",
"{",
"'probability_vector'",
":",
"'class'",
"}",
")",
"return",
"out"
] | Return predictions for ``dataset``, using the trained activity classifier.
Predictions can be generated as class labels, or as a probability
vector with probabilities for each class.
The activity classifier generates a single prediction for each
``prediction_window`` rows in ``dataset``, per ``session_id``. Thus the
number of predictions is smaller than the length of ``dataset``. By
default each prediction is replicated by ``prediction_window`` to return
a prediction for each row of ``dataset``. Use ``output_frequency`` to
get the unreplicated predictions.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'class', 'probability_vector'}, optional
Form of each prediction which is one of:
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. This returns the class with maximum
probability.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
- 'per_row': Convenience option to make sure the number of
predictions match the number of rows in the dataset. Each
prediction from the model is repeated ``prediction_window``
times during that window.
Returns
-------
out : SArray | SFrame
If ``output_frequency`` is 'per_row' return an SArray with predictions
for each row in ``dataset``.
If ``output_frequency`` is 'per_window' return an SFrame with
predictions for ``prediction_window`` rows in ``dataset``.
See Also
----------
create, evaluate, classify
Examples
--------
.. sourcecode:: python
# One prediction per row
>>> probability_predictions = model.predict(
... data, output_type='probability_vector', output_frequency='per_row')[:4]
>>> probability_predictions
dtype: array
Rows: 4
[array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),
array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086])]
# One prediction per window
>>> class_predictions = model.predict(
... data, output_type='class', output_frequency='per_window')
>>> class_predictions
+---------------+------------+-----+
| prediction_id | session_id |class|
+---------------+------------+-----+
| 0 | 3 | 5 |
| 1 | 3 | 5 |
| 2 | 3 | 5 |
| 3 | 3 | 5 |
| 4 | 3 | 5 |
| 5 | 3 | 5 |
| 6 | 3 | 5 |
| 7 | 3 | 4 |
| 8 | 3 | 4 |
| 9 | 3 | 4 |
| ... | ... | ... |
+---------------+------------+-----+ | [
"Return",
"predictions",
"for",
"dataset",
"using",
"the",
"trained",
"activity",
"classifier",
".",
"Predictions",
"can",
"be",
"generated",
"as",
"class",
"labels",
"or",
"as",
"a",
"probability",
"vector",
"with",
"probabilities",
"for",
"each",
"class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py#L487-L664 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py | ActivityClassifier.evaluate | def evaluate(self, dataset, metric='auto'):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the session_id, target and features used for model training.
Additional columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an
ROC curve
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict
Examples
----------
.. sourcecode:: python
>>> results = model.evaluate(data)
>>> print results['accuracy']
"""
avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
'f1_score', 'log_loss', 'confusion_matrix', 'roc_curve']
_tkutl._check_categorical_option_type(
'metric', metric, avail_metrics + ['auto'])
if metric == 'auto':
metrics = avail_metrics
else:
metrics = [metric]
probs = self.predict(dataset, output_type='probability_vector')
classes = self.predict(dataset, output_type='class')
ret = {}
if 'accuracy' in metrics:
ret['accuracy'] = _evaluation.accuracy(dataset[self.target], classes)
if 'auc' in metrics:
ret['auc'] = _evaluation.auc(dataset[self.target], probs, index_map=self._target_id_map)
if 'precision' in metrics:
ret['precision'] = _evaluation.precision(dataset[self.target], classes)
if 'recall' in metrics:
ret['recall'] = _evaluation.recall(dataset[self.target], classes)
if 'f1_score' in metrics:
ret['f1_score'] = _evaluation.f1_score(dataset[self.target], classes)
if 'log_loss' in metrics:
ret['log_loss'] = _evaluation.log_loss(dataset[self.target], probs, index_map=self._target_id_map)
if 'confusion_matrix' in metrics:
ret['confusion_matrix'] = _evaluation.confusion_matrix(dataset[self.target], classes)
if 'roc_curve' in metrics:
ret['roc_curve'] = _evaluation.roc_curve(dataset[self.target], probs, index_map=self._target_id_map)
return ret | python | def evaluate(self, dataset, metric='auto'):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the session_id, target and features used for model training.
Additional columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an
ROC curve
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict
Examples
----------
.. sourcecode:: python
>>> results = model.evaluate(data)
>>> print results['accuracy']
"""
avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
'f1_score', 'log_loss', 'confusion_matrix', 'roc_curve']
_tkutl._check_categorical_option_type(
'metric', metric, avail_metrics + ['auto'])
if metric == 'auto':
metrics = avail_metrics
else:
metrics = [metric]
probs = self.predict(dataset, output_type='probability_vector')
classes = self.predict(dataset, output_type='class')
ret = {}
if 'accuracy' in metrics:
ret['accuracy'] = _evaluation.accuracy(dataset[self.target], classes)
if 'auc' in metrics:
ret['auc'] = _evaluation.auc(dataset[self.target], probs, index_map=self._target_id_map)
if 'precision' in metrics:
ret['precision'] = _evaluation.precision(dataset[self.target], classes)
if 'recall' in metrics:
ret['recall'] = _evaluation.recall(dataset[self.target], classes)
if 'f1_score' in metrics:
ret['f1_score'] = _evaluation.f1_score(dataset[self.target], classes)
if 'log_loss' in metrics:
ret['log_loss'] = _evaluation.log_loss(dataset[self.target], probs, index_map=self._target_id_map)
if 'confusion_matrix' in metrics:
ret['confusion_matrix'] = _evaluation.confusion_matrix(dataset[self.target], classes)
if 'roc_curve' in metrics:
ret['roc_curve'] = _evaluation.roc_curve(dataset[self.target], probs, index_map=self._target_id_map)
return ret | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
")",
":",
"avail_metrics",
"=",
"[",
"'accuracy'",
",",
"'auc'",
",",
"'precision'",
",",
"'recall'",
",",
"'f1_score'",
",",
"'log_loss'",
",",
"'confusion_matrix'",
",",
"'roc_curve'",
"]",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'metric'",
",",
"metric",
",",
"avail_metrics",
"+",
"[",
"'auto'",
"]",
")",
"if",
"metric",
"==",
"'auto'",
":",
"metrics",
"=",
"avail_metrics",
"else",
":",
"metrics",
"=",
"[",
"metric",
"]",
"probs",
"=",
"self",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'probability_vector'",
")",
"classes",
"=",
"self",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'class'",
")",
"ret",
"=",
"{",
"}",
"if",
"'accuracy'",
"in",
"metrics",
":",
"ret",
"[",
"'accuracy'",
"]",
"=",
"_evaluation",
".",
"accuracy",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"classes",
")",
"if",
"'auc'",
"in",
"metrics",
":",
"ret",
"[",
"'auc'",
"]",
"=",
"_evaluation",
".",
"auc",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"probs",
",",
"index_map",
"=",
"self",
".",
"_target_id_map",
")",
"if",
"'precision'",
"in",
"metrics",
":",
"ret",
"[",
"'precision'",
"]",
"=",
"_evaluation",
".",
"precision",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"classes",
")",
"if",
"'recall'",
"in",
"metrics",
":",
"ret",
"[",
"'recall'",
"]",
"=",
"_evaluation",
".",
"recall",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"classes",
")",
"if",
"'f1_score'",
"in",
"metrics",
":",
"ret",
"[",
"'f1_score'",
"]",
"=",
"_evaluation",
".",
"f1_score",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"classes",
")",
"if",
"'log_loss'",
"in",
"metrics",
":",
"ret",
"[",
"'log_loss'",
"]",
"=",
"_evaluation",
".",
"log_loss",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"probs",
",",
"index_map",
"=",
"self",
".",
"_target_id_map",
")",
"if",
"'confusion_matrix'",
"in",
"metrics",
":",
"ret",
"[",
"'confusion_matrix'",
"]",
"=",
"_evaluation",
".",
"confusion_matrix",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"classes",
")",
"if",
"'roc_curve'",
"in",
"metrics",
":",
"ret",
"[",
"'roc_curve'",
"]",
"=",
"_evaluation",
".",
"roc_curve",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"probs",
",",
"index_map",
"=",
"self",
".",
"_target_id_map",
")",
"return",
"ret"
] | Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the session_id, target and features used for model training.
Additional columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an
ROC curve
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict
Examples
----------
.. sourcecode:: python
>>> results = model.evaluate(data)
>>> print results['accuracy'] | [
"Evaluate",
"the",
"model",
"by",
"making",
"predictions",
"of",
"target",
"values",
"and",
"comparing",
"these",
"to",
"actual",
"values",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py#L666-L743 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py | ActivityClassifier.classify | def classify(self, dataset, output_frequency='per_row'):
"""
Return a classification, for each ``prediction_window`` examples in the
``dataset``, using the trained activity classification model. The output
SFrame contains predictions as both class labels as well as probabilities
that the predicted value is the associated label.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
----------
>>> classes = model.classify(data)
"""
_tkutl._check_categorical_option_type(
'output_frequency', output_frequency, ['per_window', 'per_row'])
id_target_map = self._id_target_map
preds = self.predict(
dataset, output_type='probability_vector', output_frequency=output_frequency)
if output_frequency == 'per_row':
return _SFrame({
'class': preds.apply(lambda p: id_target_map[_np.argmax(p)]),
'probability': preds.apply(_np.max)
})
elif output_frequency == 'per_window':
preds['class'] = preds['probability_vector'].apply(
lambda p: id_target_map[_np.argmax(p)])
preds['probability'] = preds['probability_vector'].apply(_np.max)
preds = preds.remove_column('probability_vector')
return preds | python | def classify(self, dataset, output_frequency='per_row'):
"""
Return a classification, for each ``prediction_window`` examples in the
``dataset``, using the trained activity classification model. The output
SFrame contains predictions as both class labels as well as probabilities
that the predicted value is the associated label.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
----------
>>> classes = model.classify(data)
"""
_tkutl._check_categorical_option_type(
'output_frequency', output_frequency, ['per_window', 'per_row'])
id_target_map = self._id_target_map
preds = self.predict(
dataset, output_type='probability_vector', output_frequency=output_frequency)
if output_frequency == 'per_row':
return _SFrame({
'class': preds.apply(lambda p: id_target_map[_np.argmax(p)]),
'probability': preds.apply(_np.max)
})
elif output_frequency == 'per_window':
preds['class'] = preds['probability_vector'].apply(
lambda p: id_target_map[_np.argmax(p)])
preds['probability'] = preds['probability_vector'].apply(_np.max)
preds = preds.remove_column('probability_vector')
return preds | [
"def",
"classify",
"(",
"self",
",",
"dataset",
",",
"output_frequency",
"=",
"'per_row'",
")",
":",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'output_frequency'",
",",
"output_frequency",
",",
"[",
"'per_window'",
",",
"'per_row'",
"]",
")",
"id_target_map",
"=",
"self",
".",
"_id_target_map",
"preds",
"=",
"self",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'probability_vector'",
",",
"output_frequency",
"=",
"output_frequency",
")",
"if",
"output_frequency",
"==",
"'per_row'",
":",
"return",
"_SFrame",
"(",
"{",
"'class'",
":",
"preds",
".",
"apply",
"(",
"lambda",
"p",
":",
"id_target_map",
"[",
"_np",
".",
"argmax",
"(",
"p",
")",
"]",
")",
",",
"'probability'",
":",
"preds",
".",
"apply",
"(",
"_np",
".",
"max",
")",
"}",
")",
"elif",
"output_frequency",
"==",
"'per_window'",
":",
"preds",
"[",
"'class'",
"]",
"=",
"preds",
"[",
"'probability_vector'",
"]",
".",
"apply",
"(",
"lambda",
"p",
":",
"id_target_map",
"[",
"_np",
".",
"argmax",
"(",
"p",
")",
"]",
")",
"preds",
"[",
"'probability'",
"]",
"=",
"preds",
"[",
"'probability_vector'",
"]",
".",
"apply",
"(",
"_np",
".",
"max",
")",
"preds",
"=",
"preds",
".",
"remove_column",
"(",
"'probability_vector'",
")",
"return",
"preds"
] | Return a classification, for each ``prediction_window`` examples in the
``dataset``, using the trained activity classification model. The output
SFrame contains predictions as both class labels as well as probabilities
that the predicted value is the associated label.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
----------
>>> classes = model.classify(data) | [
"Return",
"a",
"classification",
"for",
"each",
"prediction_window",
"examples",
"in",
"the",
"dataset",
"using",
"the",
"trained",
"activity",
"classification",
"model",
".",
"The",
"output",
"SFrame",
"contains",
"predictions",
"as",
"both",
"class",
"labels",
"as",
"well",
"as",
"probabilities",
"that",
"the",
"predicted",
"value",
"is",
"the",
"associated",
"label",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py#L745-L795 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py | ActivityClassifier.predict_topk | def predict_topk(self, dataset, output_type='probability', k=3, output_frequency='per_row'):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `prediction_id`,
`class`, and `probability`, or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+---------------+-------+-------------------+
| row_id | class | probability |
+---------------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+---------------+-------+-------------------+
"""
_tkutl._check_categorical_option_type('output_type', output_type, ['probability', 'rank'])
id_target_map = self._id_target_map
preds = self.predict(
dataset, output_type='probability_vector', output_frequency=output_frequency)
if output_frequency == 'per_row':
probs = preds
elif output_frequency == 'per_window':
probs = preds['probability_vector']
if output_type == 'rank':
probs = probs.apply(lambda p: [
{'class': id_target_map[i],
'rank': i}
for i in reversed(_np.argsort(p)[-k:])]
)
elif output_type == 'probability':
probs = probs.apply(lambda p: [
{'class': id_target_map[i],
'probability': p[i]}
for i in reversed(_np.argsort(p)[-k:])]
)
if output_frequency == 'per_row':
output = _SFrame({'probs': probs})
output = output.add_row_number(column_name='row_id')
elif output_frequency == 'per_window':
output = _SFrame({
'probs': probs,
self.session_id: preds[self.session_id],
'prediction_id': preds['prediction_id']
})
output = output.stack('probs', new_column_name='probs')
output = output.unpack('probs', column_name_prefix='')
return output | python | def predict_topk(self, dataset, output_type='probability', k=3, output_frequency='per_row'):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `prediction_id`,
`class`, and `probability`, or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+---------------+-------+-------------------+
| row_id | class | probability |
+---------------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+---------------+-------+-------------------+
"""
_tkutl._check_categorical_option_type('output_type', output_type, ['probability', 'rank'])
id_target_map = self._id_target_map
preds = self.predict(
dataset, output_type='probability_vector', output_frequency=output_frequency)
if output_frequency == 'per_row':
probs = preds
elif output_frequency == 'per_window':
probs = preds['probability_vector']
if output_type == 'rank':
probs = probs.apply(lambda p: [
{'class': id_target_map[i],
'rank': i}
for i in reversed(_np.argsort(p)[-k:])]
)
elif output_type == 'probability':
probs = probs.apply(lambda p: [
{'class': id_target_map[i],
'probability': p[i]}
for i in reversed(_np.argsort(p)[-k:])]
)
if output_frequency == 'per_row':
output = _SFrame({'probs': probs})
output = output.add_row_number(column_name='row_id')
elif output_frequency == 'per_window':
output = _SFrame({
'probs': probs,
self.session_id: preds[self.session_id],
'prediction_id': preds['prediction_id']
})
output = output.stack('probs', new_column_name='probs')
output = output.unpack('probs', column_name_prefix='')
return output | [
"def",
"predict_topk",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"'probability'",
",",
"k",
"=",
"3",
",",
"output_frequency",
"=",
"'per_row'",
")",
":",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'output_type'",
",",
"output_type",
",",
"[",
"'probability'",
",",
"'rank'",
"]",
")",
"id_target_map",
"=",
"self",
".",
"_id_target_map",
"preds",
"=",
"self",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'probability_vector'",
",",
"output_frequency",
"=",
"output_frequency",
")",
"if",
"output_frequency",
"==",
"'per_row'",
":",
"probs",
"=",
"preds",
"elif",
"output_frequency",
"==",
"'per_window'",
":",
"probs",
"=",
"preds",
"[",
"'probability_vector'",
"]",
"if",
"output_type",
"==",
"'rank'",
":",
"probs",
"=",
"probs",
".",
"apply",
"(",
"lambda",
"p",
":",
"[",
"{",
"'class'",
":",
"id_target_map",
"[",
"i",
"]",
",",
"'rank'",
":",
"i",
"}",
"for",
"i",
"in",
"reversed",
"(",
"_np",
".",
"argsort",
"(",
"p",
")",
"[",
"-",
"k",
":",
"]",
")",
"]",
")",
"elif",
"output_type",
"==",
"'probability'",
":",
"probs",
"=",
"probs",
".",
"apply",
"(",
"lambda",
"p",
":",
"[",
"{",
"'class'",
":",
"id_target_map",
"[",
"i",
"]",
",",
"'probability'",
":",
"p",
"[",
"i",
"]",
"}",
"for",
"i",
"in",
"reversed",
"(",
"_np",
".",
"argsort",
"(",
"p",
")",
"[",
"-",
"k",
":",
"]",
")",
"]",
")",
"if",
"output_frequency",
"==",
"'per_row'",
":",
"output",
"=",
"_SFrame",
"(",
"{",
"'probs'",
":",
"probs",
"}",
")",
"output",
"=",
"output",
".",
"add_row_number",
"(",
"column_name",
"=",
"'row_id'",
")",
"elif",
"output_frequency",
"==",
"'per_window'",
":",
"output",
"=",
"_SFrame",
"(",
"{",
"'probs'",
":",
"probs",
",",
"self",
".",
"session_id",
":",
"preds",
"[",
"self",
".",
"session_id",
"]",
",",
"'prediction_id'",
":",
"preds",
"[",
"'prediction_id'",
"]",
"}",
")",
"output",
"=",
"output",
".",
"stack",
"(",
"'probs'",
",",
"new_column_name",
"=",
"'probs'",
")",
"output",
"=",
"output",
".",
"unpack",
"(",
"'probs'",
",",
"column_name_prefix",
"=",
"''",
")",
"return",
"output"
] | Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `prediction_id`,
`class`, and `probability`, or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features and session id used for model training, but
does not require a target column. Additional columns are ignored.
output_type : {'probability', 'rank'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
output_frequency : {'per_row', 'per_window'}, optional
The frequency of the predictions which is one of:
- 'per_row': Each prediction is returned ``prediction_window`` times.
- 'per_window': Return a single prediction for each
``prediction_window`` rows in ``dataset`` per ``session_id``.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+---------------+-------+-------------------+
| row_id | class | probability |
+---------------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+---------------+-------+-------------------+ | [
"Return",
"top",
"-",
"k",
"predictions",
"for",
"the",
"dataset",
"using",
"the",
"trained",
"model",
".",
"Predictions",
"are",
"returned",
"as",
"an",
"SFrame",
"with",
"three",
"columns",
":",
"prediction_id",
"class",
"and",
"probability",
"or",
"rank",
"depending",
"on",
"the",
"output_type",
"parameter",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py#L797-L891 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/char_stat.py | count_characters | def count_characters(root, out):
"""Count the occurrances of the different characters in the files"""
if os.path.isfile(root):
with open(root, 'rb') as in_f:
for line in in_f:
for char in line:
if char not in out:
out[char] = 0
out[char] = out[char] + 1
elif os.path.isdir(root):
for filename in os.listdir(root):
count_characters(os.path.join(root, filename), out) | python | def count_characters(root, out):
"""Count the occurrances of the different characters in the files"""
if os.path.isfile(root):
with open(root, 'rb') as in_f:
for line in in_f:
for char in line:
if char not in out:
out[char] = 0
out[char] = out[char] + 1
elif os.path.isdir(root):
for filename in os.listdir(root):
count_characters(os.path.join(root, filename), out) | [
"def",
"count_characters",
"(",
"root",
",",
"out",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"root",
")",
":",
"with",
"open",
"(",
"root",
",",
"'rb'",
")",
"as",
"in_f",
":",
"for",
"line",
"in",
"in_f",
":",
"for",
"char",
"in",
"line",
":",
"if",
"char",
"not",
"in",
"out",
":",
"out",
"[",
"char",
"]",
"=",
"0",
"out",
"[",
"char",
"]",
"=",
"out",
"[",
"char",
"]",
"+",
"1",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"root",
")",
":",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"root",
")",
":",
"count_characters",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
",",
"out",
")"
] | Count the occurrances of the different characters in the files | [
"Count",
"the",
"occurrances",
"of",
"the",
"different",
"characters",
"in",
"the",
"files"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/char_stat.py#L13-L24 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/char_stat.py | main | def main():
"""The main function of the script"""
desc = 'Generate character statistics from a source tree'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--src',
dest='src',
required=True,
help='The root of the source tree'
)
parser.add_argument(
'--out',
dest='out',
default='chars.py',
help='The output filename'
)
args = parser.parse_args()
stats = generate_statistics(args.src)
with open(args.out, 'wb') as out_f:
out_f.write('CHARS={0}\n'.format(stats)) | python | def main():
"""The main function of the script"""
desc = 'Generate character statistics from a source tree'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--src',
dest='src',
required=True,
help='The root of the source tree'
)
parser.add_argument(
'--out',
dest='out',
default='chars.py',
help='The output filename'
)
args = parser.parse_args()
stats = generate_statistics(args.src)
with open(args.out, 'wb') as out_f:
out_f.write('CHARS={0}\n'.format(stats)) | [
"def",
"main",
"(",
")",
":",
"desc",
"=",
"'Generate character statistics from a source tree'",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"desc",
")",
"parser",
".",
"add_argument",
"(",
"'--src'",
",",
"dest",
"=",
"'src'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'The root of the source tree'",
")",
"parser",
".",
"add_argument",
"(",
"'--out'",
",",
"dest",
"=",
"'out'",
",",
"default",
"=",
"'chars.py'",
",",
"help",
"=",
"'The output filename'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"stats",
"=",
"generate_statistics",
"(",
"args",
".",
"src",
")",
"with",
"open",
"(",
"args",
".",
"out",
",",
"'wb'",
")",
"as",
"out_f",
":",
"out_f",
".",
"write",
"(",
"'CHARS={0}\\n'",
".",
"format",
"(",
"stats",
")",
")"
] | The main function of the script | [
"The",
"main",
"function",
"of",
"the",
"script"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/char_stat.py#L34-L55 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/utils.py | save_spec | def save_spec(spec, filename):
"""
Save a protobuf model specification to file.
Parameters
----------
spec: Model_pb
Protobuf representation of the model
filename: str
File path where the spec gets saved.
Examples
--------
.. sourcecode:: python
>>> coremltools.utils.save_spec(spec, 'HousePricer.mlmodel')
See Also
--------
load_spec
"""
name, ext = _os.path.splitext(filename)
if not ext:
filename = "%s.mlmodel" % filename
else:
if ext != '.mlmodel':
raise Exception("Extension must be .mlmodel (not %s)" % ext)
with open(filename, 'wb') as f:
s = spec.SerializeToString()
f.write(s) | python | def save_spec(spec, filename):
"""
Save a protobuf model specification to file.
Parameters
----------
spec: Model_pb
Protobuf representation of the model
filename: str
File path where the spec gets saved.
Examples
--------
.. sourcecode:: python
>>> coremltools.utils.save_spec(spec, 'HousePricer.mlmodel')
See Also
--------
load_spec
"""
name, ext = _os.path.splitext(filename)
if not ext:
filename = "%s.mlmodel" % filename
else:
if ext != '.mlmodel':
raise Exception("Extension must be .mlmodel (not %s)" % ext)
with open(filename, 'wb') as f:
s = spec.SerializeToString()
f.write(s) | [
"def",
"save_spec",
"(",
"spec",
",",
"filename",
")",
":",
"name",
",",
"ext",
"=",
"_os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"not",
"ext",
":",
"filename",
"=",
"\"%s.mlmodel\"",
"%",
"filename",
"else",
":",
"if",
"ext",
"!=",
"'.mlmodel'",
":",
"raise",
"Exception",
"(",
"\"Extension must be .mlmodel (not %s)\"",
"%",
"ext",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"s",
"=",
"spec",
".",
"SerializeToString",
"(",
")",
"f",
".",
"write",
"(",
"s",
")"
] | Save a protobuf model specification to file.
Parameters
----------
spec: Model_pb
Protobuf representation of the model
filename: str
File path where the spec gets saved.
Examples
--------
.. sourcecode:: python
>>> coremltools.utils.save_spec(spec, 'HousePricer.mlmodel')
See Also
--------
load_spec | [
"Save",
"a",
"protobuf",
"model",
"specification",
"to",
"file",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/utils.py#L28-L59 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/utils.py | load_spec | def load_spec(filename):
"""
Load a protobuf model specification from file
Parameters
----------
filename: str
Location on disk (a valid filepath) from which the file is loaded
as a protobuf spec.
Returns
-------
model_spec: Model_pb
Protobuf representation of the model
Examples
--------
.. sourcecode:: python
>>> spec = coremltools.utils.load_spec('HousePricer.mlmodel')
See Also
--------
save_spec
"""
from ..proto import Model_pb2
spec = Model_pb2.Model()
with open(filename, 'rb') as f:
contents = f.read()
spec.ParseFromString(contents)
return spec | python | def load_spec(filename):
"""
Load a protobuf model specification from file
Parameters
----------
filename: str
Location on disk (a valid filepath) from which the file is loaded
as a protobuf spec.
Returns
-------
model_spec: Model_pb
Protobuf representation of the model
Examples
--------
.. sourcecode:: python
>>> spec = coremltools.utils.load_spec('HousePricer.mlmodel')
See Also
--------
save_spec
"""
from ..proto import Model_pb2
spec = Model_pb2.Model()
with open(filename, 'rb') as f:
contents = f.read()
spec.ParseFromString(contents)
return spec | [
"def",
"load_spec",
"(",
"filename",
")",
":",
"from",
".",
".",
"proto",
"import",
"Model_pb2",
"spec",
"=",
"Model_pb2",
".",
"Model",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"contents",
"=",
"f",
".",
"read",
"(",
")",
"spec",
".",
"ParseFromString",
"(",
"contents",
")",
"return",
"spec"
] | Load a protobuf model specification from file
Parameters
----------
filename: str
Location on disk (a valid filepath) from which the file is loaded
as a protobuf spec.
Returns
-------
model_spec: Model_pb
Protobuf representation of the model
Examples
--------
.. sourcecode:: python
>>> spec = coremltools.utils.load_spec('HousePricer.mlmodel')
See Also
--------
save_spec | [
"Load",
"a",
"protobuf",
"model",
"specification",
"from",
"file"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/utils.py#L62-L93 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/utils.py | _get_nn_layers | def _get_nn_layers(spec):
"""
Returns a list of neural network layers if the model contains any.
Parameters
----------
spec: Model_pb
A model protobuf specification.
Returns
-------
[NN layer]
list of all layers (including layers from elements of a pipeline
"""
layers = []
if spec.WhichOneof('Type') == 'pipeline':
layers = []
for model_spec in spec.pipeline.models:
if not layers:
return _get_nn_layers(model_spec)
else:
layers.extend(_get_nn_layers(model_spec))
elif spec.WhichOneof('Type') in ['pipelineClassifier',
'pipelineRegressor']:
layers = []
for model_spec in spec.pipeline.models:
if not layers:
return _get_nn_layers(model_spec)
else:
layers.extend(_get_nn_layers(model_spec))
elif spec.neuralNetwork.layers:
layers = spec.neuralNetwork.layers
elif spec.neuralNetworkClassifier.layers:
layers = spec.neuralNetworkClassifier.layers
elif spec.neuralNetworkRegressor.layers:
layers = spec.neuralNetworkRegressor.layers
return layers | python | def _get_nn_layers(spec):
"""
Returns a list of neural network layers if the model contains any.
Parameters
----------
spec: Model_pb
A model protobuf specification.
Returns
-------
[NN layer]
list of all layers (including layers from elements of a pipeline
"""
layers = []
if spec.WhichOneof('Type') == 'pipeline':
layers = []
for model_spec in spec.pipeline.models:
if not layers:
return _get_nn_layers(model_spec)
else:
layers.extend(_get_nn_layers(model_spec))
elif spec.WhichOneof('Type') in ['pipelineClassifier',
'pipelineRegressor']:
layers = []
for model_spec in spec.pipeline.models:
if not layers:
return _get_nn_layers(model_spec)
else:
layers.extend(_get_nn_layers(model_spec))
elif spec.neuralNetwork.layers:
layers = spec.neuralNetwork.layers
elif spec.neuralNetworkClassifier.layers:
layers = spec.neuralNetworkClassifier.layers
elif spec.neuralNetworkRegressor.layers:
layers = spec.neuralNetworkRegressor.layers
return layers | [
"def",
"_get_nn_layers",
"(",
"spec",
")",
":",
"layers",
"=",
"[",
"]",
"if",
"spec",
".",
"WhichOneof",
"(",
"'Type'",
")",
"==",
"'pipeline'",
":",
"layers",
"=",
"[",
"]",
"for",
"model_spec",
"in",
"spec",
".",
"pipeline",
".",
"models",
":",
"if",
"not",
"layers",
":",
"return",
"_get_nn_layers",
"(",
"model_spec",
")",
"else",
":",
"layers",
".",
"extend",
"(",
"_get_nn_layers",
"(",
"model_spec",
")",
")",
"elif",
"spec",
".",
"WhichOneof",
"(",
"'Type'",
")",
"in",
"[",
"'pipelineClassifier'",
",",
"'pipelineRegressor'",
"]",
":",
"layers",
"=",
"[",
"]",
"for",
"model_spec",
"in",
"spec",
".",
"pipeline",
".",
"models",
":",
"if",
"not",
"layers",
":",
"return",
"_get_nn_layers",
"(",
"model_spec",
")",
"else",
":",
"layers",
".",
"extend",
"(",
"_get_nn_layers",
"(",
"model_spec",
")",
")",
"elif",
"spec",
".",
"neuralNetwork",
".",
"layers",
":",
"layers",
"=",
"spec",
".",
"neuralNetwork",
".",
"layers",
"elif",
"spec",
".",
"neuralNetworkClassifier",
".",
"layers",
":",
"layers",
"=",
"spec",
".",
"neuralNetworkClassifier",
".",
"layers",
"elif",
"spec",
".",
"neuralNetworkRegressor",
".",
"layers",
":",
"layers",
"=",
"spec",
".",
"neuralNetworkRegressor",
".",
"layers",
"return",
"layers"
] | Returns a list of neural network layers if the model contains any.
Parameters
----------
spec: Model_pb
A model protobuf specification.
Returns
-------
[NN layer]
list of all layers (including layers from elements of a pipeline | [
"Returns",
"a",
"list",
"of",
"neural",
"network",
"layers",
"if",
"the",
"model",
"contains",
"any",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/utils.py#L96-L137 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/utils.py | evaluate_regressor | def evaluate_regressor(model, data, target="target", verbose=False):
"""
Evaluate a CoreML regression model and compare against predictions
from the original framework (for testing correctness of conversion)
Parameters
----------
filename: [str | MLModel]
File path from which to load the MLModel from (OR) a loaded version of
MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a .csv file).
target: str
Name of the column in the dataframe that must be interpreted
as the target column.
verbose: bool
Set to true for a more verbose output.
See Also
--------
evaluate_classifier
Examples
--------
.. sourcecode:: python
>>> metrics = coremltools.utils.evaluate_regressor(spec, 'data_and_predictions.csv', 'target')
>>> print(metrics)
{"samples": 10, "rmse": 0.0, max_error: 0.0}
"""
model = _get_model(model)
if verbose:
print("")
print("Other Framework\t\tPredicted\t\tDelta")
max_error = 0
error_squared = 0
for index,row in data.iterrows():
predicted = model.predict(dict(row))[_to_unicode(target)]
other_framework = row["prediction"]
delta = predicted - other_framework
if verbose:
print("%s\t\t\t\t%s\t\t\t%0.4f" % (other_framework, predicted, delta))
max_error = max(abs(delta), max_error)
error_squared = error_squared + (delta * delta)
ret = {
"samples": len(data),
"rmse": _math.sqrt(error_squared / len(data)),
"max_error": max_error
}
if verbose:
print("results: %s" % ret)
return ret | python | def evaluate_regressor(model, data, target="target", verbose=False):
"""
Evaluate a CoreML regression model and compare against predictions
from the original framework (for testing correctness of conversion)
Parameters
----------
filename: [str | MLModel]
File path from which to load the MLModel from (OR) a loaded version of
MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a .csv file).
target: str
Name of the column in the dataframe that must be interpreted
as the target column.
verbose: bool
Set to true for a more verbose output.
See Also
--------
evaluate_classifier
Examples
--------
.. sourcecode:: python
>>> metrics = coremltools.utils.evaluate_regressor(spec, 'data_and_predictions.csv', 'target')
>>> print(metrics)
{"samples": 10, "rmse": 0.0, max_error: 0.0}
"""
model = _get_model(model)
if verbose:
print("")
print("Other Framework\t\tPredicted\t\tDelta")
max_error = 0
error_squared = 0
for index,row in data.iterrows():
predicted = model.predict(dict(row))[_to_unicode(target)]
other_framework = row["prediction"]
delta = predicted - other_framework
if verbose:
print("%s\t\t\t\t%s\t\t\t%0.4f" % (other_framework, predicted, delta))
max_error = max(abs(delta), max_error)
error_squared = error_squared + (delta * delta)
ret = {
"samples": len(data),
"rmse": _math.sqrt(error_squared / len(data)),
"max_error": max_error
}
if verbose:
print("results: %s" % ret)
return ret | [
"def",
"evaluate_regressor",
"(",
"model",
",",
"data",
",",
"target",
"=",
"\"target\"",
",",
"verbose",
"=",
"False",
")",
":",
"model",
"=",
"_get_model",
"(",
"model",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Other Framework\\t\\tPredicted\\t\\tDelta\"",
")",
"max_error",
"=",
"0",
"error_squared",
"=",
"0",
"for",
"index",
",",
"row",
"in",
"data",
".",
"iterrows",
"(",
")",
":",
"predicted",
"=",
"model",
".",
"predict",
"(",
"dict",
"(",
"row",
")",
")",
"[",
"_to_unicode",
"(",
"target",
")",
"]",
"other_framework",
"=",
"row",
"[",
"\"prediction\"",
"]",
"delta",
"=",
"predicted",
"-",
"other_framework",
"if",
"verbose",
":",
"print",
"(",
"\"%s\\t\\t\\t\\t%s\\t\\t\\t%0.4f\"",
"%",
"(",
"other_framework",
",",
"predicted",
",",
"delta",
")",
")",
"max_error",
"=",
"max",
"(",
"abs",
"(",
"delta",
")",
",",
"max_error",
")",
"error_squared",
"=",
"error_squared",
"+",
"(",
"delta",
"*",
"delta",
")",
"ret",
"=",
"{",
"\"samples\"",
":",
"len",
"(",
"data",
")",
",",
"\"rmse\"",
":",
"_math",
".",
"sqrt",
"(",
"error_squared",
"/",
"len",
"(",
"data",
")",
")",
",",
"\"max_error\"",
":",
"max_error",
"}",
"if",
"verbose",
":",
"print",
"(",
"\"results: %s\"",
"%",
"ret",
")",
"return",
"ret"
] | Evaluate a CoreML regression model and compare against predictions
from the original framework (for testing correctness of conversion)
Parameters
----------
filename: [str | MLModel]
File path from which to load the MLModel from (OR) a loaded version of
MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a .csv file).
target: str
Name of the column in the dataframe that must be interpreted
as the target column.
verbose: bool
Set to true for a more verbose output.
See Also
--------
evaluate_classifier
Examples
--------
.. sourcecode:: python
>>> metrics = coremltools.utils.evaluate_regressor(spec, 'data_and_predictions.csv', 'target')
>>> print(metrics)
{"samples": 10, "rmse": 0.0, max_error: 0.0} | [
"Evaluate",
"a",
"CoreML",
"regression",
"model",
"and",
"compare",
"against",
"predictions",
"from",
"the",
"original",
"framework",
"(",
"for",
"testing",
"correctness",
"of",
"conversion",
")"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/utils.py#L386-L448 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/utils.py | evaluate_classifier | def evaluate_classifier(model, data, target='target', verbose=False):
"""
Evaluate a CoreML classifier model and compare against predictions
from the original framework (for testing correctness of conversion). Use
this evaluation for models that don't deal with probabilities.
Parameters
----------
filename: [str | MLModel]
File from where to load the model from (OR) a loaded
version of the MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a csv file).
target: str
Column to interpret as the target column
verbose: bool
Set to true for a more verbose output.
See Also
--------
evaluate_regressor, evaluate_classifier_with_probabilities
Examples
--------
.. sourcecode:: python
>>> metrics = coremltools.utils.evaluate_classifier(spec, 'data_and_predictions.csv', 'target')
>>> print(metrics)
{"samples": 10, num_errors: 0}
"""
model = _get_model(model)
if verbose:
print("")
print("Other Framework\t\tPredicted")
num_errors = 0
for index,row in data.iterrows():
predicted = model.predict(dict(row))[_to_unicode(target)]
other_framework = row["prediction"]
if predicted != other_framework:
num_errors += 1
if verbose:
print("%s\t\t\t\t%s" % (other_framework, predicted))
ret = {
"num_samples": len(data),
"num_errors": num_errors
}
if verbose:
print("results: %s" % ret)
return ret | python | def evaluate_classifier(model, data, target='target', verbose=False):
"""
Evaluate a CoreML classifier model and compare against predictions
from the original framework (for testing correctness of conversion). Use
this evaluation for models that don't deal with probabilities.
Parameters
----------
filename: [str | MLModel]
File from where to load the model from (OR) a loaded
version of the MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a csv file).
target: str
Column to interpret as the target column
verbose: bool
Set to true for a more verbose output.
See Also
--------
evaluate_regressor, evaluate_classifier_with_probabilities
Examples
--------
.. sourcecode:: python
>>> metrics = coremltools.utils.evaluate_classifier(spec, 'data_and_predictions.csv', 'target')
>>> print(metrics)
{"samples": 10, num_errors: 0}
"""
model = _get_model(model)
if verbose:
print("")
print("Other Framework\t\tPredicted")
num_errors = 0
for index,row in data.iterrows():
predicted = model.predict(dict(row))[_to_unicode(target)]
other_framework = row["prediction"]
if predicted != other_framework:
num_errors += 1
if verbose:
print("%s\t\t\t\t%s" % (other_framework, predicted))
ret = {
"num_samples": len(data),
"num_errors": num_errors
}
if verbose:
print("results: %s" % ret)
return ret | [
"def",
"evaluate_classifier",
"(",
"model",
",",
"data",
",",
"target",
"=",
"'target'",
",",
"verbose",
"=",
"False",
")",
":",
"model",
"=",
"_get_model",
"(",
"model",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Other Framework\\t\\tPredicted\"",
")",
"num_errors",
"=",
"0",
"for",
"index",
",",
"row",
"in",
"data",
".",
"iterrows",
"(",
")",
":",
"predicted",
"=",
"model",
".",
"predict",
"(",
"dict",
"(",
"row",
")",
")",
"[",
"_to_unicode",
"(",
"target",
")",
"]",
"other_framework",
"=",
"row",
"[",
"\"prediction\"",
"]",
"if",
"predicted",
"!=",
"other_framework",
":",
"num_errors",
"+=",
"1",
"if",
"verbose",
":",
"print",
"(",
"\"%s\\t\\t\\t\\t%s\"",
"%",
"(",
"other_framework",
",",
"predicted",
")",
")",
"ret",
"=",
"{",
"\"num_samples\"",
":",
"len",
"(",
"data",
")",
",",
"\"num_errors\"",
":",
"num_errors",
"}",
"if",
"verbose",
":",
"print",
"(",
"\"results: %s\"",
"%",
"ret",
")",
"return",
"ret"
] | Evaluate a CoreML classifier model and compare against predictions
from the original framework (for testing correctness of conversion). Use
this evaluation for models that don't deal with probabilities.
Parameters
----------
filename: [str | MLModel]
File from where to load the model from (OR) a loaded
version of the MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a csv file).
target: str
Column to interpret as the target column
verbose: bool
Set to true for a more verbose output.
See Also
--------
evaluate_regressor, evaluate_classifier_with_probabilities
Examples
--------
.. sourcecode:: python
>>> metrics = coremltools.utils.evaluate_classifier(spec, 'data_and_predictions.csv', 'target')
>>> print(metrics)
{"samples": 10, num_errors: 0} | [
"Evaluate",
"a",
"CoreML",
"classifier",
"model",
"and",
"compare",
"against",
"predictions",
"from",
"the",
"original",
"framework",
"(",
"for",
"testing",
"correctness",
"of",
"conversion",
")",
".",
"Use",
"this",
"evaluation",
"for",
"models",
"that",
"don",
"t",
"deal",
"with",
"probabilities",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/utils.py#L451-L509 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/utils.py | evaluate_classifier_with_probabilities | def evaluate_classifier_with_probabilities(model, data,
probabilities='probabilities',
verbose = False):
"""
Evaluate a classifier specification for testing.
Parameters
----------
filename: [str | Model]
File from where to load the model from (OR) a loaded
version of the MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a csv file).
probabilities: str
Column to interpret as the probabilities column
verbose: bool
Verbosity levels of the predictions.
"""
model = _get_model(model)
if verbose:
print("")
print("Other Framework\t\tPredicted")
max_probability_error, num_key_mismatch = 0, 0
for _,row in data.iterrows():
predicted_values = model.predict(dict(row))[_to_unicode(probabilities)]
other_values = row[probabilities]
if set(predicted_values.keys()) != set(other_values.keys()):
if verbose:
print("Different classes: ", str(predicted_values.keys()), str(other_values.keys()))
num_key_mismatch += 1
continue
for cur_class, cur_predicted_class_values in predicted_values.items():
delta = cur_predicted_class_values - other_values[cur_class]
if verbose:
print(delta, cur_predicted_class_values, other_values[cur_class])
max_probability_error = max(abs(delta), max_probability_error)
if verbose:
print("")
ret = {
"num_samples": len(data),
"max_probability_error": max_probability_error,
"num_key_mismatch": num_key_mismatch
}
if verbose:
print("results: %s" % ret)
return ret | python | def evaluate_classifier_with_probabilities(model, data,
probabilities='probabilities',
verbose = False):
"""
Evaluate a classifier specification for testing.
Parameters
----------
filename: [str | Model]
File from where to load the model from (OR) a loaded
version of the MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a csv file).
probabilities: str
Column to interpret as the probabilities column
verbose: bool
Verbosity levels of the predictions.
"""
model = _get_model(model)
if verbose:
print("")
print("Other Framework\t\tPredicted")
max_probability_error, num_key_mismatch = 0, 0
for _,row in data.iterrows():
predicted_values = model.predict(dict(row))[_to_unicode(probabilities)]
other_values = row[probabilities]
if set(predicted_values.keys()) != set(other_values.keys()):
if verbose:
print("Different classes: ", str(predicted_values.keys()), str(other_values.keys()))
num_key_mismatch += 1
continue
for cur_class, cur_predicted_class_values in predicted_values.items():
delta = cur_predicted_class_values - other_values[cur_class]
if verbose:
print(delta, cur_predicted_class_values, other_values[cur_class])
max_probability_error = max(abs(delta), max_probability_error)
if verbose:
print("")
ret = {
"num_samples": len(data),
"max_probability_error": max_probability_error,
"num_key_mismatch": num_key_mismatch
}
if verbose:
print("results: %s" % ret)
return ret | [
"def",
"evaluate_classifier_with_probabilities",
"(",
"model",
",",
"data",
",",
"probabilities",
"=",
"'probabilities'",
",",
"verbose",
"=",
"False",
")",
":",
"model",
"=",
"_get_model",
"(",
"model",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Other Framework\\t\\tPredicted\"",
")",
"max_probability_error",
",",
"num_key_mismatch",
"=",
"0",
",",
"0",
"for",
"_",
",",
"row",
"in",
"data",
".",
"iterrows",
"(",
")",
":",
"predicted_values",
"=",
"model",
".",
"predict",
"(",
"dict",
"(",
"row",
")",
")",
"[",
"_to_unicode",
"(",
"probabilities",
")",
"]",
"other_values",
"=",
"row",
"[",
"probabilities",
"]",
"if",
"set",
"(",
"predicted_values",
".",
"keys",
"(",
")",
")",
"!=",
"set",
"(",
"other_values",
".",
"keys",
"(",
")",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Different classes: \"",
",",
"str",
"(",
"predicted_values",
".",
"keys",
"(",
")",
")",
",",
"str",
"(",
"other_values",
".",
"keys",
"(",
")",
")",
")",
"num_key_mismatch",
"+=",
"1",
"continue",
"for",
"cur_class",
",",
"cur_predicted_class_values",
"in",
"predicted_values",
".",
"items",
"(",
")",
":",
"delta",
"=",
"cur_predicted_class_values",
"-",
"other_values",
"[",
"cur_class",
"]",
"if",
"verbose",
":",
"print",
"(",
"delta",
",",
"cur_predicted_class_values",
",",
"other_values",
"[",
"cur_class",
"]",
")",
"max_probability_error",
"=",
"max",
"(",
"abs",
"(",
"delta",
")",
",",
"max_probability_error",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\"",
")",
"ret",
"=",
"{",
"\"num_samples\"",
":",
"len",
"(",
"data",
")",
",",
"\"max_probability_error\"",
":",
"max_probability_error",
",",
"\"num_key_mismatch\"",
":",
"num_key_mismatch",
"}",
"if",
"verbose",
":",
"print",
"(",
"\"results: %s\"",
"%",
"ret",
")",
"return",
"ret"
] | Evaluate a classifier specification for testing.
Parameters
----------
filename: [str | Model]
File from where to load the model from (OR) a loaded
version of the MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a csv file).
probabilities: str
Column to interpret as the probabilities column
verbose: bool
Verbosity levels of the predictions. | [
"Evaluate",
"a",
"classifier",
"specification",
"for",
"testing",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/utils.py#L512-L571 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/utils.py | rename_feature | def rename_feature(spec, current_name, new_name, rename_inputs=True,
rename_outputs=True):
"""
Rename a feature in the specification.
Parameters
----------
spec: Model_pb
The specification containing the feature to rename.
current_name: str
Current name of the feature. If this feature doesn't exist, the rename
is a no-op.
new_name: str
New name of the feature.
rename_inputs: bool
Search for `current_name` only in the input features (i.e ignore output
features)
rename_outputs: bool
Search for `current_name` only in the output features (i.e ignore input
features)
Examples
--------
.. sourcecode:: python
# In-place rename of spec
>>> coremltools.utils.rename_feature(spec, 'old_feature', 'new_feature_name')
"""
from coremltools.models import MLModel
if not rename_inputs and not rename_outputs:
return
changed_input = False
changed_output = False
if rename_inputs:
for input in spec.description.input:
if input.name == current_name:
input.name = new_name
changed_input = True
if rename_outputs:
for output in spec.description.output:
if output.name == current_name:
output.name = new_name
changed_output = True
if spec.description.predictedFeatureName == current_name:
spec.description.predictedFeatureName = new_name
if spec.description.predictedProbabilitiesName == current_name:
spec.description.predictedProbabilitiesName = new_name
if not changed_input and not changed_output:
return
# Rename internally in NN model
nn = None
for nn_type in ['neuralNetwork','neuralNetworkClassifier','neuralNetworkRegressor']:
if spec.HasField(nn_type):
nn = getattr(spec,nn_type)
if nn is not None:
for layer in nn.layers:
if rename_inputs:
for index,name in enumerate(layer.input):
if name == current_name:
layer.input[index] = new_name
if rename_outputs:
for index,name in enumerate(layer.output):
if name == current_name:
layer.output[index] = new_name
# Rename internally for feature vectorizer
if spec.HasField('featureVectorizer') and rename_inputs:
for input in spec.featureVectorizer.inputList:
if input.inputColumn == current_name:
input.inputColumn = new_name
changed_input = True
# Rename for pipeline models
pipeline = None
if spec.HasField('pipeline'):
pipeline = spec.pipeline
elif spec.HasField('pipelineClassifier'):
pipeline = spec.pipelineClassifier.pipeline
elif spec.HasField('pipelineRegressor'):
pipeline = spec.pipelineRegressor.pipeline
if pipeline is not None:
for index,model in enumerate(pipeline.models):
rename_feature(model,
current_name,
new_name,
rename_inputs or (index != 0),
rename_outputs or (index < len(spec.pipeline.models))) | python | def rename_feature(spec, current_name, new_name, rename_inputs=True,
rename_outputs=True):
"""
Rename a feature in the specification.
Parameters
----------
spec: Model_pb
The specification containing the feature to rename.
current_name: str
Current name of the feature. If this feature doesn't exist, the rename
is a no-op.
new_name: str
New name of the feature.
rename_inputs: bool
Search for `current_name` only in the input features (i.e ignore output
features)
rename_outputs: bool
Search for `current_name` only in the output features (i.e ignore input
features)
Examples
--------
.. sourcecode:: python
# In-place rename of spec
>>> coremltools.utils.rename_feature(spec, 'old_feature', 'new_feature_name')
"""
from coremltools.models import MLModel
if not rename_inputs and not rename_outputs:
return
changed_input = False
changed_output = False
if rename_inputs:
for input in spec.description.input:
if input.name == current_name:
input.name = new_name
changed_input = True
if rename_outputs:
for output in spec.description.output:
if output.name == current_name:
output.name = new_name
changed_output = True
if spec.description.predictedFeatureName == current_name:
spec.description.predictedFeatureName = new_name
if spec.description.predictedProbabilitiesName == current_name:
spec.description.predictedProbabilitiesName = new_name
if not changed_input and not changed_output:
return
# Rename internally in NN model
nn = None
for nn_type in ['neuralNetwork','neuralNetworkClassifier','neuralNetworkRegressor']:
if spec.HasField(nn_type):
nn = getattr(spec,nn_type)
if nn is not None:
for layer in nn.layers:
if rename_inputs:
for index,name in enumerate(layer.input):
if name == current_name:
layer.input[index] = new_name
if rename_outputs:
for index,name in enumerate(layer.output):
if name == current_name:
layer.output[index] = new_name
# Rename internally for feature vectorizer
if spec.HasField('featureVectorizer') and rename_inputs:
for input in spec.featureVectorizer.inputList:
if input.inputColumn == current_name:
input.inputColumn = new_name
changed_input = True
# Rename for pipeline models
pipeline = None
if spec.HasField('pipeline'):
pipeline = spec.pipeline
elif spec.HasField('pipelineClassifier'):
pipeline = spec.pipelineClassifier.pipeline
elif spec.HasField('pipelineRegressor'):
pipeline = spec.pipelineRegressor.pipeline
if pipeline is not None:
for index,model in enumerate(pipeline.models):
rename_feature(model,
current_name,
new_name,
rename_inputs or (index != 0),
rename_outputs or (index < len(spec.pipeline.models))) | [
"def",
"rename_feature",
"(",
"spec",
",",
"current_name",
",",
"new_name",
",",
"rename_inputs",
"=",
"True",
",",
"rename_outputs",
"=",
"True",
")",
":",
"from",
"coremltools",
".",
"models",
"import",
"MLModel",
"if",
"not",
"rename_inputs",
"and",
"not",
"rename_outputs",
":",
"return",
"changed_input",
"=",
"False",
"changed_output",
"=",
"False",
"if",
"rename_inputs",
":",
"for",
"input",
"in",
"spec",
".",
"description",
".",
"input",
":",
"if",
"input",
".",
"name",
"==",
"current_name",
":",
"input",
".",
"name",
"=",
"new_name",
"changed_input",
"=",
"True",
"if",
"rename_outputs",
":",
"for",
"output",
"in",
"spec",
".",
"description",
".",
"output",
":",
"if",
"output",
".",
"name",
"==",
"current_name",
":",
"output",
".",
"name",
"=",
"new_name",
"changed_output",
"=",
"True",
"if",
"spec",
".",
"description",
".",
"predictedFeatureName",
"==",
"current_name",
":",
"spec",
".",
"description",
".",
"predictedFeatureName",
"=",
"new_name",
"if",
"spec",
".",
"description",
".",
"predictedProbabilitiesName",
"==",
"current_name",
":",
"spec",
".",
"description",
".",
"predictedProbabilitiesName",
"=",
"new_name",
"if",
"not",
"changed_input",
"and",
"not",
"changed_output",
":",
"return",
"# Rename internally in NN model",
"nn",
"=",
"None",
"for",
"nn_type",
"in",
"[",
"'neuralNetwork'",
",",
"'neuralNetworkClassifier'",
",",
"'neuralNetworkRegressor'",
"]",
":",
"if",
"spec",
".",
"HasField",
"(",
"nn_type",
")",
":",
"nn",
"=",
"getattr",
"(",
"spec",
",",
"nn_type",
")",
"if",
"nn",
"is",
"not",
"None",
":",
"for",
"layer",
"in",
"nn",
".",
"layers",
":",
"if",
"rename_inputs",
":",
"for",
"index",
",",
"name",
"in",
"enumerate",
"(",
"layer",
".",
"input",
")",
":",
"if",
"name",
"==",
"current_name",
":",
"layer",
".",
"input",
"[",
"index",
"]",
"=",
"new_name",
"if",
"rename_outputs",
":",
"for",
"index",
",",
"name",
"in",
"enumerate",
"(",
"layer",
".",
"output",
")",
":",
"if",
"name",
"==",
"current_name",
":",
"layer",
".",
"output",
"[",
"index",
"]",
"=",
"new_name",
"# Rename internally for feature vectorizer",
"if",
"spec",
".",
"HasField",
"(",
"'featureVectorizer'",
")",
"and",
"rename_inputs",
":",
"for",
"input",
"in",
"spec",
".",
"featureVectorizer",
".",
"inputList",
":",
"if",
"input",
".",
"inputColumn",
"==",
"current_name",
":",
"input",
".",
"inputColumn",
"=",
"new_name",
"changed_input",
"=",
"True",
"# Rename for pipeline models",
"pipeline",
"=",
"None",
"if",
"spec",
".",
"HasField",
"(",
"'pipeline'",
")",
":",
"pipeline",
"=",
"spec",
".",
"pipeline",
"elif",
"spec",
".",
"HasField",
"(",
"'pipelineClassifier'",
")",
":",
"pipeline",
"=",
"spec",
".",
"pipelineClassifier",
".",
"pipeline",
"elif",
"spec",
".",
"HasField",
"(",
"'pipelineRegressor'",
")",
":",
"pipeline",
"=",
"spec",
".",
"pipelineRegressor",
".",
"pipeline",
"if",
"pipeline",
"is",
"not",
"None",
":",
"for",
"index",
",",
"model",
"in",
"enumerate",
"(",
"pipeline",
".",
"models",
")",
":",
"rename_feature",
"(",
"model",
",",
"current_name",
",",
"new_name",
",",
"rename_inputs",
"or",
"(",
"index",
"!=",
"0",
")",
",",
"rename_outputs",
"or",
"(",
"index",
"<",
"len",
"(",
"spec",
".",
"pipeline",
".",
"models",
")",
")",
")"
] | Rename a feature in the specification.
Parameters
----------
spec: Model_pb
The specification containing the feature to rename.
current_name: str
Current name of the feature. If this feature doesn't exist, the rename
is a no-op.
new_name: str
New name of the feature.
rename_inputs: bool
Search for `current_name` only in the input features (i.e ignore output
features)
rename_outputs: bool
Search for `current_name` only in the output features (i.e ignore input
features)
Examples
--------
.. sourcecode:: python
# In-place rename of spec
>>> coremltools.utils.rename_feature(spec, 'old_feature', 'new_feature_name') | [
"Rename",
"a",
"feature",
"in",
"the",
"specification",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/utils.py#L574-L674 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.