repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/common.py | prepend_path_variable_command | def prepend_path_variable_command(variable, paths):
"""
Returns a command that prepends the given paths to the named path variable on
the current platform.
"""
assert isinstance(variable, basestring)
assert is_iterable_typed(paths, basestring)
return path_variable_setting_command(
variable, paths + [expand_variable(variable)]) | python | def prepend_path_variable_command(variable, paths):
"""
Returns a command that prepends the given paths to the named path variable on
the current platform.
"""
assert isinstance(variable, basestring)
assert is_iterable_typed(paths, basestring)
return path_variable_setting_command(
variable, paths + [expand_variable(variable)]) | [
"def",
"prepend_path_variable_command",
"(",
"variable",
",",
"paths",
")",
":",
"assert",
"isinstance",
"(",
"variable",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"paths",
",",
"basestring",
")",
"return",
"path_variable_setting_command",
"(",
"variable",
",",
"paths",
"+",
"[",
"expand_variable",
"(",
"variable",
")",
"]",
")"
] | Returns a command that prepends the given paths to the named path variable on
the current platform. | [
"Returns",
"a",
"command",
"that",
"prepends",
"the",
"given",
"paths",
"to",
"the",
"named",
"path",
"variable",
"on",
"the",
"current",
"platform",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L537-L545 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/common.py | format_name | def format_name(format, name, target_type, prop_set):
""" Given a target, as given to a custom tag rule, returns a string formatted
according to the passed format. Format is a list of properties that is
represented in the result. For each element of format the corresponding target
information is obtained and added to the result string. For all, but the
literal, the format value is taken as the as string to prepend to the output
to join the item to the rest of the result. If not given "-" is used as a
joiner.
The format options can be:
<base>[joiner]
:: The basename of the target name.
<toolset>[joiner]
:: The abbreviated toolset tag being used to build the target.
<threading>[joiner]
:: Indication of a multi-threaded build.
<runtime>[joiner]
:: Collective tag of the build runtime.
<version:/version-feature | X.Y[.Z]/>[joiner]
:: Short version tag taken from the given "version-feature"
in the build properties. Or if not present, the literal
value as the version number.
<property:/property-name/>[joiner]
:: Direct lookup of the given property-name value in the
build properties. /property-name/ is a regular expression.
e.g. <property:toolset-.*:flavor> will match every toolset.
/otherwise/
:: The literal value of the format argument.
For example this format:
boost_ <base> <toolset> <threading> <runtime> <version:boost-version>
Might return:
boost_thread-vc80-mt-gd-1_33.dll, or
boost_regex-vc80-gd-1_33.dll
The returned name also has the target type specific prefix and suffix which
puts it in a ready form to use as the value from a custom tag rule.
"""
if __debug__:
from ..build.property_set import PropertySet
assert is_iterable_typed(format, basestring)
assert isinstance(name, basestring)
assert isinstance(target_type, basestring)
assert isinstance(prop_set, PropertySet)
# assert(isinstance(prop_set, property_set.PropertySet))
if type.is_derived(target_type, 'LIB'):
result = "" ;
for f in format:
grist = get_grist(f)
if grist == '<base>':
result += os.path.basename(name)
elif grist == '<toolset>':
result += join_tag(get_value(f),
toolset_tag(name, target_type, prop_set))
elif grist == '<threading>':
result += join_tag(get_value(f),
threading_tag(name, target_type, prop_set))
elif grist == '<runtime>':
result += join_tag(get_value(f),
runtime_tag(name, target_type, prop_set))
elif grist.startswith('<version:'):
key = grist[len('<version:'):-1]
version = prop_set.get('<' + key + '>')
if not version:
version = key
version = __re_version.match(version)
result += join_tag(get_value(f), version[1] + '_' + version[2])
elif grist.startswith('<property:'):
key = grist[len('<property:'):-1]
property_re = re.compile('<(' + key + ')>')
p0 = None
for prop in prop_set.raw():
match = property_re.match(prop)
if match:
p0 = match[1]
break
if p0:
p = prop_set.get('<' + p0 + '>')
if p:
assert(len(p) == 1)
result += join_tag(ungrist(f), p)
else:
result += f
result = b2.build.virtual_target.add_prefix_and_suffix(
''.join(result), target_type, prop_set)
return result | python | def format_name(format, name, target_type, prop_set):
""" Given a target, as given to a custom tag rule, returns a string formatted
according to the passed format. Format is a list of properties that is
represented in the result. For each element of format the corresponding target
information is obtained and added to the result string. For all, but the
literal, the format value is taken as the as string to prepend to the output
to join the item to the rest of the result. If not given "-" is used as a
joiner.
The format options can be:
<base>[joiner]
:: The basename of the target name.
<toolset>[joiner]
:: The abbreviated toolset tag being used to build the target.
<threading>[joiner]
:: Indication of a multi-threaded build.
<runtime>[joiner]
:: Collective tag of the build runtime.
<version:/version-feature | X.Y[.Z]/>[joiner]
:: Short version tag taken from the given "version-feature"
in the build properties. Or if not present, the literal
value as the version number.
<property:/property-name/>[joiner]
:: Direct lookup of the given property-name value in the
build properties. /property-name/ is a regular expression.
e.g. <property:toolset-.*:flavor> will match every toolset.
/otherwise/
:: The literal value of the format argument.
For example this format:
boost_ <base> <toolset> <threading> <runtime> <version:boost-version>
Might return:
boost_thread-vc80-mt-gd-1_33.dll, or
boost_regex-vc80-gd-1_33.dll
The returned name also has the target type specific prefix and suffix which
puts it in a ready form to use as the value from a custom tag rule.
"""
if __debug__:
from ..build.property_set import PropertySet
assert is_iterable_typed(format, basestring)
assert isinstance(name, basestring)
assert isinstance(target_type, basestring)
assert isinstance(prop_set, PropertySet)
# assert(isinstance(prop_set, property_set.PropertySet))
if type.is_derived(target_type, 'LIB'):
result = "" ;
for f in format:
grist = get_grist(f)
if grist == '<base>':
result += os.path.basename(name)
elif grist == '<toolset>':
result += join_tag(get_value(f),
toolset_tag(name, target_type, prop_set))
elif grist == '<threading>':
result += join_tag(get_value(f),
threading_tag(name, target_type, prop_set))
elif grist == '<runtime>':
result += join_tag(get_value(f),
runtime_tag(name, target_type, prop_set))
elif grist.startswith('<version:'):
key = grist[len('<version:'):-1]
version = prop_set.get('<' + key + '>')
if not version:
version = key
version = __re_version.match(version)
result += join_tag(get_value(f), version[1] + '_' + version[2])
elif grist.startswith('<property:'):
key = grist[len('<property:'):-1]
property_re = re.compile('<(' + key + ')>')
p0 = None
for prop in prop_set.raw():
match = property_re.match(prop)
if match:
p0 = match[1]
break
if p0:
p = prop_set.get('<' + p0 + '>')
if p:
assert(len(p) == 1)
result += join_tag(ungrist(f), p)
else:
result += f
result = b2.build.virtual_target.add_prefix_and_suffix(
''.join(result), target_type, prop_set)
return result | [
"def",
"format_name",
"(",
"format",
",",
"name",
",",
"target_type",
",",
"prop_set",
")",
":",
"if",
"__debug__",
":",
"from",
".",
".",
"build",
".",
"property_set",
"import",
"PropertySet",
"assert",
"is_iterable_typed",
"(",
"format",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"target_type",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"prop_set",
",",
"PropertySet",
")",
"# assert(isinstance(prop_set, property_set.PropertySet))",
"if",
"type",
".",
"is_derived",
"(",
"target_type",
",",
"'LIB'",
")",
":",
"result",
"=",
"\"\"",
"for",
"f",
"in",
"format",
":",
"grist",
"=",
"get_grist",
"(",
"f",
")",
"if",
"grist",
"==",
"'<base>'",
":",
"result",
"+=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"elif",
"grist",
"==",
"'<toolset>'",
":",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"toolset_tag",
"(",
"name",
",",
"target_type",
",",
"prop_set",
")",
")",
"elif",
"grist",
"==",
"'<threading>'",
":",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"threading_tag",
"(",
"name",
",",
"target_type",
",",
"prop_set",
")",
")",
"elif",
"grist",
"==",
"'<runtime>'",
":",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"runtime_tag",
"(",
"name",
",",
"target_type",
",",
"prop_set",
")",
")",
"elif",
"grist",
".",
"startswith",
"(",
"'<version:'",
")",
":",
"key",
"=",
"grist",
"[",
"len",
"(",
"'<version:'",
")",
":",
"-",
"1",
"]",
"version",
"=",
"prop_set",
".",
"get",
"(",
"'<'",
"+",
"key",
"+",
"'>'",
")",
"if",
"not",
"version",
":",
"version",
"=",
"key",
"version",
"=",
"__re_version",
".",
"match",
"(",
"version",
")",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"version",
"[",
"1",
"]",
"+",
"'_'",
"+",
"version",
"[",
"2",
"]",
")",
"elif",
"grist",
".",
"startswith",
"(",
"'<property:'",
")",
":",
"key",
"=",
"grist",
"[",
"len",
"(",
"'<property:'",
")",
":",
"-",
"1",
"]",
"property_re",
"=",
"re",
".",
"compile",
"(",
"'<('",
"+",
"key",
"+",
"')>'",
")",
"p0",
"=",
"None",
"for",
"prop",
"in",
"prop_set",
".",
"raw",
"(",
")",
":",
"match",
"=",
"property_re",
".",
"match",
"(",
"prop",
")",
"if",
"match",
":",
"p0",
"=",
"match",
"[",
"1",
"]",
"break",
"if",
"p0",
":",
"p",
"=",
"prop_set",
".",
"get",
"(",
"'<'",
"+",
"p0",
"+",
"'>'",
")",
"if",
"p",
":",
"assert",
"(",
"len",
"(",
"p",
")",
"==",
"1",
")",
"result",
"+=",
"join_tag",
"(",
"ungrist",
"(",
"f",
")",
",",
"p",
")",
"else",
":",
"result",
"+=",
"f",
"result",
"=",
"b2",
".",
"build",
".",
"virtual_target",
".",
"add_prefix_and_suffix",
"(",
"''",
".",
"join",
"(",
"result",
")",
",",
"target_type",
",",
"prop_set",
")",
"return",
"result"
] | Given a target, as given to a custom tag rule, returns a string formatted
according to the passed format. Format is a list of properties that is
represented in the result. For each element of format the corresponding target
information is obtained and added to the result string. For all, but the
literal, the format value is taken as the as string to prepend to the output
to join the item to the rest of the result. If not given "-" is used as a
joiner.
The format options can be:
<base>[joiner]
:: The basename of the target name.
<toolset>[joiner]
:: The abbreviated toolset tag being used to build the target.
<threading>[joiner]
:: Indication of a multi-threaded build.
<runtime>[joiner]
:: Collective tag of the build runtime.
<version:/version-feature | X.Y[.Z]/>[joiner]
:: Short version tag taken from the given "version-feature"
in the build properties. Or if not present, the literal
value as the version number.
<property:/property-name/>[joiner]
:: Direct lookup of the given property-name value in the
build properties. /property-name/ is a regular expression.
e.g. <property:toolset-.*:flavor> will match every toolset.
/otherwise/
:: The literal value of the format argument.
For example this format:
boost_ <base> <toolset> <threading> <runtime> <version:boost-version>
Might return:
boost_thread-vc80-mt-gd-1_33.dll, or
boost_regex-vc80-gd-1_33.dll
The returned name also has the target type specific prefix and suffix which
puts it in a ready form to use as the value from a custom tag rule. | [
"Given",
"a",
"target",
"as",
"given",
"to",
"a",
"custom",
"tag",
"rule",
"returns",
"a",
"string",
"formatted",
"according",
"to",
"the",
"passed",
"format",
".",
"Format",
"is",
"a",
"list",
"of",
"properties",
"that",
"is",
"represented",
"in",
"the",
"result",
".",
"For",
"each",
"element",
"of",
"format",
"the",
"corresponding",
"target",
"information",
"is",
"obtained",
"and",
"added",
"to",
"the",
"result",
"string",
".",
"For",
"all",
"but",
"the",
"literal",
"the",
"format",
"value",
"is",
"taken",
"as",
"the",
"as",
"string",
"to",
"prepend",
"to",
"the",
"output",
"to",
"join",
"the",
"item",
"to",
"the",
"rest",
"of",
"the",
"result",
".",
"If",
"not",
"given",
"-",
"is",
"used",
"as",
"a",
"joiner",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L607-L697 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/common.py | Configurations.register | def register(self, id):
"""
Registers a configuration.
Returns True if the configuration has been added and False if
it already exists. Reports an error if the configuration is 'used'.
"""
assert isinstance(id, basestring)
if id in self.used_:
#FIXME
errors.error("common: the configuration '$(id)' is in use")
if id not in self.all_:
self.all_.add(id)
# Indicate that a new configuration has been added.
return True
else:
return False | python | def register(self, id):
"""
Registers a configuration.
Returns True if the configuration has been added and False if
it already exists. Reports an error if the configuration is 'used'.
"""
assert isinstance(id, basestring)
if id in self.used_:
#FIXME
errors.error("common: the configuration '$(id)' is in use")
if id not in self.all_:
self.all_.add(id)
# Indicate that a new configuration has been added.
return True
else:
return False | [
"def",
"register",
"(",
"self",
",",
"id",
")",
":",
"assert",
"isinstance",
"(",
"id",
",",
"basestring",
")",
"if",
"id",
"in",
"self",
".",
"used_",
":",
"#FIXME",
"errors",
".",
"error",
"(",
"\"common: the configuration '$(id)' is in use\"",
")",
"if",
"id",
"not",
"in",
"self",
".",
"all_",
":",
"self",
".",
"all_",
".",
"add",
"(",
"id",
")",
"# Indicate that a new configuration has been added.",
"return",
"True",
"else",
":",
"return",
"False"
] | Registers a configuration.
Returns True if the configuration has been added and False if
it already exists. Reports an error if the configuration is 'used'. | [
"Registers",
"a",
"configuration",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L108-L126 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/common.py | Configurations.get | def get(self, id, param):
""" Returns the value of a configuration parameter. """
assert isinstance(id, basestring)
assert isinstance(param, basestring)
return self.params_.get(param, {}).get(id) | python | def get(self, id, param):
""" Returns the value of a configuration parameter. """
assert isinstance(id, basestring)
assert isinstance(param, basestring)
return self.params_.get(param, {}).get(id) | [
"def",
"get",
"(",
"self",
",",
"id",
",",
"param",
")",
":",
"assert",
"isinstance",
"(",
"id",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"param",
",",
"basestring",
")",
"return",
"self",
".",
"params_",
".",
"get",
"(",
"param",
",",
"{",
"}",
")",
".",
"get",
"(",
"id",
")"
] | Returns the value of a configuration parameter. | [
"Returns",
"the",
"value",
"of",
"a",
"configuration",
"parameter",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L157-L161 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/common.py | Configurations.set | def set (self, id, param, value):
""" Sets the value of a configuration parameter. """
assert isinstance(id, basestring)
assert isinstance(param, basestring)
assert is_iterable_typed(value, basestring)
self.params_.setdefault(param, {})[id] = value | python | def set (self, id, param, value):
""" Sets the value of a configuration parameter. """
assert isinstance(id, basestring)
assert isinstance(param, basestring)
assert is_iterable_typed(value, basestring)
self.params_.setdefault(param, {})[id] = value | [
"def",
"set",
"(",
"self",
",",
"id",
",",
"param",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"id",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"param",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"value",
",",
"basestring",
")",
"self",
".",
"params_",
".",
"setdefault",
"(",
"param",
",",
"{",
"}",
")",
"[",
"id",
"]",
"=",
"value"
] | Sets the value of a configuration parameter. | [
"Sets",
"the",
"value",
"of",
"a",
"configuration",
"parameter",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L163-L168 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_mxnet/_mxnet_utils.py | get_gpus_in_use | def get_gpus_in_use(max_devices=None):
"""
Like get_num_gpus_in_use, but returns a list of dictionaries with just
queried GPU information.
"""
from turicreate.util import _get_cuda_gpus
gpu_indices = get_gpu_ids_in_use(max_devices=max_devices)
gpus = _get_cuda_gpus()
return [gpus[index] for index in gpu_indices] | python | def get_gpus_in_use(max_devices=None):
"""
Like get_num_gpus_in_use, but returns a list of dictionaries with just
queried GPU information.
"""
from turicreate.util import _get_cuda_gpus
gpu_indices = get_gpu_ids_in_use(max_devices=max_devices)
gpus = _get_cuda_gpus()
return [gpus[index] for index in gpu_indices] | [
"def",
"get_gpus_in_use",
"(",
"max_devices",
"=",
"None",
")",
":",
"from",
"turicreate",
".",
"util",
"import",
"_get_cuda_gpus",
"gpu_indices",
"=",
"get_gpu_ids_in_use",
"(",
"max_devices",
"=",
"max_devices",
")",
"gpus",
"=",
"_get_cuda_gpus",
"(",
")",
"return",
"[",
"gpus",
"[",
"index",
"]",
"for",
"index",
"in",
"gpu_indices",
"]"
] | Like get_num_gpus_in_use, but returns a list of dictionaries with just
queried GPU information. | [
"Like",
"get_num_gpus_in_use",
"but",
"returns",
"a",
"list",
"of",
"dictionaries",
"with",
"just",
"queried",
"GPU",
"information",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mxnet/_mxnet_utils.py#L100-L108 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | make_unity_server_env | def make_unity_server_env():
"""
Returns the environment for unity_server.
The environment is necessary to start the unity_server
by setting the proper environments for shared libraries,
hadoop classpath, and module search paths for python lambda workers.
The environment has 3 components:
1. CLASSPATH, contains hadoop class path
2. __GL_PYTHON_EXECUTABLE__, path to the python executable
3. __GL_PYLAMBDA_SCRIPT__, path to the lambda worker executable
4. __GL_SYS_PATH__: contains the python sys.path of the interpreter
"""
env = os.environ.copy()
# Add hadoop class path
classpath = get_hadoop_class_path()
if ("CLASSPATH" in env):
env["CLASSPATH"] = env['CLASSPATH'] + (os.path.pathsep + classpath if classpath != '' else '')
else:
env["CLASSPATH"] = classpath
# Add python syspath
env['__GL_SYS_PATH__'] = (os.path.pathsep).join(sys.path + [os.getcwd()])
# Add the python executable to the runtime config
env['__GL_PYTHON_EXECUTABLE__'] = os.path.abspath(sys.executable)
# Add the pylambda execution script to the runtime config
env['__GL_PYLAMBDA_SCRIPT__'] = os.path.abspath(_pylambda_worker.__file__)
#### Remove PYTHONEXECUTABLE ####
# Anaconda overwrites this environment variable
# which forces all python sub-processes to use the same binary.
# When using virtualenv with ipython (which is outside virtualenv),
# all subprocess launched under unity_server will use the
# conda binary outside of virtualenv, which lacks the access
# to all packages installed inside virtualenv.
if 'PYTHONEXECUTABLE' in env:
del env['PYTHONEXECUTABLE']
# Set mxnet envvars
if 'MXNET_CPU_WORKER_NTHREADS' not in env:
from multiprocessing import cpu_count
num_cpus = int(env.get('OMP_NUM_THREADS', cpu_count()))
if sys.platform == 'darwin':
num_workers = num_cpus
else:
# On Linux, BLAS doesn't seem to tolerate larger numbers of workers.
num_workers = min(2, num_cpus)
env['MXNET_CPU_WORKER_NTHREADS'] = str(num_workers)
## set local to be c standard so that unity_server will run ##
env['LC_ALL']='C'
# add certificate file
if 'TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE' not in env and \
'TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR' not in env:
try:
import certifi
env['TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE'] = certifi.where()
env['TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR'] = ""
except:
pass
return env | python | def make_unity_server_env():
"""
Returns the environment for unity_server.
The environment is necessary to start the unity_server
by setting the proper environments for shared libraries,
hadoop classpath, and module search paths for python lambda workers.
The environment has 3 components:
1. CLASSPATH, contains hadoop class path
2. __GL_PYTHON_EXECUTABLE__, path to the python executable
3. __GL_PYLAMBDA_SCRIPT__, path to the lambda worker executable
4. __GL_SYS_PATH__: contains the python sys.path of the interpreter
"""
env = os.environ.copy()
# Add hadoop class path
classpath = get_hadoop_class_path()
if ("CLASSPATH" in env):
env["CLASSPATH"] = env['CLASSPATH'] + (os.path.pathsep + classpath if classpath != '' else '')
else:
env["CLASSPATH"] = classpath
# Add python syspath
env['__GL_SYS_PATH__'] = (os.path.pathsep).join(sys.path + [os.getcwd()])
# Add the python executable to the runtime config
env['__GL_PYTHON_EXECUTABLE__'] = os.path.abspath(sys.executable)
# Add the pylambda execution script to the runtime config
env['__GL_PYLAMBDA_SCRIPT__'] = os.path.abspath(_pylambda_worker.__file__)
#### Remove PYTHONEXECUTABLE ####
# Anaconda overwrites this environment variable
# which forces all python sub-processes to use the same binary.
# When using virtualenv with ipython (which is outside virtualenv),
# all subprocess launched under unity_server will use the
# conda binary outside of virtualenv, which lacks the access
# to all packages installed inside virtualenv.
if 'PYTHONEXECUTABLE' in env:
del env['PYTHONEXECUTABLE']
# Set mxnet envvars
if 'MXNET_CPU_WORKER_NTHREADS' not in env:
from multiprocessing import cpu_count
num_cpus = int(env.get('OMP_NUM_THREADS', cpu_count()))
if sys.platform == 'darwin':
num_workers = num_cpus
else:
# On Linux, BLAS doesn't seem to tolerate larger numbers of workers.
num_workers = min(2, num_cpus)
env['MXNET_CPU_WORKER_NTHREADS'] = str(num_workers)
## set local to be c standard so that unity_server will run ##
env['LC_ALL']='C'
# add certificate file
if 'TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE' not in env and \
'TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR' not in env:
try:
import certifi
env['TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE'] = certifi.where()
env['TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR'] = ""
except:
pass
return env | [
"def",
"make_unity_server_env",
"(",
")",
":",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"# Add hadoop class path",
"classpath",
"=",
"get_hadoop_class_path",
"(",
")",
"if",
"(",
"\"CLASSPATH\"",
"in",
"env",
")",
":",
"env",
"[",
"\"CLASSPATH\"",
"]",
"=",
"env",
"[",
"'CLASSPATH'",
"]",
"+",
"(",
"os",
".",
"path",
".",
"pathsep",
"+",
"classpath",
"if",
"classpath",
"!=",
"''",
"else",
"''",
")",
"else",
":",
"env",
"[",
"\"CLASSPATH\"",
"]",
"=",
"classpath",
"# Add python syspath",
"env",
"[",
"'__GL_SYS_PATH__'",
"]",
"=",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
".",
"join",
"(",
"sys",
".",
"path",
"+",
"[",
"os",
".",
"getcwd",
"(",
")",
"]",
")",
"# Add the python executable to the runtime config",
"env",
"[",
"'__GL_PYTHON_EXECUTABLE__'",
"]",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"sys",
".",
"executable",
")",
"# Add the pylambda execution script to the runtime config",
"env",
"[",
"'__GL_PYLAMBDA_SCRIPT__'",
"]",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"_pylambda_worker",
".",
"__file__",
")",
"#### Remove PYTHONEXECUTABLE ####",
"# Anaconda overwrites this environment variable",
"# which forces all python sub-processes to use the same binary.",
"# When using virtualenv with ipython (which is outside virtualenv),",
"# all subprocess launched under unity_server will use the",
"# conda binary outside of virtualenv, which lacks the access",
"# to all packages installed inside virtualenv.",
"if",
"'PYTHONEXECUTABLE'",
"in",
"env",
":",
"del",
"env",
"[",
"'PYTHONEXECUTABLE'",
"]",
"# Set mxnet envvars",
"if",
"'MXNET_CPU_WORKER_NTHREADS'",
"not",
"in",
"env",
":",
"from",
"multiprocessing",
"import",
"cpu_count",
"num_cpus",
"=",
"int",
"(",
"env",
".",
"get",
"(",
"'OMP_NUM_THREADS'",
",",
"cpu_count",
"(",
")",
")",
")",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"num_workers",
"=",
"num_cpus",
"else",
":",
"# On Linux, BLAS doesn't seem to tolerate larger numbers of workers.",
"num_workers",
"=",
"min",
"(",
"2",
",",
"num_cpus",
")",
"env",
"[",
"'MXNET_CPU_WORKER_NTHREADS'",
"]",
"=",
"str",
"(",
"num_workers",
")",
"## set local to be c standard so that unity_server will run ##",
"env",
"[",
"'LC_ALL'",
"]",
"=",
"'C'",
"# add certificate file",
"if",
"'TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE'",
"not",
"in",
"env",
"and",
"'TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR'",
"not",
"in",
"env",
":",
"try",
":",
"import",
"certifi",
"env",
"[",
"'TURI_FILEIO_ALTERNATIVE_SSL_CERT_FILE'",
"]",
"=",
"certifi",
".",
"where",
"(",
")",
"env",
"[",
"'TURI_FILEIO_ALTERNATIVE_SSL_CERT_DIR'",
"]",
"=",
"\"\"",
"except",
":",
"pass",
"return",
"env"
] | Returns the environment for unity_server.
The environment is necessary to start the unity_server
by setting the proper environments for shared libraries,
hadoop classpath, and module search paths for python lambda workers.
The environment has 3 components:
1. CLASSPATH, contains hadoop class path
2. __GL_PYTHON_EXECUTABLE__, path to the python executable
3. __GL_PYLAMBDA_SCRIPT__, path to the lambda worker executable
4. __GL_SYS_PATH__: contains the python sys.path of the interpreter | [
"Returns",
"the",
"environment",
"for",
"unity_server",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L22-L86 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | set_windows_dll_path | def set_windows_dll_path():
"""
Sets the dll load path so that things are resolved correctly.
"""
lib_path = os.path.dirname(os.path.abspath(_pylambda_worker.__file__))
lib_path = os.path.abspath(os.path.join(lib_path, os.pardir))
def errcheck_bool(result, func, args):
if not result:
last_error = ctypes.get_last_error()
if last_error != 0:
raise ctypes.WinError(last_error)
else:
raise OSError
return args
# Also need to set the dll loading directory to the main
# folder so windows attempts to load all DLLs from this
# directory.
import ctypes.wintypes as wintypes
try:
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
kernel32.SetDllDirectoryW.errcheck = errcheck_bool
kernel32.SetDllDirectoryW.argtypes = (wintypes.LPCWSTR,)
kernel32.SetDllDirectoryW(lib_path)
except Exception as e:
logging.getLogger(__name__).warning(
"Error setting DLL load orders: %s (things should still work)." % str(e)) | python | def set_windows_dll_path():
"""
Sets the dll load path so that things are resolved correctly.
"""
lib_path = os.path.dirname(os.path.abspath(_pylambda_worker.__file__))
lib_path = os.path.abspath(os.path.join(lib_path, os.pardir))
def errcheck_bool(result, func, args):
if not result:
last_error = ctypes.get_last_error()
if last_error != 0:
raise ctypes.WinError(last_error)
else:
raise OSError
return args
# Also need to set the dll loading directory to the main
# folder so windows attempts to load all DLLs from this
# directory.
import ctypes.wintypes as wintypes
try:
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
kernel32.SetDllDirectoryW.errcheck = errcheck_bool
kernel32.SetDllDirectoryW.argtypes = (wintypes.LPCWSTR,)
kernel32.SetDllDirectoryW(lib_path)
except Exception as e:
logging.getLogger(__name__).warning(
"Error setting DLL load orders: %s (things should still work)." % str(e)) | [
"def",
"set_windows_dll_path",
"(",
")",
":",
"lib_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"_pylambda_worker",
".",
"__file__",
")",
")",
"lib_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"lib_path",
",",
"os",
".",
"pardir",
")",
")",
"def",
"errcheck_bool",
"(",
"result",
",",
"func",
",",
"args",
")",
":",
"if",
"not",
"result",
":",
"last_error",
"=",
"ctypes",
".",
"get_last_error",
"(",
")",
"if",
"last_error",
"!=",
"0",
":",
"raise",
"ctypes",
".",
"WinError",
"(",
"last_error",
")",
"else",
":",
"raise",
"OSError",
"return",
"args",
"# Also need to set the dll loading directory to the main",
"# folder so windows attempts to load all DLLs from this",
"# directory.",
"import",
"ctypes",
".",
"wintypes",
"as",
"wintypes",
"try",
":",
"kernel32",
"=",
"ctypes",
".",
"WinDLL",
"(",
"'kernel32'",
",",
"use_last_error",
"=",
"True",
")",
"kernel32",
".",
"SetDllDirectoryW",
".",
"errcheck",
"=",
"errcheck_bool",
"kernel32",
".",
"SetDllDirectoryW",
".",
"argtypes",
"=",
"(",
"wintypes",
".",
"LPCWSTR",
",",
")",
"kernel32",
".",
"SetDllDirectoryW",
"(",
"lib_path",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"warning",
"(",
"\"Error setting DLL load orders: %s (things should still work).\"",
"%",
"str",
"(",
"e",
")",
")"
] | Sets the dll load path so that things are resolved correctly. | [
"Sets",
"the",
"dll",
"load",
"path",
"so",
"that",
"things",
"are",
"resolved",
"correctly",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L88-L117 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | dump_directory_structure | def dump_directory_structure(out = sys.stdout):
"""
Dumps a detailed report of the turicreate/sframe directory structure
and files, along with the output of os.lstat for each. This is useful
for debugging purposes.
"""
"Dumping Installation Directory Structure for Debugging: "
import sys, os
from os.path import split, abspath, join
from itertools import chain
main_dir = split(abspath(sys.modules[__name__].__file__))[0]
visited_files = []
def on_error(err):
visited_files.append( (" ERROR", str(err)) )
for path, dirs, files in os.walk(main_dir, onerror = on_error):
for fn in chain(files, dirs):
name = join(path, fn)
try:
visited_files.append( (name, repr(os.lstat(name))) )
except:
visited_files.append( (name, "ERROR calling os.lstat.") )
def strip_name(n):
if n[:len(main_dir)] == main_dir:
return "<root>/" + n[len(main_dir):]
else:
return n
out.write("\n".join( (" %s: %s" % (strip_name(name), stats))
for name, stats in sorted(visited_files)))
out.flush() | python | def dump_directory_structure(out = sys.stdout):
"""
Dumps a detailed report of the turicreate/sframe directory structure
and files, along with the output of os.lstat for each. This is useful
for debugging purposes.
"""
"Dumping Installation Directory Structure for Debugging: "
import sys, os
from os.path import split, abspath, join
from itertools import chain
main_dir = split(abspath(sys.modules[__name__].__file__))[0]
visited_files = []
def on_error(err):
visited_files.append( (" ERROR", str(err)) )
for path, dirs, files in os.walk(main_dir, onerror = on_error):
for fn in chain(files, dirs):
name = join(path, fn)
try:
visited_files.append( (name, repr(os.lstat(name))) )
except:
visited_files.append( (name, "ERROR calling os.lstat.") )
def strip_name(n):
if n[:len(main_dir)] == main_dir:
return "<root>/" + n[len(main_dir):]
else:
return n
out.write("\n".join( (" %s: %s" % (strip_name(name), stats))
for name, stats in sorted(visited_files)))
out.flush() | [
"def",
"dump_directory_structure",
"(",
"out",
"=",
"sys",
".",
"stdout",
")",
":",
"\"Dumping Installation Directory Structure for Debugging: \"",
"import",
"sys",
",",
"os",
"from",
"os",
".",
"path",
"import",
"split",
",",
"abspath",
",",
"join",
"from",
"itertools",
"import",
"chain",
"main_dir",
"=",
"split",
"(",
"abspath",
"(",
"sys",
".",
"modules",
"[",
"__name__",
"]",
".",
"__file__",
")",
")",
"[",
"0",
"]",
"visited_files",
"=",
"[",
"]",
"def",
"on_error",
"(",
"err",
")",
":",
"visited_files",
".",
"append",
"(",
"(",
"\" ERROR\"",
",",
"str",
"(",
"err",
")",
")",
")",
"for",
"path",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"main_dir",
",",
"onerror",
"=",
"on_error",
")",
":",
"for",
"fn",
"in",
"chain",
"(",
"files",
",",
"dirs",
")",
":",
"name",
"=",
"join",
"(",
"path",
",",
"fn",
")",
"try",
":",
"visited_files",
".",
"append",
"(",
"(",
"name",
",",
"repr",
"(",
"os",
".",
"lstat",
"(",
"name",
")",
")",
")",
")",
"except",
":",
"visited_files",
".",
"append",
"(",
"(",
"name",
",",
"\"ERROR calling os.lstat.\"",
")",
")",
"def",
"strip_name",
"(",
"n",
")",
":",
"if",
"n",
"[",
":",
"len",
"(",
"main_dir",
")",
"]",
"==",
"main_dir",
":",
"return",
"\"<root>/\"",
"+",
"n",
"[",
"len",
"(",
"main_dir",
")",
":",
"]",
"else",
":",
"return",
"n",
"out",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"(",
"\" %s: %s\"",
"%",
"(",
"strip_name",
"(",
"name",
")",
",",
"stats",
")",
")",
"for",
"name",
",",
"stats",
"in",
"sorted",
"(",
"visited_files",
")",
")",
")",
"out",
".",
"flush",
"(",
")"
] | Dumps a detailed report of the turicreate/sframe directory structure
and files, along with the output of os.lstat for each. This is useful
for debugging purposes. | [
"Dumps",
"a",
"detailed",
"report",
"of",
"the",
"turicreate",
"/",
"sframe",
"directory",
"structure",
"and",
"files",
"along",
"with",
"the",
"output",
"of",
"os",
".",
"lstat",
"for",
"each",
".",
"This",
"is",
"useful",
"for",
"debugging",
"purposes",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L366-L402 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | _get_expanded_classpath | def _get_expanded_classpath(classpath):
"""
Take a classpath of the form:
/etc/hadoop/conf:/usr/lib/hadoop/lib/*:/usr/lib/hadoop/.//*: ...
and return it expanded to all the JARs (and nothing else):
/etc/hadoop/conf:/usr/lib/hadoop/lib/netty-3.6.2.Final.jar:/usr/lib/hadoop/lib/jaxb-api-2.2.2.jar: ...
mentioned in the path
"""
if classpath is None or classpath == '':
return ''
# so this set comprehension takes paths that end with * to be globbed to find the jars, and then
# recombined back into a colon separated list of jar paths, removing dupes and using full file paths
jars = (os.path.pathsep).join((os.path.pathsep).join([os.path.abspath(jarpath) for jarpath in _glob.glob(path)])
for path in classpath.split(os.path.pathsep))
logging.getLogger(__name__).debug('classpath being used: %s' % jars)
return jars | python | def _get_expanded_classpath(classpath):
"""
Take a classpath of the form:
/etc/hadoop/conf:/usr/lib/hadoop/lib/*:/usr/lib/hadoop/.//*: ...
and return it expanded to all the JARs (and nothing else):
/etc/hadoop/conf:/usr/lib/hadoop/lib/netty-3.6.2.Final.jar:/usr/lib/hadoop/lib/jaxb-api-2.2.2.jar: ...
mentioned in the path
"""
if classpath is None or classpath == '':
return ''
# so this set comprehension takes paths that end with * to be globbed to find the jars, and then
# recombined back into a colon separated list of jar paths, removing dupes and using full file paths
jars = (os.path.pathsep).join((os.path.pathsep).join([os.path.abspath(jarpath) for jarpath in _glob.glob(path)])
for path in classpath.split(os.path.pathsep))
logging.getLogger(__name__).debug('classpath being used: %s' % jars)
return jars | [
"def",
"_get_expanded_classpath",
"(",
"classpath",
")",
":",
"if",
"classpath",
"is",
"None",
"or",
"classpath",
"==",
"''",
":",
"return",
"''",
"# so this set comprehension takes paths that end with * to be globbed to find the jars, and then",
"# recombined back into a colon separated list of jar paths, removing dupes and using full file paths",
"jars",
"=",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
".",
"join",
"(",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
".",
"join",
"(",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"jarpath",
")",
"for",
"jarpath",
"in",
"_glob",
".",
"glob",
"(",
"path",
")",
"]",
")",
"for",
"path",
"in",
"classpath",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
")",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"debug",
"(",
"'classpath being used: %s'",
"%",
"jars",
")",
"return",
"jars"
] | Take a classpath of the form:
/etc/hadoop/conf:/usr/lib/hadoop/lib/*:/usr/lib/hadoop/.//*: ...
and return it expanded to all the JARs (and nothing else):
/etc/hadoop/conf:/usr/lib/hadoop/lib/netty-3.6.2.Final.jar:/usr/lib/hadoop/lib/jaxb-api-2.2.2.jar: ...
mentioned in the path | [
"Take",
"a",
"classpath",
"of",
"the",
"form",
":",
"/",
"etc",
"/",
"hadoop",
"/",
"conf",
":",
"/",
"usr",
"/",
"lib",
"/",
"hadoop",
"/",
"lib",
"/",
"*",
":",
"/",
"usr",
"/",
"lib",
"/",
"hadoop",
"/",
".",
"//",
"*",
":",
"..."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L432-L450 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | get_library_name | def get_library_name():
"""
Returns either sframe or turicreate depending on which library
this file is bundled with.
"""
from os.path import split, abspath
__lib_name = split(split(abspath(sys.modules[__name__].__file__))[0])[1]
assert __lib_name in ["sframe", "turicreate"]
return __lib_name | python | def get_library_name():
"""
Returns either sframe or turicreate depending on which library
this file is bundled with.
"""
from os.path import split, abspath
__lib_name = split(split(abspath(sys.modules[__name__].__file__))[0])[1]
assert __lib_name in ["sframe", "turicreate"]
return __lib_name | [
"def",
"get_library_name",
"(",
")",
":",
"from",
"os",
".",
"path",
"import",
"split",
",",
"abspath",
"__lib_name",
"=",
"split",
"(",
"split",
"(",
"abspath",
"(",
"sys",
".",
"modules",
"[",
"__name__",
"]",
".",
"__file__",
")",
")",
"[",
"0",
"]",
")",
"[",
"1",
"]",
"assert",
"__lib_name",
"in",
"[",
"\"sframe\"",
",",
"\"turicreate\"",
"]",
"return",
"__lib_name"
] | Returns either sframe or turicreate depending on which library
this file is bundled with. | [
"Returns",
"either",
"sframe",
"or",
"turicreate",
"depending",
"on",
"which",
"library",
"this",
"file",
"is",
"bundled",
"with",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L452-L463 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | get_config_file | def get_config_file():
"""
Returns the file name of the config file from which the environment
variables are written.
"""
import os
from os.path import abspath, expanduser, join, exists
__lib_name = get_library_name()
assert __lib_name in ["sframe", "turicreate"]
__default_config_path = join(expanduser("~"), ".%s" % __lib_name, "config")
if "TURI_CONFIG_FILE" in os.environ:
__default_config_path = abspath(expanduser(os.environ["TURI_CONFIG_FILE"]))
if not exists(__default_config_path):
print(("WARNING: Config file specified in environment variable "
"'TURI_CONFIG_FILE' as "
"'%s', but this path does not exist.") % __default_config_path)
return __default_config_path | python | def get_config_file():
"""
Returns the file name of the config file from which the environment
variables are written.
"""
import os
from os.path import abspath, expanduser, join, exists
__lib_name = get_library_name()
assert __lib_name in ["sframe", "turicreate"]
__default_config_path = join(expanduser("~"), ".%s" % __lib_name, "config")
if "TURI_CONFIG_FILE" in os.environ:
__default_config_path = abspath(expanduser(os.environ["TURI_CONFIG_FILE"]))
if not exists(__default_config_path):
print(("WARNING: Config file specified in environment variable "
"'TURI_CONFIG_FILE' as "
"'%s', but this path does not exist.") % __default_config_path)
return __default_config_path | [
"def",
"get_config_file",
"(",
")",
":",
"import",
"os",
"from",
"os",
".",
"path",
"import",
"abspath",
",",
"expanduser",
",",
"join",
",",
"exists",
"__lib_name",
"=",
"get_library_name",
"(",
")",
"assert",
"__lib_name",
"in",
"[",
"\"sframe\"",
",",
"\"turicreate\"",
"]",
"__default_config_path",
"=",
"join",
"(",
"expanduser",
"(",
"\"~\"",
")",
",",
"\".%s\"",
"%",
"__lib_name",
",",
"\"config\"",
")",
"if",
"\"TURI_CONFIG_FILE\"",
"in",
"os",
".",
"environ",
":",
"__default_config_path",
"=",
"abspath",
"(",
"expanduser",
"(",
"os",
".",
"environ",
"[",
"\"TURI_CONFIG_FILE\"",
"]",
")",
")",
"if",
"not",
"exists",
"(",
"__default_config_path",
")",
":",
"print",
"(",
"(",
"\"WARNING: Config file specified in environment variable \"",
"\"'TURI_CONFIG_FILE' as \"",
"\"'%s', but this path does not exist.\"",
")",
"%",
"__default_config_path",
")",
"return",
"__default_config_path"
] | Returns the file name of the config file from which the environment
variables are written. | [
"Returns",
"the",
"file",
"name",
"of",
"the",
"config",
"file",
"from",
"which",
"the",
"environment",
"variables",
"are",
"written",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L466-L488 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | setup_environment_from_config_file | def setup_environment_from_config_file():
"""
Imports the environmental configuration settings from the
config file, if present, and sets the environment
variables to test it.
"""
from os.path import exists
config_file = get_config_file()
if not exists(config_file):
return
try:
config = _ConfigParser.SafeConfigParser()
config.read(config_file)
__section = "Environment"
if config.has_section(__section):
items = config.items(__section)
for k, v in items:
try:
os.environ[k.upper()] = v
except Exception as e:
print(("WARNING: Error setting environment variable "
"'%s = %s' from config file '%s': %s.")
% (k, str(v), config_file, str(e)) )
except Exception as e:
print("WARNING: Error reading config file '%s': %s." % (config_file, str(e))) | python | def setup_environment_from_config_file():
"""
Imports the environmental configuration settings from the
config file, if present, and sets the environment
variables to test it.
"""
from os.path import exists
config_file = get_config_file()
if not exists(config_file):
return
try:
config = _ConfigParser.SafeConfigParser()
config.read(config_file)
__section = "Environment"
if config.has_section(__section):
items = config.items(__section)
for k, v in items:
try:
os.environ[k.upper()] = v
except Exception as e:
print(("WARNING: Error setting environment variable "
"'%s = %s' from config file '%s': %s.")
% (k, str(v), config_file, str(e)) )
except Exception as e:
print("WARNING: Error reading config file '%s': %s." % (config_file, str(e))) | [
"def",
"setup_environment_from_config_file",
"(",
")",
":",
"from",
"os",
".",
"path",
"import",
"exists",
"config_file",
"=",
"get_config_file",
"(",
")",
"if",
"not",
"exists",
"(",
"config_file",
")",
":",
"return",
"try",
":",
"config",
"=",
"_ConfigParser",
".",
"SafeConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"config_file",
")",
"__section",
"=",
"\"Environment\"",
"if",
"config",
".",
"has_section",
"(",
"__section",
")",
":",
"items",
"=",
"config",
".",
"items",
"(",
"__section",
")",
"for",
"k",
",",
"v",
"in",
"items",
":",
"try",
":",
"os",
".",
"environ",
"[",
"k",
".",
"upper",
"(",
")",
"]",
"=",
"v",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"(",
"\"WARNING: Error setting environment variable \"",
"\"'%s = %s' from config file '%s': %s.\"",
")",
"%",
"(",
"k",
",",
"str",
"(",
"v",
")",
",",
"config_file",
",",
"str",
"(",
"e",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"WARNING: Error reading config file '%s': %s.\"",
"%",
"(",
"config_file",
",",
"str",
"(",
"e",
")",
")",
")"
] | Imports the environmental configuration settings from the
config file, if present, and sets the environment
variables to test it. | [
"Imports",
"the",
"environmental",
"configuration",
"settings",
"from",
"the",
"config",
"file",
"if",
"present",
"and",
"sets",
"the",
"environment",
"variables",
"to",
"test",
"it",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L491-L522 | train |
apple/turicreate | src/unity/python/turicreate/_sys_util.py | write_config_file_value | def write_config_file_value(key, value):
"""
Writes an environment variable configuration to the current
config file. This will be read in on the next restart.
The config file is created if not present.
Note: The variables will not take effect until after restart.
"""
filename = get_config_file()
config = _ConfigParser.SafeConfigParser()
config.read(filename)
__section = "Environment"
if not(config.has_section(__section)):
config.add_section(__section)
config.set(__section, key, value)
with open(filename, 'w') as config_file:
config.write(config_file) | python | def write_config_file_value(key, value):
"""
Writes an environment variable configuration to the current
config file. This will be read in on the next restart.
The config file is created if not present.
Note: The variables will not take effect until after restart.
"""
filename = get_config_file()
config = _ConfigParser.SafeConfigParser()
config.read(filename)
__section = "Environment"
if not(config.has_section(__section)):
config.add_section(__section)
config.set(__section, key, value)
with open(filename, 'w') as config_file:
config.write(config_file) | [
"def",
"write_config_file_value",
"(",
"key",
",",
"value",
")",
":",
"filename",
"=",
"get_config_file",
"(",
")",
"config",
"=",
"_ConfigParser",
".",
"SafeConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"filename",
")",
"__section",
"=",
"\"Environment\"",
"if",
"not",
"(",
"config",
".",
"has_section",
"(",
"__section",
")",
")",
":",
"config",
".",
"add_section",
"(",
"__section",
")",
"config",
".",
"set",
"(",
"__section",
",",
"key",
",",
"value",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"config_file",
":",
"config",
".",
"write",
"(",
"config_file",
")"
] | Writes an environment variable configuration to the current
config file. This will be read in on the next restart.
The config file is created if not present.
Note: The variables will not take effect until after restart. | [
"Writes",
"an",
"environment",
"variable",
"configuration",
"to",
"the",
"current",
"config",
"file",
".",
"This",
"will",
"be",
"read",
"in",
"on",
"the",
"next",
"restart",
".",
"The",
"config",
"file",
"is",
"created",
"if",
"not",
"present",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_sys_util.py#L525-L547 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py | _ServiceBuilder.BuildService | def BuildService(self, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
"""
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
cls.GetRequestClass = self._GetRequestClass
cls.GetResponseClass = self._GetResponseClass
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateNonImplementedMethod(method)) | python | def BuildService(self, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
"""
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
cls.GetRequestClass = self._GetRequestClass
cls.GetResponseClass = self._GetResponseClass
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateNonImplementedMethod(method)) | [
"def",
"BuildService",
"(",
"self",
",",
"cls",
")",
":",
"# CallMethod needs to operate with an instance of the Service class. This",
"# internal wrapper function exists only to be able to pass the service",
"# instance to the method that does the real CallMethod work.",
"def",
"_WrapCallMethod",
"(",
"srvc",
",",
"method_descriptor",
",",
"rpc_controller",
",",
"request",
",",
"callback",
")",
":",
"return",
"self",
".",
"_CallMethod",
"(",
"srvc",
",",
"method_descriptor",
",",
"rpc_controller",
",",
"request",
",",
"callback",
")",
"self",
".",
"cls",
"=",
"cls",
"cls",
".",
"CallMethod",
"=",
"_WrapCallMethod",
"cls",
".",
"GetDescriptor",
"=",
"staticmethod",
"(",
"lambda",
":",
"self",
".",
"descriptor",
")",
"cls",
".",
"GetDescriptor",
".",
"__doc__",
"=",
"\"Returns the service descriptor.\"",
"cls",
".",
"GetRequestClass",
"=",
"self",
".",
"_GetRequestClass",
"cls",
".",
"GetResponseClass",
"=",
"self",
".",
"_GetResponseClass",
"for",
"method",
"in",
"self",
".",
"descriptor",
".",
"methods",
":",
"setattr",
"(",
"cls",
",",
"method",
".",
"name",
",",
"self",
".",
"_GenerateNonImplementedMethod",
"(",
"method",
")",
")"
] | Constructs the service class.
Args:
cls: The class that will be constructed. | [
"Constructs",
"the",
"service",
"class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L133-L154 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py | _ServiceBuilder._CallMethod | def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback) | python | def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback) | [
"def",
"_CallMethod",
"(",
"self",
",",
"srvc",
",",
"method_descriptor",
",",
"rpc_controller",
",",
"request",
",",
"callback",
")",
":",
"if",
"method_descriptor",
".",
"containing_service",
"!=",
"self",
".",
"descriptor",
":",
"raise",
"RuntimeError",
"(",
"'CallMethod() given method descriptor for wrong service type.'",
")",
"method",
"=",
"getattr",
"(",
"srvc",
",",
"method_descriptor",
".",
"name",
")",
"return",
"method",
"(",
"rpc_controller",
",",
"request",
",",
"callback",
")"
] | Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed. | [
"Calls",
"the",
"method",
"described",
"by",
"a",
"given",
"method",
"descriptor",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L156-L171 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py | _ServiceBuilder._GetRequestClass | def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class | python | def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class | [
"def",
"_GetRequestClass",
"(",
"self",
",",
"method_descriptor",
")",
":",
"if",
"method_descriptor",
".",
"containing_service",
"!=",
"self",
".",
"descriptor",
":",
"raise",
"RuntimeError",
"(",
"'GetRequestClass() given method descriptor for wrong service type.'",
")",
"return",
"method_descriptor",
".",
"input_type",
".",
"_concrete_class"
] | Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method. | [
"Returns",
"the",
"class",
"of",
"the",
"request",
"protocol",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L173-L187 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py | _ServiceBuilder._GetResponseClass | def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class | python | def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class | [
"def",
"_GetResponseClass",
"(",
"self",
",",
"method_descriptor",
")",
":",
"if",
"method_descriptor",
".",
"containing_service",
"!=",
"self",
".",
"descriptor",
":",
"raise",
"RuntimeError",
"(",
"'GetResponseClass() given method descriptor for wrong service type.'",
")",
"return",
"method_descriptor",
".",
"output_type",
".",
"_concrete_class"
] | Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method. | [
"Returns",
"the",
"class",
"of",
"the",
"response",
"protocol",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L189-L203 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py | _ServiceBuilder._GenerateNonImplementedMethod | def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback)) | python | def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback)) | [
"def",
"_GenerateNonImplementedMethod",
"(",
"self",
",",
"method",
")",
":",
"return",
"lambda",
"inst",
",",
"rpc_controller",
",",
"request",
",",
"callback",
":",
"(",
"self",
".",
"_NonImplementedMethod",
"(",
"method",
".",
"name",
",",
"rpc_controller",
",",
"callback",
")",
")"
] | Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class. | [
"Generates",
"and",
"returns",
"a",
"method",
"that",
"can",
"be",
"set",
"for",
"a",
"service",
"methods",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L205-L216 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py | _ServiceStubBuilder.BuildServiceStub | def BuildServiceStub(self, cls):
"""Constructs the stub class.
Args:
cls: The class that will be constructed.
"""
def _ServiceStubInit(stub, rpc_channel):
stub.rpc_channel = rpc_channel
self.cls = cls
cls.__init__ = _ServiceStubInit
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateStubMethod(method)) | python | def BuildServiceStub(self, cls):
"""Constructs the stub class.
Args:
cls: The class that will be constructed.
"""
def _ServiceStubInit(stub, rpc_channel):
stub.rpc_channel = rpc_channel
self.cls = cls
cls.__init__ = _ServiceStubInit
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateStubMethod(method)) | [
"def",
"BuildServiceStub",
"(",
"self",
",",
"cls",
")",
":",
"def",
"_ServiceStubInit",
"(",
"stub",
",",
"rpc_channel",
")",
":",
"stub",
".",
"rpc_channel",
"=",
"rpc_channel",
"self",
".",
"cls",
"=",
"cls",
"cls",
".",
"__init__",
"=",
"_ServiceStubInit",
"for",
"method",
"in",
"self",
".",
"descriptor",
".",
"methods",
":",
"setattr",
"(",
"cls",
",",
"method",
".",
"name",
",",
"self",
".",
"_GenerateStubMethod",
"(",
"method",
")",
")"
] | Constructs the stub class.
Args:
cls: The class that will be constructed. | [
"Constructs",
"the",
"stub",
"class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L251-L263 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py | _ServiceStubBuilder._StubMethod | def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
"""
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback) | python | def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
"""
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback) | [
"def",
"_StubMethod",
"(",
"self",
",",
"stub",
",",
"method_descriptor",
",",
"rpc_controller",
",",
"request",
",",
"callback",
")",
":",
"return",
"stub",
".",
"rpc_channel",
".",
"CallMethod",
"(",
"method_descriptor",
",",
"rpc_controller",
",",
"request",
",",
"method_descriptor",
".",
"output_type",
".",
"_concrete_class",
",",
"callback",
")"
] | The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call). | [
"The",
"body",
"of",
"all",
"service",
"methods",
"in",
"the",
"generated",
"stub",
"class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/service_reflection.py#L269-L284 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | MessageToString | def MessageToString(message,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
use_field_number=False,
descriptor_pool=None,
indent=0):
"""Convert protobuf message to text format.
Floating point values can be formatted compactly with 15 digits of
precision (which is the most that IEEE 754 "double" can guarantee)
using float_format='.15g'. To ensure that converting to text and back to a
proto will result in an identical value, float_format='.17g' should be used.
Args:
message: The protocol buffers message.
as_utf8: Produce text output in UTF8 format.
as_one_line: Don't introduce newlines between fields.
pointy_brackets: If True, use angle brackets instead of curly braces for
nesting.
use_index_order: If True, print fields of a proto message using the order
defined in source code instead of the field number. By default, use the
field number order.
float_format: If set, use this to specify floating point number formatting
(per the "Format Specification Mini-Language"); otherwise, str() is used.
use_field_number: If True, print field numbers instead of names.
descriptor_pool: A DescriptorPool used to resolve Any types.
indent: The indent level, in terms of spaces, for pretty print.
Returns:
A string of the text formatted protocol buffer message.
"""
out = TextWriter(as_utf8)
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format, use_field_number,
descriptor_pool)
printer.PrintMessage(message)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result | python | def MessageToString(message,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
use_field_number=False,
descriptor_pool=None,
indent=0):
"""Convert protobuf message to text format.
Floating point values can be formatted compactly with 15 digits of
precision (which is the most that IEEE 754 "double" can guarantee)
using float_format='.15g'. To ensure that converting to text and back to a
proto will result in an identical value, float_format='.17g' should be used.
Args:
message: The protocol buffers message.
as_utf8: Produce text output in UTF8 format.
as_one_line: Don't introduce newlines between fields.
pointy_brackets: If True, use angle brackets instead of curly braces for
nesting.
use_index_order: If True, print fields of a proto message using the order
defined in source code instead of the field number. By default, use the
field number order.
float_format: If set, use this to specify floating point number formatting
(per the "Format Specification Mini-Language"); otherwise, str() is used.
use_field_number: If True, print field numbers instead of names.
descriptor_pool: A DescriptorPool used to resolve Any types.
indent: The indent level, in terms of spaces, for pretty print.
Returns:
A string of the text formatted protocol buffer message.
"""
out = TextWriter(as_utf8)
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format, use_field_number,
descriptor_pool)
printer.PrintMessage(message)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result | [
"def",
"MessageToString",
"(",
"message",
",",
"as_utf8",
"=",
"False",
",",
"as_one_line",
"=",
"False",
",",
"pointy_brackets",
"=",
"False",
",",
"use_index_order",
"=",
"False",
",",
"float_format",
"=",
"None",
",",
"use_field_number",
"=",
"False",
",",
"descriptor_pool",
"=",
"None",
",",
"indent",
"=",
"0",
")",
":",
"out",
"=",
"TextWriter",
"(",
"as_utf8",
")",
"printer",
"=",
"_Printer",
"(",
"out",
",",
"indent",
",",
"as_utf8",
",",
"as_one_line",
",",
"pointy_brackets",
",",
"use_index_order",
",",
"float_format",
",",
"use_field_number",
",",
"descriptor_pool",
")",
"printer",
".",
"PrintMessage",
"(",
"message",
")",
"result",
"=",
"out",
".",
"getvalue",
"(",
")",
"out",
".",
"close",
"(",
")",
"if",
"as_one_line",
":",
"return",
"result",
".",
"rstrip",
"(",
")",
"return",
"result"
] | Convert protobuf message to text format.
Floating point values can be formatted compactly with 15 digits of
precision (which is the most that IEEE 754 "double" can guarantee)
using float_format='.15g'. To ensure that converting to text and back to a
proto will result in an identical value, float_format='.17g' should be used.
Args:
message: The protocol buffers message.
as_utf8: Produce text output in UTF8 format.
as_one_line: Don't introduce newlines between fields.
pointy_brackets: If True, use angle brackets instead of curly braces for
nesting.
use_index_order: If True, print fields of a proto message using the order
defined in source code instead of the field number. By default, use the
field number order.
float_format: If set, use this to specify floating point number formatting
(per the "Format Specification Mini-Language"); otherwise, str() is used.
use_field_number: If True, print field numbers instead of names.
descriptor_pool: A DescriptorPool used to resolve Any types.
indent: The indent level, in terms of spaces, for pretty print.
Returns:
A string of the text formatted protocol buffer message. | [
"Convert",
"protobuf",
"message",
"to",
"text",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L121-L164 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | PrintFieldValue | def PrintFieldValue(field,
value,
out,
indent=0,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None):
"""Print a single field value (not including name)."""
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format)
printer.PrintFieldValue(field, value) | python | def PrintFieldValue(field,
value,
out,
indent=0,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None):
"""Print a single field value (not including name)."""
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format)
printer.PrintFieldValue(field, value) | [
"def",
"PrintFieldValue",
"(",
"field",
",",
"value",
",",
"out",
",",
"indent",
"=",
"0",
",",
"as_utf8",
"=",
"False",
",",
"as_one_line",
"=",
"False",
",",
"pointy_brackets",
"=",
"False",
",",
"use_index_order",
"=",
"False",
",",
"float_format",
"=",
"None",
")",
":",
"printer",
"=",
"_Printer",
"(",
"out",
",",
"indent",
",",
"as_utf8",
",",
"as_one_line",
",",
"pointy_brackets",
",",
"use_index_order",
",",
"float_format",
")",
"printer",
".",
"PrintFieldValue",
"(",
"field",
",",
"value",
")"
] | Print a single field value (not including name). | [
"Print",
"a",
"single",
"field",
"value",
"(",
"not",
"including",
"name",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L204-L216 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _BuildMessageFromTypeName | def _BuildMessageFromTypeName(type_name, descriptor_pool):
"""Returns a protobuf message instance.
Args:
type_name: Fully-qualified protobuf message type name string.
descriptor_pool: DescriptorPool instance.
Returns:
A Message instance of type matching type_name, or None if the a Descriptor
wasn't found matching type_name.
"""
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
database = symbol_database.Default()
try:
message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)
except KeyError:
return None
message_type = database.GetPrototype(message_descriptor)
return message_type() | python | def _BuildMessageFromTypeName(type_name, descriptor_pool):
"""Returns a protobuf message instance.
Args:
type_name: Fully-qualified protobuf message type name string.
descriptor_pool: DescriptorPool instance.
Returns:
A Message instance of type matching type_name, or None if the a Descriptor
wasn't found matching type_name.
"""
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
database = symbol_database.Default()
try:
message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)
except KeyError:
return None
message_type = database.GetPrototype(message_descriptor)
return message_type() | [
"def",
"_BuildMessageFromTypeName",
"(",
"type_name",
",",
"descriptor_pool",
")",
":",
"# pylint: disable=g-import-not-at-top",
"from",
"google",
".",
"protobuf",
"import",
"symbol_database",
"database",
"=",
"symbol_database",
".",
"Default",
"(",
")",
"try",
":",
"message_descriptor",
"=",
"descriptor_pool",
".",
"FindMessageTypeByName",
"(",
"type_name",
")",
"except",
"KeyError",
":",
"return",
"None",
"message_type",
"=",
"database",
".",
"GetPrototype",
"(",
"message_descriptor",
")",
"return",
"message_type",
"(",
")"
] | Returns a protobuf message instance.
Args:
type_name: Fully-qualified protobuf message type name string.
descriptor_pool: DescriptorPool instance.
Returns:
A Message instance of type matching type_name, or None if the a Descriptor
wasn't found matching type_name. | [
"Returns",
"a",
"protobuf",
"message",
"instance",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L219-L238 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Parse | def Parse(text,
message,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None):
"""Parses a text representation of a protocol message into a message.
Args:
text: Message text representation.
message: A protocol buffer message to merge into.
allow_unknown_extension: if True, skip over missing extensions and keep
parsing
allow_field_number: if True, both field number and field name are allowed.
descriptor_pool: A DescriptorPool used to resolve Any types.
Returns:
The same message passed as argument.
Raises:
ParseError: On text parsing problems.
"""
if not isinstance(text, str):
text = text.decode('utf-8')
return ParseLines(text.split('\n'),
message,
allow_unknown_extension,
allow_field_number,
descriptor_pool=descriptor_pool) | python | def Parse(text,
message,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None):
"""Parses a text representation of a protocol message into a message.
Args:
text: Message text representation.
message: A protocol buffer message to merge into.
allow_unknown_extension: if True, skip over missing extensions and keep
parsing
allow_field_number: if True, both field number and field name are allowed.
descriptor_pool: A DescriptorPool used to resolve Any types.
Returns:
The same message passed as argument.
Raises:
ParseError: On text parsing problems.
"""
if not isinstance(text, str):
text = text.decode('utf-8')
return ParseLines(text.split('\n'),
message,
allow_unknown_extension,
allow_field_number,
descriptor_pool=descriptor_pool) | [
"def",
"Parse",
"(",
"text",
",",
"message",
",",
"allow_unknown_extension",
"=",
"False",
",",
"allow_field_number",
"=",
"False",
",",
"descriptor_pool",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"ParseLines",
"(",
"text",
".",
"split",
"(",
"'\\n'",
")",
",",
"message",
",",
"allow_unknown_extension",
",",
"allow_field_number",
",",
"descriptor_pool",
"=",
"descriptor_pool",
")"
] | Parses a text representation of a protocol message into a message.
Args:
text: Message text representation.
message: A protocol buffer message to merge into.
allow_unknown_extension: if True, skip over missing extensions and keep
parsing
allow_field_number: if True, both field number and field name are allowed.
descriptor_pool: A DescriptorPool used to resolve Any types.
Returns:
The same message passed as argument.
Raises:
ParseError: On text parsing problems. | [
"Parses",
"a",
"text",
"representation",
"of",
"a",
"protocol",
"message",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L422-L449 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _SkipFieldContents | def _SkipFieldContents(tokenizer):
"""Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
# Try to guess the type of this field.
# If this field is not a message, there should be a ":" between the
# field name and the field value and also the field value should not
# start with "{" or "<" which indicates the beginning of a message body.
# If there is no ":" or there is a "{" or "<" after ":", this field has
# to be a message or the input is ill-formed.
if tokenizer.TryConsume(':') and not tokenizer.LookingAt(
'{') and not tokenizer.LookingAt('<'):
_SkipFieldValue(tokenizer)
else:
_SkipFieldMessage(tokenizer) | python | def _SkipFieldContents(tokenizer):
"""Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
# Try to guess the type of this field.
# If this field is not a message, there should be a ":" between the
# field name and the field value and also the field value should not
# start with "{" or "<" which indicates the beginning of a message body.
# If there is no ":" or there is a "{" or "<" after ":", this field has
# to be a message or the input is ill-formed.
if tokenizer.TryConsume(':') and not tokenizer.LookingAt(
'{') and not tokenizer.LookingAt('<'):
_SkipFieldValue(tokenizer)
else:
_SkipFieldMessage(tokenizer) | [
"def",
"_SkipFieldContents",
"(",
"tokenizer",
")",
":",
"# Try to guess the type of this field.",
"# If this field is not a message, there should be a \":\" between the",
"# field name and the field value and also the field value should not",
"# start with \"{\" or \"<\" which indicates the beginning of a message body.",
"# If there is no \":\" or there is a \"{\" or \"<\" after \":\", this field has",
"# to be a message or the input is ill-formed.",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"':'",
")",
"and",
"not",
"tokenizer",
".",
"LookingAt",
"(",
"'{'",
")",
"and",
"not",
"tokenizer",
".",
"LookingAt",
"(",
"'<'",
")",
":",
"_SkipFieldValue",
"(",
"tokenizer",
")",
"else",
":",
"_SkipFieldMessage",
"(",
"tokenizer",
")"
] | Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values. | [
"Skips",
"over",
"contents",
"(",
"value",
"or",
"message",
")",
"of",
"a",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L848-L864 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _SkipField | def _SkipField(tokenizer):
"""Skips over a complete field (name and value/message).
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('['):
# Consume extension name.
tokenizer.ConsumeIdentifier()
while tokenizer.TryConsume('.'):
tokenizer.ConsumeIdentifier()
tokenizer.Consume(']')
else:
tokenizer.ConsumeIdentifier()
_SkipFieldContents(tokenizer)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';') | python | def _SkipField(tokenizer):
"""Skips over a complete field (name and value/message).
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('['):
# Consume extension name.
tokenizer.ConsumeIdentifier()
while tokenizer.TryConsume('.'):
tokenizer.ConsumeIdentifier()
tokenizer.Consume(']')
else:
tokenizer.ConsumeIdentifier()
_SkipFieldContents(tokenizer)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';') | [
"def",
"_SkipField",
"(",
"tokenizer",
")",
":",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"'['",
")",
":",
"# Consume extension name.",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"while",
"tokenizer",
".",
"TryConsume",
"(",
"'.'",
")",
":",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"tokenizer",
".",
"Consume",
"(",
"']'",
")",
"else",
":",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"_SkipFieldContents",
"(",
"tokenizer",
")",
"# For historical reasons, fields may optionally be separated by commas or",
"# semicolons.",
"if",
"not",
"tokenizer",
".",
"TryConsume",
"(",
"','",
")",
":",
"tokenizer",
".",
"TryConsume",
"(",
"';'",
")"
] | Skips over a complete field (name and value/message).
Args:
tokenizer: A tokenizer to parse the field name and values. | [
"Skips",
"over",
"a",
"complete",
"field",
"(",
"name",
"and",
"value",
"/",
"message",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L867-L887 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _SkipFieldMessage | def _SkipFieldMessage(tokenizer):
"""Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('<'):
delimiter = '>'
else:
tokenizer.Consume('{')
delimiter = '}'
while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
_SkipField(tokenizer)
tokenizer.Consume(delimiter) | python | def _SkipFieldMessage(tokenizer):
"""Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('<'):
delimiter = '>'
else:
tokenizer.Consume('{')
delimiter = '}'
while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
_SkipField(tokenizer)
tokenizer.Consume(delimiter) | [
"def",
"_SkipFieldMessage",
"(",
"tokenizer",
")",
":",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"'<'",
")",
":",
"delimiter",
"=",
"'>'",
"else",
":",
"tokenizer",
".",
"Consume",
"(",
"'{'",
")",
"delimiter",
"=",
"'}'",
"while",
"not",
"tokenizer",
".",
"LookingAt",
"(",
"'>'",
")",
"and",
"not",
"tokenizer",
".",
"LookingAt",
"(",
"'}'",
")",
":",
"_SkipField",
"(",
"tokenizer",
")",
"tokenizer",
".",
"Consume",
"(",
"delimiter",
")"
] | Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values. | [
"Skips",
"over",
"a",
"field",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L890-L906 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _SkipFieldValue | def _SkipFieldValue(tokenizer):
"""Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
"""
# String/bytes tokens can come in multiple adjacent string literals.
# If we can consume one, consume as many as we can.
if tokenizer.TryConsumeByteString():
while tokenizer.TryConsumeByteString():
pass
return
if (not tokenizer.TryConsumeIdentifier() and
not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token) | python | def _SkipFieldValue(tokenizer):
"""Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
"""
# String/bytes tokens can come in multiple adjacent string literals.
# If we can consume one, consume as many as we can.
if tokenizer.TryConsumeByteString():
while tokenizer.TryConsumeByteString():
pass
return
if (not tokenizer.TryConsumeIdentifier() and
not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token) | [
"def",
"_SkipFieldValue",
"(",
"tokenizer",
")",
":",
"# String/bytes tokens can come in multiple adjacent string literals.",
"# If we can consume one, consume as many as we can.",
"if",
"tokenizer",
".",
"TryConsumeByteString",
"(",
")",
":",
"while",
"tokenizer",
".",
"TryConsumeByteString",
"(",
")",
":",
"pass",
"return",
"if",
"(",
"not",
"tokenizer",
".",
"TryConsumeIdentifier",
"(",
")",
"and",
"not",
"_TryConsumeInt64",
"(",
"tokenizer",
")",
"and",
"not",
"_TryConsumeUint64",
"(",
"tokenizer",
")",
"and",
"not",
"tokenizer",
".",
"TryConsumeFloat",
"(",
")",
")",
":",
"raise",
"ParseError",
"(",
"'Invalid field value: '",
"+",
"tokenizer",
".",
"token",
")"
] | Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found. | [
"Skips",
"over",
"a",
"field",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L909-L928 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _ConsumeInteger | def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
"""Consumes an integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer parsed.
Raises:
ParseError: If an integer with given characteristics couldn't be consumed.
"""
try:
result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
except ValueError as e:
raise tokenizer.ParseError(str(e))
tokenizer.NextToken()
return result | python | def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
"""Consumes an integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer parsed.
Raises:
ParseError: If an integer with given characteristics couldn't be consumed.
"""
try:
result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
except ValueError as e:
raise tokenizer.ParseError(str(e))
tokenizer.NextToken()
return result | [
"def",
"_ConsumeInteger",
"(",
"tokenizer",
",",
"is_signed",
"=",
"False",
",",
"is_long",
"=",
"False",
")",
":",
"try",
":",
"result",
"=",
"ParseInteger",
"(",
"tokenizer",
".",
"token",
",",
"is_signed",
"=",
"is_signed",
",",
"is_long",
"=",
"is_long",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"tokenizer",
".",
"ParseError",
"(",
"str",
"(",
"e",
")",
")",
"tokenizer",
".",
"NextToken",
"(",
")",
"return",
"result"
] | Consumes an integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer parsed.
Raises:
ParseError: If an integer with given characteristics couldn't be consumed. | [
"Consumes",
"an",
"integer",
"number",
"from",
"tokenizer",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1359-L1378 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | ParseInteger | def ParseInteger(text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
"""
# Do the actual parsing. Exception handling is propagated to caller.
result = _ParseAbstractInteger(text, is_long=is_long)
# Check if the integer is sane. Exceptions handled by callers.
checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result | python | def ParseInteger(text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
"""
# Do the actual parsing. Exception handling is propagated to caller.
result = _ParseAbstractInteger(text, is_long=is_long)
# Check if the integer is sane. Exceptions handled by callers.
checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result | [
"def",
"ParseInteger",
"(",
"text",
",",
"is_signed",
"=",
"False",
",",
"is_long",
"=",
"False",
")",
":",
"# Do the actual parsing. Exception handling is propagated to caller.",
"result",
"=",
"_ParseAbstractInteger",
"(",
"text",
",",
"is_long",
"=",
"is_long",
")",
"# Check if the integer is sane. Exceptions handled by callers.",
"checker",
"=",
"_INTEGER_CHECKERS",
"[",
"2",
"*",
"int",
"(",
"is_long",
")",
"+",
"int",
"(",
"is_signed",
")",
"]",
"checker",
".",
"CheckValue",
"(",
"result",
")",
"return",
"result"
] | Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer. | [
"Parses",
"an",
"integer",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1381-L1401 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _ParseAbstractInteger | def _ParseAbstractInteger(text, is_long=False):
"""Parses an integer without checking size/signedness.
Args:
text: The text to parse.
is_long: True if the value should be returned as a long integer.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
"""
# Do the actual parsing. Exception handling is propagated to caller.
try:
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if is_long:
return long(text, 0)
else:
return int(text, 0)
except ValueError:
raise ValueError('Couldn\'t parse integer: %s' % text) | python | def _ParseAbstractInteger(text, is_long=False):
"""Parses an integer without checking size/signedness.
Args:
text: The text to parse.
is_long: True if the value should be returned as a long integer.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
"""
# Do the actual parsing. Exception handling is propagated to caller.
try:
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if is_long:
return long(text, 0)
else:
return int(text, 0)
except ValueError:
raise ValueError('Couldn\'t parse integer: %s' % text) | [
"def",
"_ParseAbstractInteger",
"(",
"text",
",",
"is_long",
"=",
"False",
")",
":",
"# Do the actual parsing. Exception handling is propagated to caller.",
"try",
":",
"# We force 32-bit values to int and 64-bit values to long to make",
"# alternate implementations where the distinction is more significant",
"# (e.g. the C++ implementation) simpler.",
"if",
"is_long",
":",
"return",
"long",
"(",
"text",
",",
"0",
")",
"else",
":",
"return",
"int",
"(",
"text",
",",
"0",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Couldn\\'t parse integer: %s'",
"%",
"text",
")"
] | Parses an integer without checking size/signedness.
Args:
text: The text to parse.
is_long: True if the value should be returned as a long integer.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer. | [
"Parses",
"an",
"integer",
"without",
"checking",
"size",
"/",
"signedness",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1404-L1427 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | ParseFloat | def ParseFloat(text):
"""Parse a floating point number.
Args:
text: Text to parse.
Returns:
The number parsed.
Raises:
ValueError: If a floating point number couldn't be parsed.
"""
try:
# Assume Python compatible syntax.
return float(text)
except ValueError:
# Check alternative spellings.
if _FLOAT_INFINITY.match(text):
if text[0] == '-':
return float('-inf')
else:
return float('inf')
elif _FLOAT_NAN.match(text):
return float('nan')
else:
# assume '1.0f' format
try:
return float(text.rstrip('f'))
except ValueError:
raise ValueError('Couldn\'t parse float: %s' % text) | python | def ParseFloat(text):
"""Parse a floating point number.
Args:
text: Text to parse.
Returns:
The number parsed.
Raises:
ValueError: If a floating point number couldn't be parsed.
"""
try:
# Assume Python compatible syntax.
return float(text)
except ValueError:
# Check alternative spellings.
if _FLOAT_INFINITY.match(text):
if text[0] == '-':
return float('-inf')
else:
return float('inf')
elif _FLOAT_NAN.match(text):
return float('nan')
else:
# assume '1.0f' format
try:
return float(text.rstrip('f'))
except ValueError:
raise ValueError('Couldn\'t parse float: %s' % text) | [
"def",
"ParseFloat",
"(",
"text",
")",
":",
"try",
":",
"# Assume Python compatible syntax.",
"return",
"float",
"(",
"text",
")",
"except",
"ValueError",
":",
"# Check alternative spellings.",
"if",
"_FLOAT_INFINITY",
".",
"match",
"(",
"text",
")",
":",
"if",
"text",
"[",
"0",
"]",
"==",
"'-'",
":",
"return",
"float",
"(",
"'-inf'",
")",
"else",
":",
"return",
"float",
"(",
"'inf'",
")",
"elif",
"_FLOAT_NAN",
".",
"match",
"(",
"text",
")",
":",
"return",
"float",
"(",
"'nan'",
")",
"else",
":",
"# assume '1.0f' format",
"try",
":",
"return",
"float",
"(",
"text",
".",
"rstrip",
"(",
"'f'",
")",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Couldn\\'t parse float: %s'",
"%",
"text",
")"
] | Parse a floating point number.
Args:
text: Text to parse.
Returns:
The number parsed.
Raises:
ValueError: If a floating point number couldn't be parsed. | [
"Parse",
"a",
"floating",
"point",
"number",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1430-L1459 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | ParseEnum | def ParseEnum(field, value):
"""Parse an enum value.
The value can be specified by a number (the enum value), or by
a string literal (the enum name).
Args:
field: Enum field descriptor.
value: String value.
Returns:
Enum value number.
Raises:
ValueError: If the enum value could not be parsed.
"""
enum_descriptor = field.enum_type
try:
number = int(value, 0)
except ValueError:
# Identifier.
enum_value = enum_descriptor.values_by_name.get(value, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value named %s.' %
(enum_descriptor.full_name, value))
else:
# Numeric value.
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value with number %d.' %
(enum_descriptor.full_name, number))
return enum_value.number | python | def ParseEnum(field, value):
"""Parse an enum value.
The value can be specified by a number (the enum value), or by
a string literal (the enum name).
Args:
field: Enum field descriptor.
value: String value.
Returns:
Enum value number.
Raises:
ValueError: If the enum value could not be parsed.
"""
enum_descriptor = field.enum_type
try:
number = int(value, 0)
except ValueError:
# Identifier.
enum_value = enum_descriptor.values_by_name.get(value, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value named %s.' %
(enum_descriptor.full_name, value))
else:
# Numeric value.
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value with number %d.' %
(enum_descriptor.full_name, number))
return enum_value.number | [
"def",
"ParseEnum",
"(",
"field",
",",
"value",
")",
":",
"enum_descriptor",
"=",
"field",
".",
"enum_type",
"try",
":",
"number",
"=",
"int",
"(",
"value",
",",
"0",
")",
"except",
"ValueError",
":",
"# Identifier.",
"enum_value",
"=",
"enum_descriptor",
".",
"values_by_name",
".",
"get",
"(",
"value",
",",
"None",
")",
"if",
"enum_value",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Enum type \"%s\" has no value named %s.'",
"%",
"(",
"enum_descriptor",
".",
"full_name",
",",
"value",
")",
")",
"else",
":",
"# Numeric value.",
"enum_value",
"=",
"enum_descriptor",
".",
"values_by_number",
".",
"get",
"(",
"number",
",",
"None",
")",
"if",
"enum_value",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Enum type \"%s\" has no value with number %d.'",
"%",
"(",
"enum_descriptor",
".",
"full_name",
",",
"number",
")",
")",
"return",
"enum_value",
".",
"number"
] | Parse an enum value.
The value can be specified by a number (the enum value), or by
a string literal (the enum name).
Args:
field: Enum field descriptor.
value: String value.
Returns:
Enum value number.
Raises:
ValueError: If the enum value could not be parsed. | [
"Parse",
"an",
"enum",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1482-L1513 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Printer._TryPrintAsAnyMessage | def _TryPrintAsAnyMessage(self, message):
"""Serializes if message is a google.protobuf.Any field."""
packed_message = _BuildMessageFromTypeName(message.TypeName(),
self.descriptor_pool)
if packed_message:
packed_message.MergeFromString(message.value)
self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
self._PrintMessageFieldValue(packed_message)
self.out.write(' ' if self.as_one_line else '\n')
return True
else:
return False | python | def _TryPrintAsAnyMessage(self, message):
"""Serializes if message is a google.protobuf.Any field."""
packed_message = _BuildMessageFromTypeName(message.TypeName(),
self.descriptor_pool)
if packed_message:
packed_message.MergeFromString(message.value)
self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
self._PrintMessageFieldValue(packed_message)
self.out.write(' ' if self.as_one_line else '\n')
return True
else:
return False | [
"def",
"_TryPrintAsAnyMessage",
"(",
"self",
",",
"message",
")",
":",
"packed_message",
"=",
"_BuildMessageFromTypeName",
"(",
"message",
".",
"TypeName",
"(",
")",
",",
"self",
".",
"descriptor_pool",
")",
"if",
"packed_message",
":",
"packed_message",
".",
"MergeFromString",
"(",
"message",
".",
"value",
")",
"self",
".",
"out",
".",
"write",
"(",
"'%s[%s]'",
"%",
"(",
"self",
".",
"indent",
"*",
"' '",
",",
"message",
".",
"type_url",
")",
")",
"self",
".",
"_PrintMessageFieldValue",
"(",
"packed_message",
")",
"self",
".",
"out",
".",
"write",
"(",
"' '",
"if",
"self",
".",
"as_one_line",
"else",
"'\\n'",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | Serializes if message is a google.protobuf.Any field. | [
"Serializes",
"if",
"message",
"is",
"a",
"google",
".",
"protobuf",
".",
"Any",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L287-L298 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Printer.PrintMessage | def PrintMessage(self, message):
"""Convert protobuf message to text format.
Args:
message: The protocol buffers message.
"""
if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and
self.descriptor_pool and self._TryPrintAsAnyMessage(message)):
return
fields = message.ListFields()
if self.use_index_order:
fields.sort(key=lambda x: x[0].index)
for field, value in fields:
if _IsMapEntry(field):
for key in sorted(value):
# This is slow for maps with submessage entires because it copies the
# entire tree. Unfortunately this would take significant refactoring
# of this file to work around.
#
# TODO(haberman): refactor and optimize if this becomes an issue.
entry_submsg = value.GetEntryClass()(key=key, value=value[key])
self.PrintField(field, entry_submsg)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
self.PrintField(field, element)
else:
self.PrintField(field, value) | python | def PrintMessage(self, message):
"""Convert protobuf message to text format.
Args:
message: The protocol buffers message.
"""
if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and
self.descriptor_pool and self._TryPrintAsAnyMessage(message)):
return
fields = message.ListFields()
if self.use_index_order:
fields.sort(key=lambda x: x[0].index)
for field, value in fields:
if _IsMapEntry(field):
for key in sorted(value):
# This is slow for maps with submessage entires because it copies the
# entire tree. Unfortunately this would take significant refactoring
# of this file to work around.
#
# TODO(haberman): refactor and optimize if this becomes an issue.
entry_submsg = value.GetEntryClass()(key=key, value=value[key])
self.PrintField(field, entry_submsg)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
self.PrintField(field, element)
else:
self.PrintField(field, value) | [
"def",
"PrintMessage",
"(",
"self",
",",
"message",
")",
":",
"if",
"(",
"message",
".",
"DESCRIPTOR",
".",
"full_name",
"==",
"_ANY_FULL_TYPE_NAME",
"and",
"self",
".",
"descriptor_pool",
"and",
"self",
".",
"_TryPrintAsAnyMessage",
"(",
"message",
")",
")",
":",
"return",
"fields",
"=",
"message",
".",
"ListFields",
"(",
")",
"if",
"self",
".",
"use_index_order",
":",
"fields",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
".",
"index",
")",
"for",
"field",
",",
"value",
"in",
"fields",
":",
"if",
"_IsMapEntry",
"(",
"field",
")",
":",
"for",
"key",
"in",
"sorted",
"(",
"value",
")",
":",
"# This is slow for maps with submessage entires because it copies the",
"# entire tree. Unfortunately this would take significant refactoring",
"# of this file to work around.",
"#",
"# TODO(haberman): refactor and optimize if this becomes an issue.",
"entry_submsg",
"=",
"value",
".",
"GetEntryClass",
"(",
")",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value",
"[",
"key",
"]",
")",
"self",
".",
"PrintField",
"(",
"field",
",",
"entry_submsg",
")",
"elif",
"field",
".",
"label",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"LABEL_REPEATED",
":",
"for",
"element",
"in",
"value",
":",
"self",
".",
"PrintField",
"(",
"field",
",",
"element",
")",
"else",
":",
"self",
".",
"PrintField",
"(",
"field",
",",
"value",
")"
] | Convert protobuf message to text format.
Args:
message: The protocol buffers message. | [
"Convert",
"protobuf",
"message",
"to",
"text",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L300-L326 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Printer.PrintField | def PrintField(self, field, value):
"""Print a single field name/value pair."""
out = self.out
out.write(' ' * self.indent)
if self.use_field_number:
out.write(str(field.number))
else:
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
self.PrintFieldValue(field, value)
if self.as_one_line:
out.write(' ')
else:
out.write('\n') | python | def PrintField(self, field, value):
"""Print a single field name/value pair."""
out = self.out
out.write(' ' * self.indent)
if self.use_field_number:
out.write(str(field.number))
else:
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
self.PrintFieldValue(field, value)
if self.as_one_line:
out.write(' ')
else:
out.write('\n') | [
"def",
"PrintField",
"(",
"self",
",",
"field",
",",
"value",
")",
":",
"out",
"=",
"self",
".",
"out",
"out",
".",
"write",
"(",
"' '",
"*",
"self",
".",
"indent",
")",
"if",
"self",
".",
"use_field_number",
":",
"out",
".",
"write",
"(",
"str",
"(",
"field",
".",
"number",
")",
")",
"else",
":",
"if",
"field",
".",
"is_extension",
":",
"out",
".",
"write",
"(",
"'['",
")",
"if",
"(",
"field",
".",
"containing_type",
".",
"GetOptions",
"(",
")",
".",
"message_set_wire_format",
"and",
"field",
".",
"type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_MESSAGE",
"and",
"field",
".",
"label",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"LABEL_OPTIONAL",
")",
":",
"out",
".",
"write",
"(",
"field",
".",
"message_type",
".",
"full_name",
")",
"else",
":",
"out",
".",
"write",
"(",
"field",
".",
"full_name",
")",
"out",
".",
"write",
"(",
"']'",
")",
"elif",
"field",
".",
"type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_GROUP",
":",
"# For groups, use the capitalized name.",
"out",
".",
"write",
"(",
"field",
".",
"message_type",
".",
"name",
")",
"else",
":",
"out",
".",
"write",
"(",
"field",
".",
"name",
")",
"if",
"field",
".",
"cpp_type",
"!=",
"descriptor",
".",
"FieldDescriptor",
".",
"CPPTYPE_MESSAGE",
":",
"# The colon is optional in this case, but our cross-language golden files",
"# don't include it.",
"out",
".",
"write",
"(",
"': '",
")",
"self",
".",
"PrintFieldValue",
"(",
"field",
",",
"value",
")",
"if",
"self",
".",
"as_one_line",
":",
"out",
".",
"write",
"(",
"' '",
")",
"else",
":",
"out",
".",
"write",
"(",
"'\\n'",
")"
] | Print a single field name/value pair. | [
"Print",
"a",
"single",
"field",
"name",
"/",
"value",
"pair",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L328-L359 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser.ParseFromString | def ParseFromString(self, text, message):
"""Parses a text representation of a protocol message into a message."""
if not isinstance(text, str):
text = text.decode('utf-8')
return self.ParseLines(text.split('\n'), message) | python | def ParseFromString(self, text, message):
"""Parses a text representation of a protocol message into a message."""
if not isinstance(text, str):
text = text.decode('utf-8')
return self.ParseLines(text.split('\n'), message) | [
"def",
"ParseFromString",
"(",
"self",
",",
"text",
",",
"message",
")",
":",
"if",
"not",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"self",
".",
"ParseLines",
"(",
"text",
".",
"split",
"(",
"'\\n'",
")",
",",
"message",
")"
] | Parses a text representation of a protocol message into a message. | [
"Parses",
"a",
"text",
"representation",
"of",
"a",
"protocol",
"message",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L549-L553 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser.ParseLines | def ParseLines(self, lines, message):
"""Parses a text representation of a protocol message into a message."""
self._allow_multiple_scalars = False
self._ParseOrMerge(lines, message)
return message | python | def ParseLines(self, lines, message):
"""Parses a text representation of a protocol message into a message."""
self._allow_multiple_scalars = False
self._ParseOrMerge(lines, message)
return message | [
"def",
"ParseLines",
"(",
"self",
",",
"lines",
",",
"message",
")",
":",
"self",
".",
"_allow_multiple_scalars",
"=",
"False",
"self",
".",
"_ParseOrMerge",
"(",
"lines",
",",
"message",
")",
"return",
"message"
] | Parses a text representation of a protocol message into a message. | [
"Parses",
"a",
"text",
"representation",
"of",
"a",
"protocol",
"message",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L555-L559 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser.MergeLines | def MergeLines(self, lines, message):
"""Merges a text representation of a protocol message into a message."""
self._allow_multiple_scalars = True
self._ParseOrMerge(lines, message)
return message | python | def MergeLines(self, lines, message):
"""Merges a text representation of a protocol message into a message."""
self._allow_multiple_scalars = True
self._ParseOrMerge(lines, message)
return message | [
"def",
"MergeLines",
"(",
"self",
",",
"lines",
",",
"message",
")",
":",
"self",
".",
"_allow_multiple_scalars",
"=",
"True",
"self",
".",
"_ParseOrMerge",
"(",
"lines",
",",
"message",
")",
"return",
"message"
] | Merges a text representation of a protocol message into a message. | [
"Merges",
"a",
"text",
"representation",
"of",
"a",
"protocol",
"message",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L565-L569 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser._ParseOrMerge | def _ParseOrMerge(self, lines, message):
"""Converts a text representation of a protocol message into a message.
Args:
lines: Lines of a message's text representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On text parsing problems.
"""
tokenizer = Tokenizer(lines)
while not tokenizer.AtEnd():
self._MergeField(tokenizer, message) | python | def _ParseOrMerge(self, lines, message):
"""Converts a text representation of a protocol message into a message.
Args:
lines: Lines of a message's text representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On text parsing problems.
"""
tokenizer = Tokenizer(lines)
while not tokenizer.AtEnd():
self._MergeField(tokenizer, message) | [
"def",
"_ParseOrMerge",
"(",
"self",
",",
"lines",
",",
"message",
")",
":",
"tokenizer",
"=",
"Tokenizer",
"(",
"lines",
")",
"while",
"not",
"tokenizer",
".",
"AtEnd",
"(",
")",
":",
"self",
".",
"_MergeField",
"(",
"tokenizer",
",",
"message",
")"
] | Converts a text representation of a protocol message into a message.
Args:
lines: Lines of a message's text representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On text parsing problems. | [
"Converts",
"a",
"text",
"representation",
"of",
"a",
"protocol",
"message",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L571-L583 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser._MergeField | def _MergeField(self, tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of text parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if (hasattr(message_descriptor, 'syntax') and
message_descriptor.syntax == 'proto3'):
# Proto3 doesn't represent presence so we can't test if multiple
# scalars have occurred. We have to allow them.
self._allow_multiple_scalars = True
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
# pylint: disable=protected-access
field = message.Extensions._FindExtensionByName(name)
# pylint: enable=protected-access
if not field:
if self.allow_unknown_extension:
field = None
else:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' %
(name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifierOrNumber()
if self.allow_field_number and name.isdigit():
number = ParseInteger(name, True, True)
field = message_descriptor.fields_by_number.get(number, None)
if not field and message_descriptor.is_extendable:
field = message.Extensions._FindExtensionByNumber(number)
else:
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' %
(message_descriptor.full_name, name))
if field:
if not self._allow_multiple_scalars and field.containing_oneof:
# Check if there's a different field set in this oneof.
# Note that we ignore the case if the same field was set before, and we
# apply _allow_multiple_scalars to non-scalar fields as well.
which_oneof = message.WhichOneof(field.containing_oneof.name)
if which_oneof is not None and which_oneof != field.name:
raise tokenizer.ParseErrorPreviousToken(
'Field "%s" is specified along with field "%s", another member '
'of oneof "%s" for message type "%s".' %
(field.name, which_oneof, field.containing_oneof.name,
message_descriptor.full_name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
merger = self._MergeMessageField
else:
tokenizer.Consume(':')
merger = self._MergeScalarField
if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and
tokenizer.TryConsume('[')):
# Short repeated format, e.g. "foo: [1, 2, 3]"
while True:
merger(tokenizer, message, field)
if tokenizer.TryConsume(']'):
break
tokenizer.Consume(',')
else:
merger(tokenizer, message, field)
else: # Proto field is unknown.
assert self.allow_unknown_extension
_SkipFieldContents(tokenizer)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';') | python | def _MergeField(self, tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of text parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if (hasattr(message_descriptor, 'syntax') and
message_descriptor.syntax == 'proto3'):
# Proto3 doesn't represent presence so we can't test if multiple
# scalars have occurred. We have to allow them.
self._allow_multiple_scalars = True
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
# pylint: disable=protected-access
field = message.Extensions._FindExtensionByName(name)
# pylint: enable=protected-access
if not field:
if self.allow_unknown_extension:
field = None
else:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' %
(name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifierOrNumber()
if self.allow_field_number and name.isdigit():
number = ParseInteger(name, True, True)
field = message_descriptor.fields_by_number.get(number, None)
if not field and message_descriptor.is_extendable:
field = message.Extensions._FindExtensionByNumber(number)
else:
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' %
(message_descriptor.full_name, name))
if field:
if not self._allow_multiple_scalars and field.containing_oneof:
# Check if there's a different field set in this oneof.
# Note that we ignore the case if the same field was set before, and we
# apply _allow_multiple_scalars to non-scalar fields as well.
which_oneof = message.WhichOneof(field.containing_oneof.name)
if which_oneof is not None and which_oneof != field.name:
raise tokenizer.ParseErrorPreviousToken(
'Field "%s" is specified along with field "%s", another member '
'of oneof "%s" for message type "%s".' %
(field.name, which_oneof, field.containing_oneof.name,
message_descriptor.full_name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
merger = self._MergeMessageField
else:
tokenizer.Consume(':')
merger = self._MergeScalarField
if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and
tokenizer.TryConsume('[')):
# Short repeated format, e.g. "foo: [1, 2, 3]"
while True:
merger(tokenizer, message, field)
if tokenizer.TryConsume(']'):
break
tokenizer.Consume(',')
else:
merger(tokenizer, message, field)
else: # Proto field is unknown.
assert self.allow_unknown_extension
_SkipFieldContents(tokenizer)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';') | [
"def",
"_MergeField",
"(",
"self",
",",
"tokenizer",
",",
"message",
")",
":",
"message_descriptor",
"=",
"message",
".",
"DESCRIPTOR",
"if",
"(",
"hasattr",
"(",
"message_descriptor",
",",
"'syntax'",
")",
"and",
"message_descriptor",
".",
"syntax",
"==",
"'proto3'",
")",
":",
"# Proto3 doesn't represent presence so we can't test if multiple",
"# scalars have occurred. We have to allow them.",
"self",
".",
"_allow_multiple_scalars",
"=",
"True",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"'['",
")",
":",
"name",
"=",
"[",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"]",
"while",
"tokenizer",
".",
"TryConsume",
"(",
"'.'",
")",
":",
"name",
".",
"append",
"(",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
")",
"name",
"=",
"'.'",
".",
"join",
"(",
"name",
")",
"if",
"not",
"message_descriptor",
".",
"is_extendable",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Message type \"%s\" does not have extensions.'",
"%",
"message_descriptor",
".",
"full_name",
")",
"# pylint: disable=protected-access",
"field",
"=",
"message",
".",
"Extensions",
".",
"_FindExtensionByName",
"(",
"name",
")",
"# pylint: enable=protected-access",
"if",
"not",
"field",
":",
"if",
"self",
".",
"allow_unknown_extension",
":",
"field",
"=",
"None",
"else",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Extension \"%s\" not registered.'",
"%",
"name",
")",
"elif",
"message_descriptor",
"!=",
"field",
".",
"containing_type",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Extension \"%s\" does not extend message type \"%s\".'",
"%",
"(",
"name",
",",
"message_descriptor",
".",
"full_name",
")",
")",
"tokenizer",
".",
"Consume",
"(",
"']'",
")",
"else",
":",
"name",
"=",
"tokenizer",
".",
"ConsumeIdentifierOrNumber",
"(",
")",
"if",
"self",
".",
"allow_field_number",
"and",
"name",
".",
"isdigit",
"(",
")",
":",
"number",
"=",
"ParseInteger",
"(",
"name",
",",
"True",
",",
"True",
")",
"field",
"=",
"message_descriptor",
".",
"fields_by_number",
".",
"get",
"(",
"number",
",",
"None",
")",
"if",
"not",
"field",
"and",
"message_descriptor",
".",
"is_extendable",
":",
"field",
"=",
"message",
".",
"Extensions",
".",
"_FindExtensionByNumber",
"(",
"number",
")",
"else",
":",
"field",
"=",
"message_descriptor",
".",
"fields_by_name",
".",
"get",
"(",
"name",
",",
"None",
")",
"# Group names are expected to be capitalized as they appear in the",
"# .proto file, which actually matches their type names, not their field",
"# names.",
"if",
"not",
"field",
":",
"field",
"=",
"message_descriptor",
".",
"fields_by_name",
".",
"get",
"(",
"name",
".",
"lower",
"(",
")",
",",
"None",
")",
"if",
"field",
"and",
"field",
".",
"type",
"!=",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_GROUP",
":",
"field",
"=",
"None",
"if",
"(",
"field",
"and",
"field",
".",
"type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_GROUP",
"and",
"field",
".",
"message_type",
".",
"name",
"!=",
"name",
")",
":",
"field",
"=",
"None",
"if",
"not",
"field",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Message type \"%s\" has no field named \"%s\".'",
"%",
"(",
"message_descriptor",
".",
"full_name",
",",
"name",
")",
")",
"if",
"field",
":",
"if",
"not",
"self",
".",
"_allow_multiple_scalars",
"and",
"field",
".",
"containing_oneof",
":",
"# Check if there's a different field set in this oneof.",
"# Note that we ignore the case if the same field was set before, and we",
"# apply _allow_multiple_scalars to non-scalar fields as well.",
"which_oneof",
"=",
"message",
".",
"WhichOneof",
"(",
"field",
".",
"containing_oneof",
".",
"name",
")",
"if",
"which_oneof",
"is",
"not",
"None",
"and",
"which_oneof",
"!=",
"field",
".",
"name",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Field \"%s\" is specified along with field \"%s\", another member '",
"'of oneof \"%s\" for message type \"%s\".'",
"%",
"(",
"field",
".",
"name",
",",
"which_oneof",
",",
"field",
".",
"containing_oneof",
".",
"name",
",",
"message_descriptor",
".",
"full_name",
")",
")",
"if",
"field",
".",
"cpp_type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"CPPTYPE_MESSAGE",
":",
"tokenizer",
".",
"TryConsume",
"(",
"':'",
")",
"merger",
"=",
"self",
".",
"_MergeMessageField",
"else",
":",
"tokenizer",
".",
"Consume",
"(",
"':'",
")",
"merger",
"=",
"self",
".",
"_MergeScalarField",
"if",
"(",
"field",
".",
"label",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"LABEL_REPEATED",
"and",
"tokenizer",
".",
"TryConsume",
"(",
"'['",
")",
")",
":",
"# Short repeated format, e.g. \"foo: [1, 2, 3]\"",
"while",
"True",
":",
"merger",
"(",
"tokenizer",
",",
"message",
",",
"field",
")",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"']'",
")",
":",
"break",
"tokenizer",
".",
"Consume",
"(",
"','",
")",
"else",
":",
"merger",
"(",
"tokenizer",
",",
"message",
",",
"field",
")",
"else",
":",
"# Proto field is unknown.",
"assert",
"self",
".",
"allow_unknown_extension",
"_SkipFieldContents",
"(",
"tokenizer",
")",
"# For historical reasons, fields may optionally be separated by commas or",
"# semicolons.",
"if",
"not",
"tokenizer",
".",
"TryConsume",
"(",
"','",
")",
":",
"tokenizer",
".",
"TryConsume",
"(",
"';'",
")"
] | Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of text parsing problems. | [
"Merges",
"a",
"single",
"protocol",
"message",
"field",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L585-L693 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser._ConsumeAnyTypeUrl | def _ConsumeAnyTypeUrl(self, tokenizer):
"""Consumes a google.protobuf.Any type URL and returns the type name."""
# Consume "type.googleapis.com/".
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('/')
# Consume the fully-qualified type name.
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
return '.'.join(name) | python | def _ConsumeAnyTypeUrl(self, tokenizer):
"""Consumes a google.protobuf.Any type URL and returns the type name."""
# Consume "type.googleapis.com/".
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('.')
tokenizer.ConsumeIdentifier()
tokenizer.Consume('/')
# Consume the fully-qualified type name.
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
return '.'.join(name) | [
"def",
"_ConsumeAnyTypeUrl",
"(",
"self",
",",
"tokenizer",
")",
":",
"# Consume \"type.googleapis.com/\".",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"tokenizer",
".",
"Consume",
"(",
"'.'",
")",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"tokenizer",
".",
"Consume",
"(",
"'.'",
")",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"tokenizer",
".",
"Consume",
"(",
"'/'",
")",
"# Consume the fully-qualified type name.",
"name",
"=",
"[",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
"]",
"while",
"tokenizer",
".",
"TryConsume",
"(",
"'.'",
")",
":",
"name",
".",
"append",
"(",
"tokenizer",
".",
"ConsumeIdentifier",
"(",
")",
")",
"return",
"'.'",
".",
"join",
"(",
"name",
")"
] | Consumes a google.protobuf.Any type URL and returns the type name. | [
"Consumes",
"a",
"google",
".",
"protobuf",
".",
"Any",
"type",
"URL",
"and",
"returns",
"the",
"type",
"name",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L695-L708 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser._MergeMessageField | def _MergeMessageField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: The message of which field is a member.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
"""
is_map_entry = _IsMapEntry(field)
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if (field.message_type.full_name == _ANY_FULL_TYPE_NAME and
tokenizer.TryConsume('[')):
packed_type_name = self._ConsumeAnyTypeUrl(tokenizer)
tokenizer.Consume(']')
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
expanded_any_end_token = '>'
else:
tokenizer.Consume('{')
expanded_any_end_token = '}'
if not self.descriptor_pool:
raise ParseError('Descriptor pool required to parse expanded Any field')
expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name,
self.descriptor_pool)
if not expanded_any_sub_message:
raise ParseError('Type %s not found in descriptor pool' %
packed_type_name)
while not tokenizer.TryConsume(expanded_any_end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' %
(expanded_any_end_token,))
self._MergeField(tokenizer, expanded_any_sub_message)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
any_message = getattr(message, field.name).add()
else:
any_message = getattr(message, field.name)
any_message.Pack(expanded_any_sub_message)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
elif is_map_entry:
sub_message = getattr(message, field.name).GetEntryClass()()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,))
self._MergeField(tokenizer, sub_message)
if is_map_entry:
value_cpptype = field.message_type.fields_by_name['value'].cpp_type
if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
value = getattr(message, field.name)[sub_message.key]
value.MergeFrom(sub_message.value)
else:
getattr(message, field.name)[sub_message.key] = sub_message.value | python | def _MergeMessageField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: The message of which field is a member.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
"""
is_map_entry = _IsMapEntry(field)
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if (field.message_type.full_name == _ANY_FULL_TYPE_NAME and
tokenizer.TryConsume('[')):
packed_type_name = self._ConsumeAnyTypeUrl(tokenizer)
tokenizer.Consume(']')
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
expanded_any_end_token = '>'
else:
tokenizer.Consume('{')
expanded_any_end_token = '}'
if not self.descriptor_pool:
raise ParseError('Descriptor pool required to parse expanded Any field')
expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name,
self.descriptor_pool)
if not expanded_any_sub_message:
raise ParseError('Type %s not found in descriptor pool' %
packed_type_name)
while not tokenizer.TryConsume(expanded_any_end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' %
(expanded_any_end_token,))
self._MergeField(tokenizer, expanded_any_sub_message)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
any_message = getattr(message, field.name).add()
else:
any_message = getattr(message, field.name)
any_message.Pack(expanded_any_sub_message)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
elif is_map_entry:
sub_message = getattr(message, field.name).GetEntryClass()()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,))
self._MergeField(tokenizer, sub_message)
if is_map_entry:
value_cpptype = field.message_type.fields_by_name['value'].cpp_type
if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
value = getattr(message, field.name)[sub_message.key]
value.MergeFrom(sub_message.value)
else:
getattr(message, field.name)[sub_message.key] = sub_message.value | [
"def",
"_MergeMessageField",
"(",
"self",
",",
"tokenizer",
",",
"message",
",",
"field",
")",
":",
"is_map_entry",
"=",
"_IsMapEntry",
"(",
"field",
")",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"'<'",
")",
":",
"end_token",
"=",
"'>'",
"else",
":",
"tokenizer",
".",
"Consume",
"(",
"'{'",
")",
"end_token",
"=",
"'}'",
"if",
"(",
"field",
".",
"message_type",
".",
"full_name",
"==",
"_ANY_FULL_TYPE_NAME",
"and",
"tokenizer",
".",
"TryConsume",
"(",
"'['",
")",
")",
":",
"packed_type_name",
"=",
"self",
".",
"_ConsumeAnyTypeUrl",
"(",
"tokenizer",
")",
"tokenizer",
".",
"Consume",
"(",
"']'",
")",
"tokenizer",
".",
"TryConsume",
"(",
"':'",
")",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"'<'",
")",
":",
"expanded_any_end_token",
"=",
"'>'",
"else",
":",
"tokenizer",
".",
"Consume",
"(",
"'{'",
")",
"expanded_any_end_token",
"=",
"'}'",
"if",
"not",
"self",
".",
"descriptor_pool",
":",
"raise",
"ParseError",
"(",
"'Descriptor pool required to parse expanded Any field'",
")",
"expanded_any_sub_message",
"=",
"_BuildMessageFromTypeName",
"(",
"packed_type_name",
",",
"self",
".",
"descriptor_pool",
")",
"if",
"not",
"expanded_any_sub_message",
":",
"raise",
"ParseError",
"(",
"'Type %s not found in descriptor pool'",
"%",
"packed_type_name",
")",
"while",
"not",
"tokenizer",
".",
"TryConsume",
"(",
"expanded_any_end_token",
")",
":",
"if",
"tokenizer",
".",
"AtEnd",
"(",
")",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Expected \"%s\".'",
"%",
"(",
"expanded_any_end_token",
",",
")",
")",
"self",
".",
"_MergeField",
"(",
"tokenizer",
",",
"expanded_any_sub_message",
")",
"if",
"field",
".",
"label",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"LABEL_REPEATED",
":",
"any_message",
"=",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
".",
"add",
"(",
")",
"else",
":",
"any_message",
"=",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
"any_message",
".",
"Pack",
"(",
"expanded_any_sub_message",
")",
"elif",
"field",
".",
"label",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"LABEL_REPEATED",
":",
"if",
"field",
".",
"is_extension",
":",
"sub_message",
"=",
"message",
".",
"Extensions",
"[",
"field",
"]",
".",
"add",
"(",
")",
"elif",
"is_map_entry",
":",
"sub_message",
"=",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
".",
"GetEntryClass",
"(",
")",
"(",
")",
"else",
":",
"sub_message",
"=",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
".",
"add",
"(",
")",
"else",
":",
"if",
"field",
".",
"is_extension",
":",
"sub_message",
"=",
"message",
".",
"Extensions",
"[",
"field",
"]",
"else",
":",
"sub_message",
"=",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
"sub_message",
".",
"SetInParent",
"(",
")",
"while",
"not",
"tokenizer",
".",
"TryConsume",
"(",
"end_token",
")",
":",
"if",
"tokenizer",
".",
"AtEnd",
"(",
")",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Expected \"%s\".'",
"%",
"(",
"end_token",
",",
")",
")",
"self",
".",
"_MergeField",
"(",
"tokenizer",
",",
"sub_message",
")",
"if",
"is_map_entry",
":",
"value_cpptype",
"=",
"field",
".",
"message_type",
".",
"fields_by_name",
"[",
"'value'",
"]",
".",
"cpp_type",
"if",
"value_cpptype",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"CPPTYPE_MESSAGE",
":",
"value",
"=",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
"[",
"sub_message",
".",
"key",
"]",
"value",
".",
"MergeFrom",
"(",
"sub_message",
".",
"value",
")",
"else",
":",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
"[",
"sub_message",
".",
"key",
"]",
"=",
"sub_message",
".",
"value"
] | Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: The message of which field is a member.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems. | [
"Merges",
"a",
"single",
"scalar",
"field",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L710-L781 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | _Parser._MergeScalarField | def _MergeScalarField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
RuntimeError: On runtime errors.
"""
_ = self.allow_unknown_extension
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = _ConsumeInt32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = _ConsumeInt64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = _ConsumeUint32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = _ConsumeUint64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
value = tokenizer.ConsumeEnum(field)
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
if not self._allow_multiple_scalars and message.HasExtension(field):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" extensions.' %
(message.DESCRIPTOR.full_name, field.full_name))
else:
message.Extensions[field] = value
else:
if not self._allow_multiple_scalars and message.HasField(field.name):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" fields.' %
(message.DESCRIPTOR.full_name, field.name))
else:
setattr(message, field.name, value) | python | def _MergeScalarField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
RuntimeError: On runtime errors.
"""
_ = self.allow_unknown_extension
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = _ConsumeInt32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = _ConsumeInt64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = _ConsumeUint32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = _ConsumeUint64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
value = tokenizer.ConsumeEnum(field)
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
if not self._allow_multiple_scalars and message.HasExtension(field):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" extensions.' %
(message.DESCRIPTOR.full_name, field.full_name))
else:
message.Extensions[field] = value
else:
if not self._allow_multiple_scalars and message.HasField(field.name):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" fields.' %
(message.DESCRIPTOR.full_name, field.name))
else:
setattr(message, field.name, value) | [
"def",
"_MergeScalarField",
"(",
"self",
",",
"tokenizer",
",",
"message",
",",
"field",
")",
":",
"_",
"=",
"self",
".",
"allow_unknown_extension",
"value",
"=",
"None",
"if",
"field",
".",
"type",
"in",
"(",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_INT32",
",",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_SINT32",
",",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_SFIXED32",
")",
":",
"value",
"=",
"_ConsumeInt32",
"(",
"tokenizer",
")",
"elif",
"field",
".",
"type",
"in",
"(",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_INT64",
",",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_SINT64",
",",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_SFIXED64",
")",
":",
"value",
"=",
"_ConsumeInt64",
"(",
"tokenizer",
")",
"elif",
"field",
".",
"type",
"in",
"(",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_UINT32",
",",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_FIXED32",
")",
":",
"value",
"=",
"_ConsumeUint32",
"(",
"tokenizer",
")",
"elif",
"field",
".",
"type",
"in",
"(",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_UINT64",
",",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_FIXED64",
")",
":",
"value",
"=",
"_ConsumeUint64",
"(",
"tokenizer",
")",
"elif",
"field",
".",
"type",
"in",
"(",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_FLOAT",
",",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_DOUBLE",
")",
":",
"value",
"=",
"tokenizer",
".",
"ConsumeFloat",
"(",
")",
"elif",
"field",
".",
"type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_BOOL",
":",
"value",
"=",
"tokenizer",
".",
"ConsumeBool",
"(",
")",
"elif",
"field",
".",
"type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_STRING",
":",
"value",
"=",
"tokenizer",
".",
"ConsumeString",
"(",
")",
"elif",
"field",
".",
"type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_BYTES",
":",
"value",
"=",
"tokenizer",
".",
"ConsumeByteString",
"(",
")",
"elif",
"field",
".",
"type",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"TYPE_ENUM",
":",
"value",
"=",
"tokenizer",
".",
"ConsumeEnum",
"(",
"field",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Unknown field type %d'",
"%",
"field",
".",
"type",
")",
"if",
"field",
".",
"label",
"==",
"descriptor",
".",
"FieldDescriptor",
".",
"LABEL_REPEATED",
":",
"if",
"field",
".",
"is_extension",
":",
"message",
".",
"Extensions",
"[",
"field",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
".",
"append",
"(",
"value",
")",
"else",
":",
"if",
"field",
".",
"is_extension",
":",
"if",
"not",
"self",
".",
"_allow_multiple_scalars",
"and",
"message",
".",
"HasExtension",
"(",
"field",
")",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Message type \"%s\" should not have multiple \"%s\" extensions.'",
"%",
"(",
"message",
".",
"DESCRIPTOR",
".",
"full_name",
",",
"field",
".",
"full_name",
")",
")",
"else",
":",
"message",
".",
"Extensions",
"[",
"field",
"]",
"=",
"value",
"else",
":",
"if",
"not",
"self",
".",
"_allow_multiple_scalars",
"and",
"message",
".",
"HasField",
"(",
"field",
".",
"name",
")",
":",
"raise",
"tokenizer",
".",
"ParseErrorPreviousToken",
"(",
"'Message type \"%s\" should not have multiple \"%s\" fields.'",
"%",
"(",
"message",
".",
"DESCRIPTOR",
".",
"full_name",
",",
"field",
".",
"name",
")",
")",
"else",
":",
"setattr",
"(",
"message",
",",
"field",
".",
"name",
",",
"value",
")"
] | Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
RuntimeError: On runtime errors. | [
"Merges",
"a",
"single",
"scalar",
"field",
"into",
"a",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L783-L845 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.TryConsume | def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False | python | def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False | [
"def",
"TryConsume",
"(",
"self",
",",
"token",
")",
":",
"if",
"self",
".",
"token",
"==",
"token",
":",
"self",
".",
"NextToken",
"(",
")",
"return",
"True",
"return",
"False"
] | Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed. | [
"Tries",
"to",
"consume",
"a",
"given",
"piece",
"of",
"text",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1002-L1014 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.ConsumeCommentOrTrailingComment | def ConsumeCommentOrTrailingComment(self):
"""Consumes a comment, returns a 2-tuple (trailing bool, comment str)."""
# Tokenizer initializes _previous_line and _previous_column to 0. As the
# tokenizer starts, it looks like there is a previous token on the line.
just_started = self._line == 0 and self._column == 0
before_parsing = self._previous_line
comment = self.ConsumeComment()
# A trailing comment is a comment on the same line than the previous token.
trailing = (self._previous_line == before_parsing
and not just_started)
return trailing, comment | python | def ConsumeCommentOrTrailingComment(self):
"""Consumes a comment, returns a 2-tuple (trailing bool, comment str)."""
# Tokenizer initializes _previous_line and _previous_column to 0. As the
# tokenizer starts, it looks like there is a previous token on the line.
just_started = self._line == 0 and self._column == 0
before_parsing = self._previous_line
comment = self.ConsumeComment()
# A trailing comment is a comment on the same line than the previous token.
trailing = (self._previous_line == before_parsing
and not just_started)
return trailing, comment | [
"def",
"ConsumeCommentOrTrailingComment",
"(",
"self",
")",
":",
"# Tokenizer initializes _previous_line and _previous_column to 0. As the",
"# tokenizer starts, it looks like there is a previous token on the line.",
"just_started",
"=",
"self",
".",
"_line",
"==",
"0",
"and",
"self",
".",
"_column",
"==",
"0",
"before_parsing",
"=",
"self",
".",
"_previous_line",
"comment",
"=",
"self",
".",
"ConsumeComment",
"(",
")",
"# A trailing comment is a comment on the same line than the previous token.",
"trailing",
"=",
"(",
"self",
".",
"_previous_line",
"==",
"before_parsing",
"and",
"not",
"just_started",
")",
"return",
"trailing",
",",
"comment"
] | Consumes a comment, returns a 2-tuple (trailing bool, comment str). | [
"Consumes",
"a",
"comment",
"returns",
"a",
"2",
"-",
"tuple",
"(",
"trailing",
"bool",
"comment",
"str",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1035-L1049 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.ConsumeIdentifier | def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self.ParseError('Expected identifier.')
self.NextToken()
return result | python | def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self.ParseError('Expected identifier.')
self.NextToken()
return result | [
"def",
"ConsumeIdentifier",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"token",
"if",
"not",
"self",
".",
"_IDENTIFIER",
".",
"match",
"(",
"result",
")",
":",
"raise",
"self",
".",
"ParseError",
"(",
"'Expected identifier.'",
")",
"self",
".",
"NextToken",
"(",
")",
"return",
"result"
] | Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed. | [
"Consumes",
"protocol",
"message",
"field",
"identifier",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1058-L1071 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.ConsumeIdentifierOrNumber | def ConsumeIdentifierOrNumber(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER_OR_NUMBER.match(result):
raise self.ParseError('Expected identifier or number.')
self.NextToken()
return result | python | def ConsumeIdentifierOrNumber(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER_OR_NUMBER.match(result):
raise self.ParseError('Expected identifier or number.')
self.NextToken()
return result | [
"def",
"ConsumeIdentifierOrNumber",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"token",
"if",
"not",
"self",
".",
"_IDENTIFIER_OR_NUMBER",
".",
"match",
"(",
"result",
")",
":",
"raise",
"self",
".",
"ParseError",
"(",
"'Expected identifier or number.'",
")",
"self",
".",
"NextToken",
"(",
")",
"return",
"result"
] | Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed. | [
"Consumes",
"protocol",
"message",
"field",
"identifier",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1080-L1093 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.ConsumeInteger | def ConsumeInteger(self, is_long=False):
"""Consumes an integer number.
Args:
is_long: True if the value should be returned as a long integer.
Returns:
The integer parsed.
Raises:
ParseError: If an integer couldn't be consumed.
"""
try:
result = _ParseAbstractInteger(self.token, is_long=is_long)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result | python | def ConsumeInteger(self, is_long=False):
"""Consumes an integer number.
Args:
is_long: True if the value should be returned as a long integer.
Returns:
The integer parsed.
Raises:
ParseError: If an integer couldn't be consumed.
"""
try:
result = _ParseAbstractInteger(self.token, is_long=is_long)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result | [
"def",
"ConsumeInteger",
"(",
"self",
",",
"is_long",
"=",
"False",
")",
":",
"try",
":",
"result",
"=",
"_ParseAbstractInteger",
"(",
"self",
".",
"token",
",",
"is_long",
"=",
"is_long",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"self",
".",
"ParseError",
"(",
"str",
"(",
"e",
")",
")",
"self",
".",
"NextToken",
"(",
")",
"return",
"result"
] | Consumes an integer number.
Args:
is_long: True if the value should be returned as a long integer.
Returns:
The integer parsed.
Raises:
ParseError: If an integer couldn't be consumed. | [
"Consumes",
"an",
"integer",
"number",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1103-L1119 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.ConsumeString | def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
the_bytes = self.ConsumeByteString()
try:
return six.text_type(the_bytes, 'utf-8')
except UnicodeDecodeError as e:
raise self._StringParseError(e) | python | def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
the_bytes = self.ConsumeByteString()
try:
return six.text_type(the_bytes, 'utf-8')
except UnicodeDecodeError as e:
raise self._StringParseError(e) | [
"def",
"ConsumeString",
"(",
"self",
")",
":",
"the_bytes",
"=",
"self",
".",
"ConsumeByteString",
"(",
")",
"try",
":",
"return",
"six",
".",
"text_type",
"(",
"the_bytes",
",",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
"as",
"e",
":",
"raise",
"self",
".",
"_StringParseError",
"(",
"e",
")"
] | Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed. | [
"Consumes",
"a",
"string",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1167-L1180 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.ConsumeByteString | def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
the_list = [self._ConsumeSingleByteString()]
while self.token and self.token[0] in _QUOTES:
the_list.append(self._ConsumeSingleByteString())
return b''.join(the_list) | python | def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
the_list = [self._ConsumeSingleByteString()]
while self.token and self.token[0] in _QUOTES:
the_list.append(self._ConsumeSingleByteString())
return b''.join(the_list) | [
"def",
"ConsumeByteString",
"(",
"self",
")",
":",
"the_list",
"=",
"[",
"self",
".",
"_ConsumeSingleByteString",
"(",
")",
"]",
"while",
"self",
".",
"token",
"and",
"self",
".",
"token",
"[",
"0",
"]",
"in",
"_QUOTES",
":",
"the_list",
".",
"append",
"(",
"self",
".",
"_ConsumeSingleByteString",
"(",
")",
")",
"return",
"b''",
".",
"join",
"(",
"the_list",
")"
] | Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed. | [
"Consumes",
"a",
"byte",
"array",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1182-L1194 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | Tokenizer.NextToken | def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._more_lines:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if not match and not self._skip_comments:
match = self._COMMENT.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column] | python | def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._more_lines:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if not match and not self._skip_comments:
match = self._COMMENT.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column] | [
"def",
"NextToken",
"(",
"self",
")",
":",
"self",
".",
"_previous_line",
"=",
"self",
".",
"_line",
"self",
".",
"_previous_column",
"=",
"self",
".",
"_column",
"self",
".",
"_column",
"+=",
"len",
"(",
"self",
".",
"token",
")",
"self",
".",
"_SkipWhitespace",
"(",
")",
"if",
"not",
"self",
".",
"_more_lines",
":",
"self",
".",
"token",
"=",
"''",
"return",
"match",
"=",
"self",
".",
"_TOKEN",
".",
"match",
"(",
"self",
".",
"_current_line",
",",
"self",
".",
"_column",
")",
"if",
"not",
"match",
"and",
"not",
"self",
".",
"_skip_comments",
":",
"match",
"=",
"self",
".",
"_COMMENT",
".",
"match",
"(",
"self",
".",
"_current_line",
",",
"self",
".",
"_column",
")",
"if",
"match",
":",
"token",
"=",
"match",
".",
"group",
"(",
"0",
")",
"self",
".",
"token",
"=",
"token",
"else",
":",
"self",
".",
"token",
"=",
"self",
".",
"_current_line",
"[",
"self",
".",
"_column",
"]"
] | Reads the next meaningful token. | [
"Reads",
"the",
"next",
"meaningful",
"token",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1249-L1268 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/decision_tree_regression.py | create | def create(dataset, target,
features=None,
validation_set='auto',
max_depth=6,
min_loss_reduction=0.0, min_child_weight=0.1,
verbose=True,
random_seed = None,
metric = 'auto',
**kwargs):
"""
Create a :class:`~turicreate.decision_tree_regression.DecisionTreeRegression` to predict
a scalar target variable using one or more features. In addition to standard
numeric and categorical types, features can also be extracted automatically
from list- or dictionary-type SFrame columns.
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
Only numerical typed (int, float) target column is allowed.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, using all columns.
validation_set : SFrame, optional
The validation set that is used to watch the validation result as
boosting progress.
max_depth : float, optional
Maximum depth of a tree. Must be at least 1.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition/split a
node during the tree learning phase. Larger (more positive) values
can help prevent overfitting by avoiding splits that do not
sufficiently reduce the loss function.
min_child_weight : float, optional (non-negative)
Controls the minimum weight of each leaf node. Larger values result in
more conservative tree learning and help prevent overfitting.
Formally, this is minimum sum of instance weights (hessians) in each
node. If the tree learning algorithm results in a leaf node with the
sum of instance weights less than `min_child_weight`, tree building
will terminate.
verbose : boolean, optional
If True, print progress information during training.
random_seed: int, optional
Seeds random operations such as column and row subsampling, such that
results are reproducible.
metric : str or list[str], optional
Performance metric(s) that are tracked during training. When specified,
the progress table will display the tracked metric(s) on training and
validation set.
Supported metrics are: {'rmse', 'max_error'}
Returns
-------
out : DecisionTreeRegression
A trained decision tree model
References
----------
- `Wikipedia - Gradient tree boosting
<http://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting>`_
- `Trevor Hastie's slides on Boosted Trees and Random Forest
<http://jessica2.msri.org/attachments/10778/10778-boost.pdf>`_
See Also
--------
DecisionTreeRegression, turicreate.linear_regression.LinearRegression, turicreate.regression.create
Examples
--------
Setup the data:
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = turicreate.SFrame.read_csv(url)
>>> data['label'] = data['label'] == 'p'
Split the data into training and test data:
>>> train, test = data.random_split(0.8)
Create the model:
>>> model = turicreate.decision_tree_regression.create(train, target='label')
Make predictions and evaluate the model:
>>> predictions = model.predict(test)
>>> results = model.evaluate(test)
"""
if random_seed is not None:
kwargs['random_seed'] = random_seed
model = _sl.create(dataset = dataset,
target = target,
features = features,
model_name = 'decision_tree_regression',
validation_set = validation_set,
max_depth = max_depth,
min_loss_reduction = min_loss_reduction,
min_child_weight = min_child_weight,
verbose = verbose, **kwargs)
return DecisionTreeRegression(model.__proxy__) | python | def create(dataset, target,
features=None,
validation_set='auto',
max_depth=6,
min_loss_reduction=0.0, min_child_weight=0.1,
verbose=True,
random_seed = None,
metric = 'auto',
**kwargs):
"""
Create a :class:`~turicreate.decision_tree_regression.DecisionTreeRegression` to predict
a scalar target variable using one or more features. In addition to standard
numeric and categorical types, features can also be extracted automatically
from list- or dictionary-type SFrame columns.
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
Only numerical typed (int, float) target column is allowed.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, using all columns.
validation_set : SFrame, optional
The validation set that is used to watch the validation result as
boosting progress.
max_depth : float, optional
Maximum depth of a tree. Must be at least 1.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition/split a
node during the tree learning phase. Larger (more positive) values
can help prevent overfitting by avoiding splits that do not
sufficiently reduce the loss function.
min_child_weight : float, optional (non-negative)
Controls the minimum weight of each leaf node. Larger values result in
more conservative tree learning and help prevent overfitting.
Formally, this is minimum sum of instance weights (hessians) in each
node. If the tree learning algorithm results in a leaf node with the
sum of instance weights less than `min_child_weight`, tree building
will terminate.
verbose : boolean, optional
If True, print progress information during training.
random_seed: int, optional
Seeds random operations such as column and row subsampling, such that
results are reproducible.
metric : str or list[str], optional
Performance metric(s) that are tracked during training. When specified,
the progress table will display the tracked metric(s) on training and
validation set.
Supported metrics are: {'rmse', 'max_error'}
Returns
-------
out : DecisionTreeRegression
A trained decision tree model
References
----------
- `Wikipedia - Gradient tree boosting
<http://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting>`_
- `Trevor Hastie's slides on Boosted Trees and Random Forest
<http://jessica2.msri.org/attachments/10778/10778-boost.pdf>`_
See Also
--------
DecisionTreeRegression, turicreate.linear_regression.LinearRegression, turicreate.regression.create
Examples
--------
Setup the data:
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = turicreate.SFrame.read_csv(url)
>>> data['label'] = data['label'] == 'p'
Split the data into training and test data:
>>> train, test = data.random_split(0.8)
Create the model:
>>> model = turicreate.decision_tree_regression.create(train, target='label')
Make predictions and evaluate the model:
>>> predictions = model.predict(test)
>>> results = model.evaluate(test)
"""
if random_seed is not None:
kwargs['random_seed'] = random_seed
model = _sl.create(dataset = dataset,
target = target,
features = features,
model_name = 'decision_tree_regression',
validation_set = validation_set,
max_depth = max_depth,
min_loss_reduction = min_loss_reduction,
min_child_weight = min_child_weight,
verbose = verbose, **kwargs)
return DecisionTreeRegression(model.__proxy__) | [
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"validation_set",
"=",
"'auto'",
",",
"max_depth",
"=",
"6",
",",
"min_loss_reduction",
"=",
"0.0",
",",
"min_child_weight",
"=",
"0.1",
",",
"verbose",
"=",
"True",
",",
"random_seed",
"=",
"None",
",",
"metric",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"random_seed",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'random_seed'",
"]",
"=",
"random_seed",
"model",
"=",
"_sl",
".",
"create",
"(",
"dataset",
"=",
"dataset",
",",
"target",
"=",
"target",
",",
"features",
"=",
"features",
",",
"model_name",
"=",
"'decision_tree_regression'",
",",
"validation_set",
"=",
"validation_set",
",",
"max_depth",
"=",
"max_depth",
",",
"min_loss_reduction",
"=",
"min_loss_reduction",
",",
"min_child_weight",
"=",
"min_child_weight",
",",
"verbose",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"return",
"DecisionTreeRegression",
"(",
"model",
".",
"__proxy__",
")"
] | Create a :class:`~turicreate.decision_tree_regression.DecisionTreeRegression` to predict
a scalar target variable using one or more features. In addition to standard
numeric and categorical types, features can also be extracted automatically
from list- or dictionary-type SFrame columns.
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
Only numerical typed (int, float) target column is allowed.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, using all columns.
validation_set : SFrame, optional
The validation set that is used to watch the validation result as
boosting progress.
max_depth : float, optional
Maximum depth of a tree. Must be at least 1.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition/split a
node during the tree learning phase. Larger (more positive) values
can help prevent overfitting by avoiding splits that do not
sufficiently reduce the loss function.
min_child_weight : float, optional (non-negative)
Controls the minimum weight of each leaf node. Larger values result in
more conservative tree learning and help prevent overfitting.
Formally, this is minimum sum of instance weights (hessians) in each
node. If the tree learning algorithm results in a leaf node with the
sum of instance weights less than `min_child_weight`, tree building
will terminate.
verbose : boolean, optional
If True, print progress information during training.
random_seed: int, optional
Seeds random operations such as column and row subsampling, such that
results are reproducible.
metric : str or list[str], optional
Performance metric(s) that are tracked during training. When specified,
the progress table will display the tracked metric(s) on training and
validation set.
Supported metrics are: {'rmse', 'max_error'}
Returns
-------
out : DecisionTreeRegression
A trained decision tree model
References
----------
- `Wikipedia - Gradient tree boosting
<http://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting>`_
- `Trevor Hastie's slides on Boosted Trees and Random Forest
<http://jessica2.msri.org/attachments/10778/10778-boost.pdf>`_
See Also
--------
DecisionTreeRegression, turicreate.linear_regression.LinearRegression, turicreate.regression.create
Examples
--------
Setup the data:
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = turicreate.SFrame.read_csv(url)
>>> data['label'] = data['label'] == 'p'
Split the data into training and test data:
>>> train, test = data.random_split(0.8)
Create the model:
>>> model = turicreate.decision_tree_regression.create(train, target='label')
Make predictions and evaluate the model:
>>> predictions = model.predict(test)
>>> results = model.evaluate(test) | [
"Create",
"a",
":",
"class",
":",
"~turicreate",
".",
"decision_tree_regression",
".",
"DecisionTreeRegression",
"to",
"predict",
"a",
"scalar",
"target",
"variable",
"using",
"one",
"or",
"more",
"features",
".",
"In",
"addition",
"to",
"standard",
"numeric",
"and",
"categorical",
"types",
"features",
"can",
"also",
"be",
"extracted",
"automatically",
"from",
"list",
"-",
"or",
"dictionary",
"-",
"type",
"SFrame",
"columns",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/decision_tree_regression.py#L310-L426 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/decision_tree_regression.py | DecisionTreeRegression.evaluate | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Can be one of:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
----------
create, predict
Examples
--------
..sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(DecisionTreeRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | python | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Can be one of:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
----------
create, predict
Examples
--------
..sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(DecisionTreeRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"_raise_error_evaluation_metric_is_valid",
"(",
"metric",
",",
"[",
"'auto'",
",",
"'rmse'",
",",
"'max_error'",
"]",
")",
"return",
"super",
"(",
"DecisionTreeRegression",
",",
"self",
")",
".",
"evaluate",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
",",
"metric",
"=",
"metric",
")"
] | Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Can be one of:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
----------
create, predict
Examples
--------
..sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse') | [
"Evaluate",
"the",
"model",
"on",
"the",
"given",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/decision_tree_regression.py#L179-L228 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/decision_tree_regression.py | DecisionTreeRegression.predict | def predict(self, dataset, missing_value_action='auto'):
"""
Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.decision_tree_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata)
"""
return super(DecisionTreeRegression, self).predict(dataset, output_type='margin',
missing_value_action=missing_value_action) | python | def predict(self, dataset, missing_value_action='auto'):
"""
Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.decision_tree_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata)
"""
return super(DecisionTreeRegression, self).predict(dataset, output_type='margin',
missing_value_action=missing_value_action) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"return",
"super",
"(",
"DecisionTreeRegression",
",",
"self",
")",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'margin'",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] | Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.decision_tree_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata) | [
"Predict",
"the",
"target",
"column",
"of",
"the",
"given",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/decision_tree_regression.py#L258-L299 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/distances/_util.py | compute_composite_distance | def compute_composite_distance(distance, x, y):
"""
Compute the value of a composite distance function on two dictionaries,
typically SFrame rows.
Parameters
----------
distance : list[list]
A composite distance function. Composite distance functions are a
weighted sum of standard distance functions, each of which applies to
its own subset of features. Composite distance functions are specified
as a list of distance components, each of which is itself a list
containing three items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
x, y : dict
Individual observations, typically rows of an SFrame, in dictionary
form. Must include the features specified by `distance`.
Returns
-------
out : float
The distance between `x` and `y`, as specified by `distance`.
Examples
--------
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'species': ['cat', 'dog', 'fossa']})
...
>>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
... [('species',), 'levenshtein', 0.4]]
...
>>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
>>> print d
1.95286120899
"""
## Validate inputs
_validate_composite_distance(distance)
distance = _convert_distance_names_to_functions(distance)
if not isinstance(x, dict) or not isinstance(y, dict):
raise TypeError("Inputs 'x' and 'y' must be in dictionary form. " +
"Selecting individual rows of an SFrame yields the " +
"correct format.")
ans = 0.
for d in distance:
ftrs, dist, weight = d
## Special check for multiple columns with levenshtein distance.
if dist == _tc.distances.levenshtein and len(ftrs) > 1:
raise ValueError("levenshtein distance cannot be used with multiple" +
"columns. Please concatenate strings into a single " +
"column before computing the distance.")
## Extract values for specified features.
a = {}
b = {}
for ftr in ftrs:
if type(x[ftr]) != type(y[ftr]):
if not isinstance(x[ftr], (int, float)) or not isinstance(y[ftr], (int, float)):
raise ValueError("Input data has different types.")
if isinstance(x[ftr], (int, float, str)):
a[ftr] = x[ftr]
b[ftr] = y[ftr]
elif isinstance(x[ftr], dict):
for key, val in _six.iteritems(x[ftr]):
a['{}.{}'.format(ftr, key)] = val
for key, val in _six.iteritems(y[ftr]):
b['{}.{}'.format(ftr, key)] = val
elif isinstance(x[ftr], (list, _array.array)):
for i, val in enumerate(x[ftr]):
a[i] = val
for i, val in enumerate(y[ftr]):
b[i] = val
else:
raise TypeError("Type of feature '{}' not understood.".format(ftr))
## Pull out the raw values for levenshtein
if dist == _tc.distances.levenshtein:
a = list(a.values())[0]
b = list(b.values())[0]
## Compute component distance and add to the total distance.
ans += weight * dist(a, b)
return ans | python | def compute_composite_distance(distance, x, y):
"""
Compute the value of a composite distance function on two dictionaries,
typically SFrame rows.
Parameters
----------
distance : list[list]
A composite distance function. Composite distance functions are a
weighted sum of standard distance functions, each of which applies to
its own subset of features. Composite distance functions are specified
as a list of distance components, each of which is itself a list
containing three items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
x, y : dict
Individual observations, typically rows of an SFrame, in dictionary
form. Must include the features specified by `distance`.
Returns
-------
out : float
The distance between `x` and `y`, as specified by `distance`.
Examples
--------
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'species': ['cat', 'dog', 'fossa']})
...
>>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
... [('species',), 'levenshtein', 0.4]]
...
>>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
>>> print d
1.95286120899
"""
## Validate inputs
_validate_composite_distance(distance)
distance = _convert_distance_names_to_functions(distance)
if not isinstance(x, dict) or not isinstance(y, dict):
raise TypeError("Inputs 'x' and 'y' must be in dictionary form. " +
"Selecting individual rows of an SFrame yields the " +
"correct format.")
ans = 0.
for d in distance:
ftrs, dist, weight = d
## Special check for multiple columns with levenshtein distance.
if dist == _tc.distances.levenshtein and len(ftrs) > 1:
raise ValueError("levenshtein distance cannot be used with multiple" +
"columns. Please concatenate strings into a single " +
"column before computing the distance.")
## Extract values for specified features.
a = {}
b = {}
for ftr in ftrs:
if type(x[ftr]) != type(y[ftr]):
if not isinstance(x[ftr], (int, float)) or not isinstance(y[ftr], (int, float)):
raise ValueError("Input data has different types.")
if isinstance(x[ftr], (int, float, str)):
a[ftr] = x[ftr]
b[ftr] = y[ftr]
elif isinstance(x[ftr], dict):
for key, val in _six.iteritems(x[ftr]):
a['{}.{}'.format(ftr, key)] = val
for key, val in _six.iteritems(y[ftr]):
b['{}.{}'.format(ftr, key)] = val
elif isinstance(x[ftr], (list, _array.array)):
for i, val in enumerate(x[ftr]):
a[i] = val
for i, val in enumerate(y[ftr]):
b[i] = val
else:
raise TypeError("Type of feature '{}' not understood.".format(ftr))
## Pull out the raw values for levenshtein
if dist == _tc.distances.levenshtein:
a = list(a.values())[0]
b = list(b.values())[0]
## Compute component distance and add to the total distance.
ans += weight * dist(a, b)
return ans | [
"def",
"compute_composite_distance",
"(",
"distance",
",",
"x",
",",
"y",
")",
":",
"## Validate inputs",
"_validate_composite_distance",
"(",
"distance",
")",
"distance",
"=",
"_convert_distance_names_to_functions",
"(",
"distance",
")",
"if",
"not",
"isinstance",
"(",
"x",
",",
"dict",
")",
"or",
"not",
"isinstance",
"(",
"y",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"Inputs 'x' and 'y' must be in dictionary form. \"",
"+",
"\"Selecting individual rows of an SFrame yields the \"",
"+",
"\"correct format.\"",
")",
"ans",
"=",
"0.",
"for",
"d",
"in",
"distance",
":",
"ftrs",
",",
"dist",
",",
"weight",
"=",
"d",
"## Special check for multiple columns with levenshtein distance.",
"if",
"dist",
"==",
"_tc",
".",
"distances",
".",
"levenshtein",
"and",
"len",
"(",
"ftrs",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"levenshtein distance cannot be used with multiple\"",
"+",
"\"columns. Please concatenate strings into a single \"",
"+",
"\"column before computing the distance.\"",
")",
"## Extract values for specified features.",
"a",
"=",
"{",
"}",
"b",
"=",
"{",
"}",
"for",
"ftr",
"in",
"ftrs",
":",
"if",
"type",
"(",
"x",
"[",
"ftr",
"]",
")",
"!=",
"type",
"(",
"y",
"[",
"ftr",
"]",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"(",
"int",
",",
"float",
")",
")",
"or",
"not",
"isinstance",
"(",
"y",
"[",
"ftr",
"]",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Input data has different types.\"",
")",
"if",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"(",
"int",
",",
"float",
",",
"str",
")",
")",
":",
"a",
"[",
"ftr",
"]",
"=",
"x",
"[",
"ftr",
"]",
"b",
"[",
"ftr",
"]",
"=",
"y",
"[",
"ftr",
"]",
"elif",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"dict",
")",
":",
"for",
"key",
",",
"val",
"in",
"_six",
".",
"iteritems",
"(",
"x",
"[",
"ftr",
"]",
")",
":",
"a",
"[",
"'{}.{}'",
".",
"format",
"(",
"ftr",
",",
"key",
")",
"]",
"=",
"val",
"for",
"key",
",",
"val",
"in",
"_six",
".",
"iteritems",
"(",
"y",
"[",
"ftr",
"]",
")",
":",
"b",
"[",
"'{}.{}'",
".",
"format",
"(",
"ftr",
",",
"key",
")",
"]",
"=",
"val",
"elif",
"isinstance",
"(",
"x",
"[",
"ftr",
"]",
",",
"(",
"list",
",",
"_array",
".",
"array",
")",
")",
":",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"x",
"[",
"ftr",
"]",
")",
":",
"a",
"[",
"i",
"]",
"=",
"val",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"y",
"[",
"ftr",
"]",
")",
":",
"b",
"[",
"i",
"]",
"=",
"val",
"else",
":",
"raise",
"TypeError",
"(",
"\"Type of feature '{}' not understood.\"",
".",
"format",
"(",
"ftr",
")",
")",
"## Pull out the raw values for levenshtein",
"if",
"dist",
"==",
"_tc",
".",
"distances",
".",
"levenshtein",
":",
"a",
"=",
"list",
"(",
"a",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"b",
"=",
"list",
"(",
"b",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"## Compute component distance and add to the total distance.",
"ans",
"+=",
"weight",
"*",
"dist",
"(",
"a",
",",
"b",
")",
"return",
"ans"
] | Compute the value of a composite distance function on two dictionaries,
typically SFrame rows.
Parameters
----------
distance : list[list]
A composite distance function. Composite distance functions are a
weighted sum of standard distance functions, each of which applies to
its own subset of features. Composite distance functions are specified
as a list of distance components, each of which is itself a list
containing three items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
x, y : dict
Individual observations, typically rows of an SFrame, in dictionary
form. Must include the features specified by `distance`.
Returns
-------
out : float
The distance between `x` and `y`, as specified by `distance`.
Examples
--------
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'species': ['cat', 'dog', 'fossa']})
...
>>> dist_spec = [[('X1', 'X2'), 'euclidean', 2],
... [('species',), 'levenshtein', 0.4]]
...
>>> d = turicreate.distances.compute_composite_distance(dist_spec, sf[0], sf[1])
>>> print d
1.95286120899 | [
"Compute",
"the",
"value",
"of",
"a",
"composite",
"distance",
"function",
"on",
"two",
"dictionaries",
"typically",
"SFrame",
"rows",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/distances/_util.py#L24-L126 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/distances/_util.py | _validate_composite_distance | def _validate_composite_distance(distance):
"""
Check that composite distance function is in valid form. Don't modify the
composite distance in any way.
"""
if not isinstance(distance, list):
raise TypeError("Input 'distance' must be a composite distance.")
if len(distance) < 1:
raise ValueError("Composite distances must have a least one distance "
"component, consisting of a list of feature names, "
"a distance function (string or function handle), "
"and a weight.")
for d in distance:
## Extract individual pieces of the distance component
try:
ftrs, dist, weight = d
except:
raise TypeError("Elements of a composite distance function must " +
"have three items: a set of feature names (tuple or list), " +
"a distance function (string or function handle), " +
"and a weight.")
## Validate feature names
if len(ftrs) == 0:
raise ValueError("An empty list of features cannot be passed " +\
"as part of a composite distance function.")
if not isinstance(ftrs, (list, tuple)):
raise TypeError("Feature names must be specified in a list or tuple.")
if not all([isinstance(x, str) for x in ftrs]):
raise TypeError("Feature lists must contain only strings.")
## Validate standard distance function
if not isinstance(dist, str) and not hasattr(dist, '__call__'):
raise ValueError("Standard distances must be the name of a distance " +
"function (string) or a distance function handle")
if isinstance(dist, str):
try:
_tc.distances.__dict__[dist]
except:
raise ValueError("Distance '{}' not recognized".format(dist))
## Validate weight
if not isinstance(weight, (int, float)):
raise ValueError(
"The weight of each distance component must be a single " +\
"integer or a float value.")
if weight < 0:
raise ValueError("The weight on each distance component must be " +
"greater than or equal to zero.") | python | def _validate_composite_distance(distance):
"""
Check that composite distance function is in valid form. Don't modify the
composite distance in any way.
"""
if not isinstance(distance, list):
raise TypeError("Input 'distance' must be a composite distance.")
if len(distance) < 1:
raise ValueError("Composite distances must have a least one distance "
"component, consisting of a list of feature names, "
"a distance function (string or function handle), "
"and a weight.")
for d in distance:
## Extract individual pieces of the distance component
try:
ftrs, dist, weight = d
except:
raise TypeError("Elements of a composite distance function must " +
"have three items: a set of feature names (tuple or list), " +
"a distance function (string or function handle), " +
"and a weight.")
## Validate feature names
if len(ftrs) == 0:
raise ValueError("An empty list of features cannot be passed " +\
"as part of a composite distance function.")
if not isinstance(ftrs, (list, tuple)):
raise TypeError("Feature names must be specified in a list or tuple.")
if not all([isinstance(x, str) for x in ftrs]):
raise TypeError("Feature lists must contain only strings.")
## Validate standard distance function
if not isinstance(dist, str) and not hasattr(dist, '__call__'):
raise ValueError("Standard distances must be the name of a distance " +
"function (string) or a distance function handle")
if isinstance(dist, str):
try:
_tc.distances.__dict__[dist]
except:
raise ValueError("Distance '{}' not recognized".format(dist))
## Validate weight
if not isinstance(weight, (int, float)):
raise ValueError(
"The weight of each distance component must be a single " +\
"integer or a float value.")
if weight < 0:
raise ValueError("The weight on each distance component must be " +
"greater than or equal to zero.") | [
"def",
"_validate_composite_distance",
"(",
"distance",
")",
":",
"if",
"not",
"isinstance",
"(",
"distance",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"Input 'distance' must be a composite distance.\"",
")",
"if",
"len",
"(",
"distance",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Composite distances must have a least one distance \"",
"\"component, consisting of a list of feature names, \"",
"\"a distance function (string or function handle), \"",
"\"and a weight.\"",
")",
"for",
"d",
"in",
"distance",
":",
"## Extract individual pieces of the distance component",
"try",
":",
"ftrs",
",",
"dist",
",",
"weight",
"=",
"d",
"except",
":",
"raise",
"TypeError",
"(",
"\"Elements of a composite distance function must \"",
"+",
"\"have three items: a set of feature names (tuple or list), \"",
"+",
"\"a distance function (string or function handle), \"",
"+",
"\"and a weight.\"",
")",
"## Validate feature names",
"if",
"len",
"(",
"ftrs",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"An empty list of features cannot be passed \"",
"+",
"\"as part of a composite distance function.\"",
")",
"if",
"not",
"isinstance",
"(",
"ftrs",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Feature names must be specified in a list or tuple.\"",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"str",
")",
"for",
"x",
"in",
"ftrs",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"Feature lists must contain only strings.\"",
")",
"## Validate standard distance function",
"if",
"not",
"isinstance",
"(",
"dist",
",",
"str",
")",
"and",
"not",
"hasattr",
"(",
"dist",
",",
"'__call__'",
")",
":",
"raise",
"ValueError",
"(",
"\"Standard distances must be the name of a distance \"",
"+",
"\"function (string) or a distance function handle\"",
")",
"if",
"isinstance",
"(",
"dist",
",",
"str",
")",
":",
"try",
":",
"_tc",
".",
"distances",
".",
"__dict__",
"[",
"dist",
"]",
"except",
":",
"raise",
"ValueError",
"(",
"\"Distance '{}' not recognized\"",
".",
"format",
"(",
"dist",
")",
")",
"## Validate weight",
"if",
"not",
"isinstance",
"(",
"weight",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"The weight of each distance component must be a single \"",
"+",
"\"integer or a float value.\"",
")",
"if",
"weight",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"The weight on each distance component must be \"",
"+",
"\"greater than or equal to zero.\"",
")"
] | Check that composite distance function is in valid form. Don't modify the
composite distance in any way. | [
"Check",
"that",
"composite",
"distance",
"function",
"is",
"in",
"valid",
"form",
".",
"Don",
"t",
"modify",
"the",
"composite",
"distance",
"in",
"any",
"way",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/distances/_util.py#L129-L187 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/distances/_util.py | _scrub_composite_distance_features | def _scrub_composite_distance_features(distance, feature_blacklist):
"""
Remove feature names from the feature lists in a composite distance
function.
"""
dist_out = []
for i, d in enumerate(distance):
ftrs, dist, weight = d
new_ftrs = [x for x in ftrs if x not in feature_blacklist]
if len(new_ftrs) > 0:
dist_out.append([new_ftrs, dist, weight])
return dist_out | python | def _scrub_composite_distance_features(distance, feature_blacklist):
"""
Remove feature names from the feature lists in a composite distance
function.
"""
dist_out = []
for i, d in enumerate(distance):
ftrs, dist, weight = d
new_ftrs = [x for x in ftrs if x not in feature_blacklist]
if len(new_ftrs) > 0:
dist_out.append([new_ftrs, dist, weight])
return dist_out | [
"def",
"_scrub_composite_distance_features",
"(",
"distance",
",",
"feature_blacklist",
")",
":",
"dist_out",
"=",
"[",
"]",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"distance",
")",
":",
"ftrs",
",",
"dist",
",",
"weight",
"=",
"d",
"new_ftrs",
"=",
"[",
"x",
"for",
"x",
"in",
"ftrs",
"if",
"x",
"not",
"in",
"feature_blacklist",
"]",
"if",
"len",
"(",
"new_ftrs",
")",
">",
"0",
":",
"dist_out",
".",
"append",
"(",
"[",
"new_ftrs",
",",
"dist",
",",
"weight",
"]",
")",
"return",
"dist_out"
] | Remove feature names from the feature lists in a composite distance
function. | [
"Remove",
"feature",
"names",
"from",
"the",
"feature",
"lists",
"in",
"a",
"composite",
"distance",
"function",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/distances/_util.py#L190-L203 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/distances/_util.py | _convert_distance_names_to_functions | def _convert_distance_names_to_functions(distance):
"""
Convert function names in a composite distance function into function
handles.
"""
dist_out = _copy.deepcopy(distance)
for i, d in enumerate(distance):
_, dist, _ = d
if isinstance(dist, str):
try:
dist_out[i][1] = _tc.distances.__dict__[dist]
except:
raise ValueError("Distance '{}' not recognized.".format(dist))
return dist_out | python | def _convert_distance_names_to_functions(distance):
"""
Convert function names in a composite distance function into function
handles.
"""
dist_out = _copy.deepcopy(distance)
for i, d in enumerate(distance):
_, dist, _ = d
if isinstance(dist, str):
try:
dist_out[i][1] = _tc.distances.__dict__[dist]
except:
raise ValueError("Distance '{}' not recognized.".format(dist))
return dist_out | [
"def",
"_convert_distance_names_to_functions",
"(",
"distance",
")",
":",
"dist_out",
"=",
"_copy",
".",
"deepcopy",
"(",
"distance",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"distance",
")",
":",
"_",
",",
"dist",
",",
"_",
"=",
"d",
"if",
"isinstance",
"(",
"dist",
",",
"str",
")",
":",
"try",
":",
"dist_out",
"[",
"i",
"]",
"[",
"1",
"]",
"=",
"_tc",
".",
"distances",
".",
"__dict__",
"[",
"dist",
"]",
"except",
":",
"raise",
"ValueError",
"(",
"\"Distance '{}' not recognized.\"",
".",
"format",
"(",
"dist",
")",
")",
"return",
"dist_out"
] | Convert function names in a composite distance function into function
handles. | [
"Convert",
"function",
"names",
"in",
"a",
"composite",
"distance",
"function",
"into",
"function",
"handles",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/distances/_util.py#L206-L221 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/distances/_util.py | build_address_distance | def build_address_distance(number=None, street=None, city=None, state=None,
zip_code=None):
"""
Construct a composite distance appropriate for matching address data. NOTE:
this utility function does not guarantee that the output composite distance
will work with a particular dataset and model. When the composite distance
is applied in a particular context, the feature types and individual
distance functions must be appropriate for the given model.
Parameters
----------
number, street, city, state, zip_code : string, optional
Name of the SFrame column for the feature corresponding to the address
component. Each feature name is mapped to an appropriate distance
function and scalar multiplier.
Returns
-------
dist : list
A composite distance function, mapping sets of feature names to distance
functions.
Examples
--------
>>> homes = turicreate.SFrame({'sqft': [1230, 875, 1745],
... 'street': ['phinney', 'fairview', 'cottage'],
... 'city': ['seattle', 'olympia', 'boston'],
... 'state': ['WA', 'WA', 'MA']})
...
>>> my_dist = turicreate.distances.build_address_distance(street='street',
... city='city',
... state='state')
>>> my_dist
[[['street'], 'jaccard', 5],
[['state'], 'jaccard', 5],
[['city'], 'levenshtein', 1]]
"""
## Validate inputs
for param in [number, street, city, state, zip_code]:
if param is not None and not isinstance(param, str):
raise TypeError("All inputs must be strings. Each parameter is " +
"intended to be the name of an SFrame column.")
## Figure out features for levenshtein distance.
string_features = []
if city:
string_features.append(city)
if zip_code:
string_features.append(zip_code)
## Compile the distance components.
dist = []
if number:
dist.append([[number], 'jaccard', 1])
if street:
dist.append([[street], 'jaccard', 5])
if state:
dist.append([[state], 'jaccard', 5])
if len(string_features) > 0:
dist.append([string_features, 'levenshtein', 1])
return dist | python | def build_address_distance(number=None, street=None, city=None, state=None,
zip_code=None):
"""
Construct a composite distance appropriate for matching address data. NOTE:
this utility function does not guarantee that the output composite distance
will work with a particular dataset and model. When the composite distance
is applied in a particular context, the feature types and individual
distance functions must be appropriate for the given model.
Parameters
----------
number, street, city, state, zip_code : string, optional
Name of the SFrame column for the feature corresponding to the address
component. Each feature name is mapped to an appropriate distance
function and scalar multiplier.
Returns
-------
dist : list
A composite distance function, mapping sets of feature names to distance
functions.
Examples
--------
>>> homes = turicreate.SFrame({'sqft': [1230, 875, 1745],
... 'street': ['phinney', 'fairview', 'cottage'],
... 'city': ['seattle', 'olympia', 'boston'],
... 'state': ['WA', 'WA', 'MA']})
...
>>> my_dist = turicreate.distances.build_address_distance(street='street',
... city='city',
... state='state')
>>> my_dist
[[['street'], 'jaccard', 5],
[['state'], 'jaccard', 5],
[['city'], 'levenshtein', 1]]
"""
## Validate inputs
for param in [number, street, city, state, zip_code]:
if param is not None and not isinstance(param, str):
raise TypeError("All inputs must be strings. Each parameter is " +
"intended to be the name of an SFrame column.")
## Figure out features for levenshtein distance.
string_features = []
if city:
string_features.append(city)
if zip_code:
string_features.append(zip_code)
## Compile the distance components.
dist = []
if number:
dist.append([[number], 'jaccard', 1])
if street:
dist.append([[street], 'jaccard', 5])
if state:
dist.append([[state], 'jaccard', 5])
if len(string_features) > 0:
dist.append([string_features, 'levenshtein', 1])
return dist | [
"def",
"build_address_distance",
"(",
"number",
"=",
"None",
",",
"street",
"=",
"None",
",",
"city",
"=",
"None",
",",
"state",
"=",
"None",
",",
"zip_code",
"=",
"None",
")",
":",
"## Validate inputs",
"for",
"param",
"in",
"[",
"number",
",",
"street",
",",
"city",
",",
"state",
",",
"zip_code",
"]",
":",
"if",
"param",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"param",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"All inputs must be strings. Each parameter is \"",
"+",
"\"intended to be the name of an SFrame column.\"",
")",
"## Figure out features for levenshtein distance.",
"string_features",
"=",
"[",
"]",
"if",
"city",
":",
"string_features",
".",
"append",
"(",
"city",
")",
"if",
"zip_code",
":",
"string_features",
".",
"append",
"(",
"zip_code",
")",
"## Compile the distance components.",
"dist",
"=",
"[",
"]",
"if",
"number",
":",
"dist",
".",
"append",
"(",
"[",
"[",
"number",
"]",
",",
"'jaccard'",
",",
"1",
"]",
")",
"if",
"street",
":",
"dist",
".",
"append",
"(",
"[",
"[",
"street",
"]",
",",
"'jaccard'",
",",
"5",
"]",
")",
"if",
"state",
":",
"dist",
".",
"append",
"(",
"[",
"[",
"state",
"]",
",",
"'jaccard'",
",",
"5",
"]",
")",
"if",
"len",
"(",
"string_features",
")",
">",
"0",
":",
"dist",
".",
"append",
"(",
"[",
"string_features",
",",
"'levenshtein'",
",",
"1",
"]",
")",
"return",
"dist"
] | Construct a composite distance appropriate for matching address data. NOTE:
this utility function does not guarantee that the output composite distance
will work with a particular dataset and model. When the composite distance
is applied in a particular context, the feature types and individual
distance functions must be appropriate for the given model.
Parameters
----------
number, street, city, state, zip_code : string, optional
Name of the SFrame column for the feature corresponding to the address
component. Each feature name is mapped to an appropriate distance
function and scalar multiplier.
Returns
-------
dist : list
A composite distance function, mapping sets of feature names to distance
functions.
Examples
--------
>>> homes = turicreate.SFrame({'sqft': [1230, 875, 1745],
... 'street': ['phinney', 'fairview', 'cottage'],
... 'city': ['seattle', 'olympia', 'boston'],
... 'state': ['WA', 'WA', 'MA']})
...
>>> my_dist = turicreate.distances.build_address_distance(street='street',
... city='city',
... state='state')
>>> my_dist
[[['street'], 'jaccard', 5],
[['state'], 'jaccard', 5],
[['city'], 'levenshtein', 1]] | [
"Construct",
"a",
"composite",
"distance",
"appropriate",
"for",
"matching",
"address",
"data",
".",
"NOTE",
":",
"this",
"utility",
"function",
"does",
"not",
"guarantee",
"that",
"the",
"output",
"composite",
"distance",
"will",
"work",
"with",
"a",
"particular",
"dataset",
"and",
"model",
".",
"When",
"the",
"composite",
"distance",
"is",
"applied",
"in",
"a",
"particular",
"context",
"the",
"feature",
"types",
"and",
"individual",
"distance",
"functions",
"must",
"be",
"appropriate",
"for",
"the",
"given",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/distances/_util.py#L232-L301 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py | GetMessages | def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
for file_proto in file_protos:
_FACTORY.pool.Add(file_proto)
return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) | python | def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
for file_proto in file_protos:
_FACTORY.pool.Add(file_proto)
return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) | [
"def",
"GetMessages",
"(",
"file_protos",
")",
":",
"for",
"file_proto",
"in",
"file_protos",
":",
"_FACTORY",
".",
"pool",
".",
"Add",
"(",
"file_proto",
")",
"return",
"_FACTORY",
".",
"GetMessages",
"(",
"[",
"file_proto",
".",
"name",
"for",
"file_proto",
"in",
"file_protos",
"]",
")"
] | Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message. | [
"Builds",
"a",
"dictionary",
"of",
"all",
"the",
"messages",
"available",
"in",
"a",
"set",
"of",
"files",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py#L129-L142 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py | MessageFactory.GetPrototype | def GetPrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor.full_name not in self._classes:
descriptor_name = descriptor.name
if str is bytes: # PY2
descriptor_name = descriptor.name.encode('ascii', 'ignore')
result_class = reflection.GeneratedProtocolMessageType(
descriptor_name,
(message.Message,),
{'DESCRIPTOR': descriptor, '__module__': None})
# If module not set, it wrongly points to the reflection.py module.
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return self._classes[descriptor.full_name] | python | def GetPrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor.full_name not in self._classes:
descriptor_name = descriptor.name
if str is bytes: # PY2
descriptor_name = descriptor.name.encode('ascii', 'ignore')
result_class = reflection.GeneratedProtocolMessageType(
descriptor_name,
(message.Message,),
{'DESCRIPTOR': descriptor, '__module__': None})
# If module not set, it wrongly points to the reflection.py module.
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return self._classes[descriptor.full_name] | [
"def",
"GetPrototype",
"(",
"self",
",",
"descriptor",
")",
":",
"if",
"descriptor",
".",
"full_name",
"not",
"in",
"self",
".",
"_classes",
":",
"descriptor_name",
"=",
"descriptor",
".",
"name",
"if",
"str",
"is",
"bytes",
":",
"# PY2",
"descriptor_name",
"=",
"descriptor",
".",
"name",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"result_class",
"=",
"reflection",
".",
"GeneratedProtocolMessageType",
"(",
"descriptor_name",
",",
"(",
"message",
".",
"Message",
",",
")",
",",
"{",
"'DESCRIPTOR'",
":",
"descriptor",
",",
"'__module__'",
":",
"None",
"}",
")",
"# If module not set, it wrongly points to the reflection.py module.",
"self",
".",
"_classes",
"[",
"descriptor",
".",
"full_name",
"]",
"=",
"result_class",
"for",
"field",
"in",
"descriptor",
".",
"fields",
":",
"if",
"field",
".",
"message_type",
":",
"self",
".",
"GetPrototype",
"(",
"field",
".",
"message_type",
")",
"for",
"extension",
"in",
"result_class",
".",
"DESCRIPTOR",
".",
"extensions",
":",
"if",
"extension",
".",
"containing_type",
".",
"full_name",
"not",
"in",
"self",
".",
"_classes",
":",
"self",
".",
"GetPrototype",
"(",
"extension",
".",
"containing_type",
")",
"extended_class",
"=",
"self",
".",
"_classes",
"[",
"extension",
".",
"containing_type",
".",
"full_name",
"]",
"extended_class",
".",
"RegisterExtension",
"(",
"extension",
")",
"return",
"self",
".",
"_classes",
"[",
"descriptor",
".",
"full_name",
"]"
] | Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor. | [
"Builds",
"a",
"proto2",
"message",
"class",
"based",
"on",
"the",
"passed",
"in",
"descriptor",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py#L57-L87 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py | MessageFactory.GetMessages | def GetMessages(self, files):
"""Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for desc in file_desc.message_types_by_name.values():
result[desc.full_name] = self.GetPrototype(desc)
# While the extension FieldDescriptors are created by the descriptor pool,
# the python classes created in the factory need them to be registered
# explicitly, which is done below.
#
# The call to RegisterExtension will specifically check if the
# extension was already registered on the object and either
# ignore the registration if the original was the same, or raise
# an error if they were different.
for extension in file_desc.extensions_by_name.values():
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return result | python | def GetMessages(self, files):
"""Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for desc in file_desc.message_types_by_name.values():
result[desc.full_name] = self.GetPrototype(desc)
# While the extension FieldDescriptors are created by the descriptor pool,
# the python classes created in the factory need them to be registered
# explicitly, which is done below.
#
# The call to RegisterExtension will specifically check if the
# extension was already registered on the object and either
# ignore the registration if the original was the same, or raise
# an error if they were different.
for extension in file_desc.extensions_by_name.values():
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return result | [
"def",
"GetMessages",
"(",
"self",
",",
"files",
")",
":",
"result",
"=",
"{",
"}",
"for",
"file_name",
"in",
"files",
":",
"file_desc",
"=",
"self",
".",
"pool",
".",
"FindFileByName",
"(",
"file_name",
")",
"for",
"desc",
"in",
"file_desc",
".",
"message_types_by_name",
".",
"values",
"(",
")",
":",
"result",
"[",
"desc",
".",
"full_name",
"]",
"=",
"self",
".",
"GetPrototype",
"(",
"desc",
")",
"# While the extension FieldDescriptors are created by the descriptor pool,",
"# the python classes created in the factory need them to be registered",
"# explicitly, which is done below.",
"#",
"# The call to RegisterExtension will specifically check if the",
"# extension was already registered on the object and either",
"# ignore the registration if the original was the same, or raise",
"# an error if they were different.",
"for",
"extension",
"in",
"file_desc",
".",
"extensions_by_name",
".",
"values",
"(",
")",
":",
"if",
"extension",
".",
"containing_type",
".",
"full_name",
"not",
"in",
"self",
".",
"_classes",
":",
"self",
".",
"GetPrototype",
"(",
"extension",
".",
"containing_type",
")",
"extended_class",
"=",
"self",
".",
"_classes",
"[",
"extension",
".",
"containing_type",
".",
"full_name",
"]",
"extended_class",
".",
"RegisterExtension",
"(",
"extension",
")",
"return",
"result"
] | Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message. | [
"Gets",
"all",
"the",
"messages",
"from",
"a",
"specified",
"file",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/message_factory.py#L89-L123 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/control_flow_instructions.py | refactor_ifs | def refactor_ifs(stmnt, ifs):
'''
for if statements in list comprehension
'''
if isinstance(stmnt, _ast.BoolOp):
test, right = stmnt.values
if isinstance(stmnt.op, _ast.Or):
test = _ast.UnaryOp(op=_ast.Not(), operand=test, lineno=0, col_offset=0)
ifs.append(test)
return refactor_ifs(right, ifs)
return stmnt | python | def refactor_ifs(stmnt, ifs):
'''
for if statements in list comprehension
'''
if isinstance(stmnt, _ast.BoolOp):
test, right = stmnt.values
if isinstance(stmnt.op, _ast.Or):
test = _ast.UnaryOp(op=_ast.Not(), operand=test, lineno=0, col_offset=0)
ifs.append(test)
return refactor_ifs(right, ifs)
return stmnt | [
"def",
"refactor_ifs",
"(",
"stmnt",
",",
"ifs",
")",
":",
"if",
"isinstance",
"(",
"stmnt",
",",
"_ast",
".",
"BoolOp",
")",
":",
"test",
",",
"right",
"=",
"stmnt",
".",
"values",
"if",
"isinstance",
"(",
"stmnt",
".",
"op",
",",
"_ast",
".",
"Or",
")",
":",
"test",
"=",
"_ast",
".",
"UnaryOp",
"(",
"op",
"=",
"_ast",
".",
"Not",
"(",
")",
",",
"operand",
"=",
"test",
",",
"lineno",
"=",
"0",
",",
"col_offset",
"=",
"0",
")",
"ifs",
".",
"append",
"(",
"test",
")",
"return",
"refactor_ifs",
"(",
"right",
",",
"ifs",
")",
"return",
"stmnt"
] | for if statements in list comprehension | [
"for",
"if",
"statements",
"in",
"list",
"comprehension"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/control_flow_instructions.py#L58-L70 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/control_flow_instructions.py | CtrlFlowInstructions.MAP_ADD | def MAP_ADD(self, instr):
key = self.ast_stack.pop()
value = self.ast_stack.pop()
self.ast_stack.append((key, value))
'NOP' | python | def MAP_ADD(self, instr):
key = self.ast_stack.pop()
value = self.ast_stack.pop()
self.ast_stack.append((key, value))
'NOP' | [
"def",
"MAP_ADD",
"(",
"self",
",",
"instr",
")",
":",
"key",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"value",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"self",
".",
"ast_stack",
".",
"append",
"(",
"(",
"key",
",",
"value",
")",
")"
] | NOP | [
"NOP"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/control_flow_instructions.py#L551-L556 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/object_detector.py | _get_mps_od_net | def _get_mps_od_net(input_image_shape, batch_size, output_size, anchors,
config, weights={}):
"""
Initializes an MpsGraphAPI for object detection.
"""
network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
c_in, h_in, w_in = input_image_shape
c_out = output_size
h_out = h_in // 32
w_out = w_in // 32
c_view = c_in
h_view = h_in
w_view = w_in
network.init(batch_size, c_in, h_in, w_in, c_out, h_out, w_out,
weights=weights, config=config)
return network | python | def _get_mps_od_net(input_image_shape, batch_size, output_size, anchors,
config, weights={}):
"""
Initializes an MpsGraphAPI for object detection.
"""
network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
c_in, h_in, w_in = input_image_shape
c_out = output_size
h_out = h_in // 32
w_out = w_in // 32
c_view = c_in
h_view = h_in
w_view = w_in
network.init(batch_size, c_in, h_in, w_in, c_out, h_out, w_out,
weights=weights, config=config)
return network | [
"def",
"_get_mps_od_net",
"(",
"input_image_shape",
",",
"batch_size",
",",
"output_size",
",",
"anchors",
",",
"config",
",",
"weights",
"=",
"{",
"}",
")",
":",
"network",
"=",
"_MpsGraphAPI",
"(",
"network_id",
"=",
"_MpsGraphNetworkType",
".",
"kODGraphNet",
")",
"c_in",
",",
"h_in",
",",
"w_in",
"=",
"input_image_shape",
"c_out",
"=",
"output_size",
"h_out",
"=",
"h_in",
"//",
"32",
"w_out",
"=",
"w_in",
"//",
"32",
"c_view",
"=",
"c_in",
"h_view",
"=",
"h_in",
"w_view",
"=",
"w_in",
"network",
".",
"init",
"(",
"batch_size",
",",
"c_in",
",",
"h_in",
",",
"w_in",
",",
"c_out",
",",
"h_out",
",",
"w_out",
",",
"weights",
"=",
"weights",
",",
"config",
"=",
"config",
")",
"return",
"network"
] | Initializes an MpsGraphAPI for object detection. | [
"Initializes",
"an",
"MpsGraphAPI",
"for",
"object",
"detection",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/object_detector.py#L44-L62 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/object_detector.py | create | def create(dataset, annotations=None, feature=None, model='darknet-yolo',
classes=None, batch_size=0, max_iterations=0, verbose=True,
**kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
model : string optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
classes : list optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
The number of training iterations. If 0, then it will be automatically
be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
# Make predictions on the training set and as column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, "dataset")
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
from .._mxnet import _mxnet_utils
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature)
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset,
target_type=[list, dict],
type_name='list',
col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations)
_raise_error_if_not_detection_sframe(dataset, feature, annotations,
require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model,
supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
params = {
'anchors': [
(1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
(2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
(4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
(8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
(16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
],
'grid_shape': [13, 13],
'aug_resize': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_min_object_covered': 0,
'aug_min_eject_coverage': 0.5,
'aug_area_range': (.15, 2),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
'lmb_coord_xy': 10.0,
'lmb_coord_wh': 10.0,
'lmb_obj': 100.0,
'lmb_noobj': 5.0,
'lmb_class': 2.0,
'non_maximum_suppression_threshold': 0.45,
'rescore': True,
'clip_gradients': 0.025,
'weight_decay': 0.0005,
'sgd_momentum': 0.9,
'learning_rate': 1.0e-3,
'shuffle': True,
'mps_loss_mult': 8,
# This large buffer size (8 batches) is an attempt to mitigate against
# the SFrame shuffle operation that can occur after each epoch.
'io_thread_buffer_size': 8,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
anchors = params['anchors']
num_anchors = len(anchors)
if batch_size < 1:
batch_size = 32 # Default if not user-specified
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=batch_size)
num_mxnet_gpus = len(cuda_gpus)
use_mps = _use_mps() and num_mxnet_gpus == 0
batch_size_each = batch_size // max(num_mxnet_gpus, 1)
if use_mps and _mps_device_memory_limit() < 4 * 1024 * 1024 * 1024:
# Reduce batch size for GPUs with less than 4GB RAM
batch_size_each = 16
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
if verbose:
print("Setting 'batch_size' to {}".format(batch_size))
# The IO thread also handles MXNet-powered data augmentation. This seems
# to be problematic to run independently of a MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 550 + batch_size_each * 85
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=use_mps,
cuda_mem_req=cuda_mem_req)
grid_shape = params['grid_shape']
input_image_shape = (3,
grid_shape[0] * ref_model.spatial_reduction,
grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = (dataset.stack(annotations, new_column_name='_bbox', drop_na=True)
.unpack('_bbox', limit=['label']))
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label'])
except (TypeError, RuntimeError):
# If this fails, the annotation format is invalid at the coarsest level
raise _ToolkitError("Annotations format is invalid. Must be a list of "
"dictionaries or single dictionary containing 'label' and 'coordinates'.")
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique()
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for index, name in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations))
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset,
batch_size=batch_size,
input_shape=input_image_shape[1:],
output_shape=grid_shape,
anchors=anchors,
class_to_index=class_to_index,
aug_params=params,
shuffle=params['shuffle'],
loader_type='augmented',
feature_column=feature,
annotations_column=annotations,
io_thread_buffer_size=io_thread_buffer_size,
iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:],
output_shape=grid_shape,
batch_size=batch_size_each,
num_classes=num_classes,
anchors=anchors,
parameters=params)
base_lr = params['learning_rate']
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10**(-i)) for i, step in enumerate(steps)]
steps, factors = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(0.00005), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
    """Fold `cur_loss` into the smoothed loss and, if verbose, print progress.

    Closure over `progress`, `verbose`, `hr`, `column_names`, `num_columns`,
    `column_width`, `max_iterations`, and `start_time` from the enclosing
    scope. `iteration` is 0-based; rows are printed 1-based.
    """
    iteration_base1 = iteration + 1
    if progress['smoothed_loss'] is None:
        # First observation: seed the moving average directly.
        progress['smoothed_loss'] = cur_loss
    else:
        # Exponential moving average (decay 0.9) to stabilize the displayed loss.
        progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
    cur_time = _time.time()
    # Printing of table header is deferred, so that start-of-training
    # warnings appear above the table
    if verbose and iteration == 0:
        # Print progress table header
        print(hr)
        print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
        print(hr)
    # Throttle rows to at most one every 10 seconds, plus the final row.
    # NOTE(review): the final-row test compares against `max_iterations` (the
    # user argument, which may be 0 when auto-determined), not the resolved
    # `num_iterations` — confirm the last row still prints in the auto case.
    if verbose and (cur_time > progress['last_time'] + 10 or
                    iteration_base1 == max_iterations):
        # Print progress table row
        elapsed_time = cur_time - start_time
        print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
            cur_iter=iteration_base1, loss=progress['smoothed_loss'],
            time=elapsed_time , width=column_width-1))
        progress['last_time'] = cur_time
if use_mps:
# Force initialization of net_params
# TODO: Do not rely on MXNet to initialize MPS-based network
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = net_params[k].data().asnumpy()
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
mps_loss_mult = params['mps_loss_mult']
mps_config = {
'mode': _MpsGraphMode.Train,
'use_sgd': True,
'learning_rate': base_lr / params['mps_loss_mult'],
'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult,
'weight_decay': params['weight_decay'],
'od_include_network': True,
'od_include_loss': True,
'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult,
'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult,
'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult,
'od_scale_object': params['lmb_obj'] * mps_loss_mult,
'od_scale_class': params['lmb_class'] * mps_loss_mult,
'od_max_iou_for_no_object': 0.3,
'od_min_iou_for_object': 0.7,
'od_rescore': params['rescore'],
}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape,
batch_size=batch_size,
output_size=output_size,
anchors=anchors,
config=mps_config,
weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
    """Stream batches from the SFrame loader into ``mxnet_batch_queue``.

    Each batch is handed off as soon as it is available so image I/O and
    decoding for the next batch can proceed concurrently. A trailing ``None``
    sentinel marks end-of-stream for the consumer.
    """
    for loaded_batch in loader:
        mxnet_batch_queue.put(loaded_batch)
    # Signal end of stream to numpy_worker.
    mxnet_batch_queue.put(None)
def numpy_worker():
    """Convert MXNet batches to packed float32 NumPy arrays for MPS.

    Consumes ``mxnet_batch_queue`` until the ``None`` sentinel arrives, pushes
    converted batches onto ``numpy_batch_queue``, then forwards a matching
    ``None`` sentinel to the main thread.
    """
    def _packed_f32(arr):
        # MPS requires C-contiguous float32 buffers; copy only if needed.
        converted = arr.astype(_np.float32)
        return converted if converted.flags.c_contiguous else converted.copy()

    # iter(get, None) stops cleanly on the end-of-stream sentinel.
    for batch in iter(mxnet_batch_queue.get, None):
        for x, y in zip(batch.data, batch.label):
            # asnumpy blocks until pending MXNet ops (data augmentation) finish.
            input_data = _mxnet_to_mps(x.asnumpy())
            label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
            # Push this batch to the main thread.
            numpy_batch_queue.put({'input' : _packed_f32(input_data),
                                   'label' : _packed_f32(label_data),
                                   'iteration' : batch.iteration})
    # Tell the main thread there's no more data.
    numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batch_queue = []
def wait_for_batch():
    """Block on the oldest in-flight MPS batch and return its scalar loss.

    Dividing by ``mps_loss_mult`` undoes the fp16 loss scaling, so the value
    reported matches the unscaled loss.
    """
    oldest_pending = batch_queue.pop(0)
    # asnumpy blocks until the MPS computation for this batch completes.
    loss_array = oldest_pending.asnumpy()
    return loss_array.sum() / mps_loss_mult
while True:
batch = numpy_batch_queue.get()
if batch is None:
break
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult)
# Submit this batch to MPS.
batch_queue.append(mps_net.train(batch['input'], batch['label']))
# If we have two batches in flight, wait for the first one.
if len(batch_queue) > 1:
cur_loss = wait_for_batch()
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration)
iteration = batch['iteration']
# Wait for any pending batches and finalize our progress updates.
while len(batch_queue) > 0:
cur_loss = wait_for_batch()
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(mps_net_params[k])
else: # Use MxNet
net.hybridize()
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler,
'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for x, y in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L)
for L in Ls:
L.backward()
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer
# Save the model
training_iterations = iteration + 1
state = {
'_model': net,
'_class_to_index': class_to_index,
'_training_time_as_string': _seconds_as_string(training_time),
'_grid_shape': grid_shape,
'anchors': anchors,
'model': model,
'classes': classes,
'batch_size': batch_size,
'input_image_shape': input_image_shape,
'feature': feature,
'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'],
'annotations': annotations,
'num_classes': num_classes,
'num_examples': num_images,
'num_bounding_boxes': num_instances,
'training_time': training_time,
'training_epochs': training_iterations * batch_size // num_images,
'training_iterations': training_iterations,
'max_iterations': max_iterations,
'training_loss': progress['smoothed_loss'],
}
return ObjectDetector(state) | python | def create(dataset, annotations=None, feature=None, model='darknet-yolo',
classes=None, batch_size=0, max_iterations=0, verbose=True,
**kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
model : string optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
classes : list optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
The number of training iterations. If 0, then it will be automatically
be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
# Make predictions on the training set and add as a column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, "dataset")
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
from .._mxnet import _mxnet_utils
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature)
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset,
target_type=[list, dict],
type_name='list',
col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations)
_raise_error_if_not_detection_sframe(dataset, feature, annotations,
require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model,
supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
params = {
'anchors': [
(1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
(2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
(4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
(8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
(16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
],
'grid_shape': [13, 13],
'aug_resize': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_min_object_covered': 0,
'aug_min_eject_coverage': 0.5,
'aug_area_range': (.15, 2),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
'lmb_coord_xy': 10.0,
'lmb_coord_wh': 10.0,
'lmb_obj': 100.0,
'lmb_noobj': 5.0,
'lmb_class': 2.0,
'non_maximum_suppression_threshold': 0.45,
'rescore': True,
'clip_gradients': 0.025,
'weight_decay': 0.0005,
'sgd_momentum': 0.9,
'learning_rate': 1.0e-3,
'shuffle': True,
'mps_loss_mult': 8,
# This large buffer size (8 batches) is an attempt to mitigate against
# the SFrame shuffle operation that can occur after each epoch.
'io_thread_buffer_size': 8,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
anchors = params['anchors']
num_anchors = len(anchors)
if batch_size < 1:
batch_size = 32 # Default if not user-specified
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=batch_size)
num_mxnet_gpus = len(cuda_gpus)
use_mps = _use_mps() and num_mxnet_gpus == 0
batch_size_each = batch_size // max(num_mxnet_gpus, 1)
if use_mps and _mps_device_memory_limit() < 4 * 1024 * 1024 * 1024:
# Reduce batch size for GPUs with less than 4GB RAM
batch_size_each = 16
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
if verbose:
print("Setting 'batch_size' to {}".format(batch_size))
# The IO thread also handles MXNet-powered data augmentation. This seems
# to be problematic to run independently of a MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 550 + batch_size_each * 85
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=use_mps,
cuda_mem_req=cuda_mem_req)
grid_shape = params['grid_shape']
input_image_shape = (3,
grid_shape[0] * ref_model.spatial_reduction,
grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = (dataset.stack(annotations, new_column_name='_bbox', drop_na=True)
.unpack('_bbox', limit=['label']))
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label'])
except (TypeError, RuntimeError):
# If this fails, the annotation format is invalid at the coarsest level
raise _ToolkitError("Annotations format is invalid. Must be a list of "
"dictionaries or single dictionary containing 'label' and 'coordinates'.")
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique()
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for index, name in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations))
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset,
batch_size=batch_size,
input_shape=input_image_shape[1:],
output_shape=grid_shape,
anchors=anchors,
class_to_index=class_to_index,
aug_params=params,
shuffle=params['shuffle'],
loader_type='augmented',
feature_column=feature,
annotations_column=annotations,
io_thread_buffer_size=io_thread_buffer_size,
iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:],
output_shape=grid_shape,
batch_size=batch_size_each,
num_classes=num_classes,
anchors=anchors,
parameters=params)
base_lr = params['learning_rate']
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10**(-i)) for i, step in enumerate(steps)]
steps, factors = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(0.00005), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
    """Fold `cur_loss` into the smoothed loss and, if verbose, print progress.

    Closure over `progress`, `verbose`, `hr`, `column_names`, `num_columns`,
    `column_width`, `max_iterations`, and `start_time` from the enclosing
    scope. `iteration` is 0-based; rows are printed 1-based.
    """
    iteration_base1 = iteration + 1
    if progress['smoothed_loss'] is None:
        # First observation: seed the moving average directly.
        progress['smoothed_loss'] = cur_loss
    else:
        # Exponential moving average (decay 0.9) to stabilize the displayed loss.
        progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
    cur_time = _time.time()
    # Printing of table header is deferred, so that start-of-training
    # warnings appear above the table
    if verbose and iteration == 0:
        # Print progress table header
        print(hr)
        print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
        print(hr)
    # Throttle rows to at most one every 10 seconds, plus the final row.
    # NOTE(review): the final-row test compares against `max_iterations` (the
    # user argument, which may be 0 when auto-determined), not the resolved
    # `num_iterations` — confirm the last row still prints in the auto case.
    if verbose and (cur_time > progress['last_time'] + 10 or
                    iteration_base1 == max_iterations):
        # Print progress table row
        elapsed_time = cur_time - start_time
        print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
            cur_iter=iteration_base1, loss=progress['smoothed_loss'],
            time=elapsed_time , width=column_width-1))
        progress['last_time'] = cur_time
if use_mps:
# Force initialization of net_params
# TODO: Do not rely on MXNet to initialize MPS-based network
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = net_params[k].data().asnumpy()
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
mps_loss_mult = params['mps_loss_mult']
mps_config = {
'mode': _MpsGraphMode.Train,
'use_sgd': True,
'learning_rate': base_lr / params['mps_loss_mult'],
'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult,
'weight_decay': params['weight_decay'],
'od_include_network': True,
'od_include_loss': True,
'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult,
'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult,
'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult,
'od_scale_object': params['lmb_obj'] * mps_loss_mult,
'od_scale_class': params['lmb_class'] * mps_loss_mult,
'od_max_iou_for_no_object': 0.3,
'od_min_iou_for_object': 0.7,
'od_rescore': params['rescore'],
}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape,
batch_size=batch_size,
output_size=output_size,
anchors=anchors,
config=mps_config,
weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
    """Stream batches from the SFrame loader into ``mxnet_batch_queue``.

    Each batch is handed off as soon as it is available so image I/O and
    decoding for the next batch can proceed concurrently. A trailing ``None``
    sentinel marks end-of-stream for the consumer.
    """
    for loaded_batch in loader:
        mxnet_batch_queue.put(loaded_batch)
    # Signal end of stream to numpy_worker.
    mxnet_batch_queue.put(None)
def numpy_worker():
    """Convert MXNet batches to packed float32 NumPy arrays for MPS.

    Consumes ``mxnet_batch_queue`` until the ``None`` sentinel arrives, pushes
    converted batches onto ``numpy_batch_queue``, then forwards a matching
    ``None`` sentinel to the main thread.
    """
    def _packed_f32(arr):
        # MPS requires C-contiguous float32 buffers; copy only if needed.
        converted = arr.astype(_np.float32)
        return converted if converted.flags.c_contiguous else converted.copy()

    # iter(get, None) stops cleanly on the end-of-stream sentinel.
    for batch in iter(mxnet_batch_queue.get, None):
        for x, y in zip(batch.data, batch.label):
            # asnumpy blocks until pending MXNet ops (data augmentation) finish.
            input_data = _mxnet_to_mps(x.asnumpy())
            label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
            # Push this batch to the main thread.
            numpy_batch_queue.put({'input' : _packed_f32(input_data),
                                   'label' : _packed_f32(label_data),
                                   'iteration' : batch.iteration})
    # Tell the main thread there's no more data.
    numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batch_queue = []
def wait_for_batch():
pending_loss = batch_queue.pop(0)
batch_loss = pending_loss.asnumpy() # Waits for the batch to finish
return batch_loss.sum() / mps_loss_mult
while True:
batch = numpy_batch_queue.get()
if batch is None:
break
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult)
# Submit this match to MPS.
batch_queue.append(mps_net.train(batch['input'], batch['label']))
# If we have two batches in flight, wait for the first one.
if len(batch_queue) > 1:
cur_loss = wait_for_batch()
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration)
iteration = batch['iteration']
# Wait for any pending batches and finalize our progress updates.
while len(batch_queue) > 0:
cur_loss = wait_for_batch()
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(mps_net_params[k])
else: # Use MxNet
net.hybridize()
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler,
'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for x, y in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L)
for L in Ls:
L.backward()
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer
# Save the model
training_iterations = iteration + 1
state = {
'_model': net,
'_class_to_index': class_to_index,
'_training_time_as_string': _seconds_as_string(training_time),
'_grid_shape': grid_shape,
'anchors': anchors,
'model': model,
'classes': classes,
'batch_size': batch_size,
'input_image_shape': input_image_shape,
'feature': feature,
'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'],
'annotations': annotations,
'num_classes': num_classes,
'num_examples': num_images,
'num_bounding_boxes': num_instances,
'training_time': training_time,
'training_epochs': training_iterations * batch_size // num_images,
'training_iterations': training_iterations,
'max_iterations': max_iterations,
'training_loss': progress['smoothed_loss'],
}
return ObjectDetector(state) | [
"def",
"create",
"(",
"dataset",
",",
"annotations",
"=",
"None",
",",
"feature",
"=",
"None",
",",
"model",
"=",
"'darknet-yolo'",
",",
"classes",
"=",
"None",
",",
"batch_size",
"=",
"0",
",",
"max_iterations",
"=",
"0",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"from",
".",
"_mx_detector",
"import",
"YOLOLoss",
"as",
"_YOLOLoss",
"from",
".",
"_model",
"import",
"tiny_darknet",
"as",
"_tiny_darknet",
"from",
".",
"_sframe_loader",
"import",
"SFrameDetectionIter",
"as",
"_SFrameDetectionIter",
"from",
".",
"_manual_scheduler",
"import",
"ManualScheduler",
"as",
"_ManualScheduler",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"if",
"len",
"(",
"dataset",
")",
"==",
"0",
":",
"raise",
"_ToolkitError",
"(",
"'Unable to train on empty dataset'",
")",
"_numeric_param_check_range",
"(",
"'max_iterations'",
",",
"max_iterations",
",",
"0",
",",
"_six",
".",
"MAXSIZE",
")",
"start_time",
"=",
"_time",
".",
"time",
"(",
")",
"supported_detectors",
"=",
"[",
"'darknet-yolo'",
"]",
"if",
"feature",
"is",
"None",
":",
"feature",
"=",
"_tkutl",
".",
"_find_only_image_column",
"(",
"dataset",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Using '%s' as feature column\"",
"%",
"feature",
")",
"if",
"annotations",
"is",
"None",
":",
"annotations",
"=",
"_tkutl",
".",
"_find_only_column_of_type",
"(",
"dataset",
",",
"target_type",
"=",
"[",
"list",
",",
"dict",
"]",
",",
"type_name",
"=",
"'list'",
",",
"col_name",
"=",
"'annotations'",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Using '%s' as annotations column\"",
"%",
"annotations",
")",
"_raise_error_if_not_detection_sframe",
"(",
"dataset",
",",
"feature",
",",
"annotations",
",",
"require_annotations",
"=",
"True",
")",
"is_annotations_list",
"=",
"dataset",
"[",
"annotations",
"]",
".",
"dtype",
"==",
"list",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'model'",
",",
"model",
",",
"supported_detectors",
")",
"base_model",
"=",
"model",
".",
"split",
"(",
"'-'",
",",
"1",
")",
"[",
"0",
"]",
"ref_model",
"=",
"_pre_trained_models",
".",
"OBJECT_DETECTION_BASE_MODELS",
"[",
"base_model",
"]",
"(",
")",
"params",
"=",
"{",
"'anchors'",
":",
"[",
"(",
"1.0",
",",
"2.0",
")",
",",
"(",
"1.0",
",",
"1.0",
")",
",",
"(",
"2.0",
",",
"1.0",
")",
",",
"(",
"2.0",
",",
"4.0",
")",
",",
"(",
"2.0",
",",
"2.0",
")",
",",
"(",
"4.0",
",",
"2.0",
")",
",",
"(",
"4.0",
",",
"8.0",
")",
",",
"(",
"4.0",
",",
"4.0",
")",
",",
"(",
"8.0",
",",
"4.0",
")",
",",
"(",
"8.0",
",",
"16.0",
")",
",",
"(",
"8.0",
",",
"8.0",
")",
",",
"(",
"16.0",
",",
"8.0",
")",
",",
"(",
"16.0",
",",
"32.0",
")",
",",
"(",
"16.0",
",",
"16.0",
")",
",",
"(",
"32.0",
",",
"16.0",
")",
",",
"]",
",",
"'grid_shape'",
":",
"[",
"13",
",",
"13",
"]",
",",
"'aug_resize'",
":",
"0",
",",
"'aug_rand_crop'",
":",
"0.9",
",",
"'aug_rand_pad'",
":",
"0.9",
",",
"'aug_rand_gray'",
":",
"0.0",
",",
"'aug_aspect_ratio'",
":",
"1.25",
",",
"'aug_hue'",
":",
"0.05",
",",
"'aug_brightness'",
":",
"0.05",
",",
"'aug_saturation'",
":",
"0.05",
",",
"'aug_contrast'",
":",
"0.05",
",",
"'aug_horizontal_flip'",
":",
"True",
",",
"'aug_min_object_covered'",
":",
"0",
",",
"'aug_min_eject_coverage'",
":",
"0.5",
",",
"'aug_area_range'",
":",
"(",
".15",
",",
"2",
")",
",",
"'aug_pca_noise'",
":",
"0.0",
",",
"'aug_max_attempts'",
":",
"20",
",",
"'aug_inter_method'",
":",
"2",
",",
"'lmb_coord_xy'",
":",
"10.0",
",",
"'lmb_coord_wh'",
":",
"10.0",
",",
"'lmb_obj'",
":",
"100.0",
",",
"'lmb_noobj'",
":",
"5.0",
",",
"'lmb_class'",
":",
"2.0",
",",
"'non_maximum_suppression_threshold'",
":",
"0.45",
",",
"'rescore'",
":",
"True",
",",
"'clip_gradients'",
":",
"0.025",
",",
"'weight_decay'",
":",
"0.0005",
",",
"'sgd_momentum'",
":",
"0.9",
",",
"'learning_rate'",
":",
"1.0e-3",
",",
"'shuffle'",
":",
"True",
",",
"'mps_loss_mult'",
":",
"8",
",",
"# This large buffer size (8 batches) is an attempt to mitigate against",
"# the SFrame shuffle operation that can occur after each epoch.",
"'io_thread_buffer_size'",
":",
"8",
",",
"}",
"if",
"'_advanced_parameters'",
"in",
"kwargs",
":",
"# Make sure no additional parameters are provided",
"new_keys",
"=",
"set",
"(",
"kwargs",
"[",
"'_advanced_parameters'",
"]",
".",
"keys",
"(",
")",
")",
"set_keys",
"=",
"set",
"(",
"params",
".",
"keys",
"(",
")",
")",
"unsupported",
"=",
"new_keys",
"-",
"set_keys",
"if",
"unsupported",
":",
"raise",
"_ToolkitError",
"(",
"'Unknown advanced parameters: {}'",
".",
"format",
"(",
"unsupported",
")",
")",
"params",
".",
"update",
"(",
"kwargs",
"[",
"'_advanced_parameters'",
"]",
")",
"anchors",
"=",
"params",
"[",
"'anchors'",
"]",
"num_anchors",
"=",
"len",
"(",
"anchors",
")",
"if",
"batch_size",
"<",
"1",
":",
"batch_size",
"=",
"32",
"# Default if not user-specified",
"cuda_gpus",
"=",
"_mxnet_utils",
".",
"get_gpus_in_use",
"(",
"max_devices",
"=",
"batch_size",
")",
"num_mxnet_gpus",
"=",
"len",
"(",
"cuda_gpus",
")",
"use_mps",
"=",
"_use_mps",
"(",
")",
"and",
"num_mxnet_gpus",
"==",
"0",
"batch_size_each",
"=",
"batch_size",
"//",
"max",
"(",
"num_mxnet_gpus",
",",
"1",
")",
"if",
"use_mps",
"and",
"_mps_device_memory_limit",
"(",
")",
"<",
"4",
"*",
"1024",
"*",
"1024",
"*",
"1024",
":",
"# Reduce batch size for GPUs with less than 4GB RAM",
"batch_size_each",
"=",
"16",
"# Note, this may slightly alter the batch size to fit evenly on the GPUs",
"batch_size",
"=",
"max",
"(",
"num_mxnet_gpus",
",",
"1",
")",
"*",
"batch_size_each",
"if",
"verbose",
":",
"print",
"(",
"\"Setting 'batch_size' to {}\"",
".",
"format",
"(",
"batch_size",
")",
")",
"# The IO thread also handles MXNet-powered data augmentation. This seems",
"# to be problematic to run independently of a MXNet-powered neural network",
"# in a separate thread. For this reason, we restrict IO threads to when",
"# the neural network backend is MPS.",
"io_thread_buffer_size",
"=",
"params",
"[",
"'io_thread_buffer_size'",
"]",
"if",
"use_mps",
"else",
"0",
"if",
"verbose",
":",
"# Estimate memory usage (based on experiments)",
"cuda_mem_req",
"=",
"550",
"+",
"batch_size_each",
"*",
"85",
"_tkutl",
".",
"_print_neural_compute_device",
"(",
"cuda_gpus",
"=",
"cuda_gpus",
",",
"use_mps",
"=",
"use_mps",
",",
"cuda_mem_req",
"=",
"cuda_mem_req",
")",
"grid_shape",
"=",
"params",
"[",
"'grid_shape'",
"]",
"input_image_shape",
"=",
"(",
"3",
",",
"grid_shape",
"[",
"0",
"]",
"*",
"ref_model",
".",
"spatial_reduction",
",",
"grid_shape",
"[",
"1",
"]",
"*",
"ref_model",
".",
"spatial_reduction",
")",
"try",
":",
"if",
"is_annotations_list",
":",
"instances",
"=",
"(",
"dataset",
".",
"stack",
"(",
"annotations",
",",
"new_column_name",
"=",
"'_bbox'",
",",
"drop_na",
"=",
"True",
")",
".",
"unpack",
"(",
"'_bbox'",
",",
"limit",
"=",
"[",
"'label'",
"]",
")",
")",
"else",
":",
"instances",
"=",
"dataset",
".",
"rename",
"(",
"{",
"annotations",
":",
"'_bbox'",
"}",
")",
".",
"dropna",
"(",
"'_bbox'",
")",
"instances",
"=",
"instances",
".",
"unpack",
"(",
"'_bbox'",
",",
"limit",
"=",
"[",
"'label'",
"]",
")",
"except",
"(",
"TypeError",
",",
"RuntimeError",
")",
":",
"# If this fails, the annotation format isinvalid at the coarsest level",
"raise",
"_ToolkitError",
"(",
"\"Annotations format is invalid. Must be a list of \"",
"\"dictionaries or single dictionary containing 'label' and 'coordinates'.\"",
")",
"num_images",
"=",
"len",
"(",
"dataset",
")",
"num_instances",
"=",
"len",
"(",
"instances",
")",
"if",
"classes",
"is",
"None",
":",
"classes",
"=",
"instances",
"[",
"'_bbox.label'",
"]",
".",
"unique",
"(",
")",
"classes",
"=",
"sorted",
"(",
"classes",
")",
"# Make a class-to-index look-up table",
"class_to_index",
"=",
"{",
"name",
":",
"index",
"for",
"index",
",",
"name",
"in",
"enumerate",
"(",
"classes",
")",
"}",
"num_classes",
"=",
"len",
"(",
"classes",
")",
"if",
"max_iterations",
"==",
"0",
":",
"# Set number of iterations through a heuristic",
"num_iterations_raw",
"=",
"5000",
"*",
"_np",
".",
"sqrt",
"(",
"num_instances",
")",
"/",
"batch_size",
"num_iterations",
"=",
"1000",
"*",
"max",
"(",
"1",
",",
"int",
"(",
"round",
"(",
"num_iterations_raw",
"/",
"1000",
")",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Setting 'max_iterations' to {}\"",
".",
"format",
"(",
"num_iterations",
")",
")",
"else",
":",
"num_iterations",
"=",
"max_iterations",
"# Create data loader",
"loader",
"=",
"_SFrameDetectionIter",
"(",
"dataset",
",",
"batch_size",
"=",
"batch_size",
",",
"input_shape",
"=",
"input_image_shape",
"[",
"1",
":",
"]",
",",
"output_shape",
"=",
"grid_shape",
",",
"anchors",
"=",
"anchors",
",",
"class_to_index",
"=",
"class_to_index",
",",
"aug_params",
"=",
"params",
",",
"shuffle",
"=",
"params",
"[",
"'shuffle'",
"]",
",",
"loader_type",
"=",
"'augmented'",
",",
"feature_column",
"=",
"feature",
",",
"annotations_column",
"=",
"annotations",
",",
"io_thread_buffer_size",
"=",
"io_thread_buffer_size",
",",
"iterations",
"=",
"num_iterations",
")",
"# Predictions per anchor box: x/y + w/h + object confidence + class probs",
"preds_per_box",
"=",
"5",
"+",
"num_classes",
"output_size",
"=",
"preds_per_box",
"*",
"num_anchors",
"ymap_shape",
"=",
"(",
"batch_size_each",
",",
")",
"+",
"tuple",
"(",
"grid_shape",
")",
"+",
"(",
"num_anchors",
",",
"preds_per_box",
")",
"net",
"=",
"_tiny_darknet",
"(",
"output_size",
"=",
"output_size",
")",
"loss",
"=",
"_YOLOLoss",
"(",
"input_shape",
"=",
"input_image_shape",
"[",
"1",
":",
"]",
",",
"output_shape",
"=",
"grid_shape",
",",
"batch_size",
"=",
"batch_size_each",
",",
"num_classes",
"=",
"num_classes",
",",
"anchors",
"=",
"anchors",
",",
"parameters",
"=",
"params",
")",
"base_lr",
"=",
"params",
"[",
"'learning_rate'",
"]",
"steps",
"=",
"[",
"num_iterations",
"//",
"2",
",",
"3",
"*",
"num_iterations",
"//",
"4",
",",
"num_iterations",
"]",
"steps_and_factors",
"=",
"[",
"(",
"step",
",",
"10",
"**",
"(",
"-",
"i",
")",
")",
"for",
"i",
",",
"step",
"in",
"enumerate",
"(",
"steps",
")",
"]",
"steps",
",",
"factors",
"=",
"zip",
"(",
"*",
"steps_and_factors",
")",
"lr_scheduler",
"=",
"_ManualScheduler",
"(",
"step",
"=",
"steps",
",",
"factor",
"=",
"factors",
")",
"ctx",
"=",
"_mxnet_utils",
".",
"get_mxnet_context",
"(",
"max_devices",
"=",
"batch_size",
")",
"net_params",
"=",
"net",
".",
"collect_params",
"(",
")",
"net_params",
".",
"initialize",
"(",
"_mx",
".",
"init",
".",
"Xavier",
"(",
")",
",",
"ctx",
"=",
"ctx",
")",
"net_params",
"[",
"'conv7_weight'",
"]",
".",
"initialize",
"(",
"_mx",
".",
"init",
".",
"Xavier",
"(",
"factor_type",
"=",
"'avg'",
")",
",",
"ctx",
"=",
"ctx",
",",
"force_reinit",
"=",
"True",
")",
"net_params",
"[",
"'conv8_weight'",
"]",
".",
"initialize",
"(",
"_mx",
".",
"init",
".",
"Uniform",
"(",
"0.00005",
")",
",",
"ctx",
"=",
"ctx",
",",
"force_reinit",
"=",
"True",
")",
"# Initialize object confidence low, preventing an unnecessary adjustment",
"# period toward conservative estimates",
"bias",
"=",
"_np",
".",
"zeros",
"(",
"output_size",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
"bias",
"[",
"4",
":",
":",
"preds_per_box",
"]",
"-=",
"6",
"from",
".",
"_mx_detector",
"import",
"ConstantArray",
"net_params",
"[",
"'conv8_bias'",
"]",
".",
"initialize",
"(",
"ConstantArray",
"(",
"bias",
")",
",",
"ctx",
",",
"force_reinit",
"=",
"True",
")",
"# Take a subset and then load the rest of the parameters. It is possible to",
"# do allow_missing=True directly on net_params. However, this will more",
"# easily hide bugs caused by names getting out of sync.",
"ref_model",
".",
"available_parameters_subset",
"(",
"net_params",
")",
".",
"load",
"(",
"ref_model",
".",
"model_path",
",",
"ctx",
")",
"column_names",
"=",
"[",
"'Iteration'",
",",
"'Loss'",
",",
"'Elapsed Time'",
"]",
"num_columns",
"=",
"len",
"(",
"column_names",
")",
"column_width",
"=",
"max",
"(",
"map",
"(",
"lambda",
"x",
":",
"len",
"(",
"x",
")",
",",
"column_names",
")",
")",
"+",
"2",
"hr",
"=",
"'+'",
"+",
"'+'",
".",
"join",
"(",
"[",
"'-'",
"*",
"column_width",
"]",
"*",
"num_columns",
")",
"+",
"'+'",
"progress",
"=",
"{",
"'smoothed_loss'",
":",
"None",
",",
"'last_time'",
":",
"0",
"}",
"iteration",
"=",
"0",
"def",
"update_progress",
"(",
"cur_loss",
",",
"iteration",
")",
":",
"iteration_base1",
"=",
"iteration",
"+",
"1",
"if",
"progress",
"[",
"'smoothed_loss'",
"]",
"is",
"None",
":",
"progress",
"[",
"'smoothed_loss'",
"]",
"=",
"cur_loss",
"else",
":",
"progress",
"[",
"'smoothed_loss'",
"]",
"=",
"0.9",
"*",
"progress",
"[",
"'smoothed_loss'",
"]",
"+",
"0.1",
"*",
"cur_loss",
"cur_time",
"=",
"_time",
".",
"time",
"(",
")",
"# Printing of table header is deferred, so that start-of-training",
"# warnings appear above the table",
"if",
"verbose",
"and",
"iteration",
"==",
"0",
":",
"# Print progress table header",
"print",
"(",
"hr",
")",
"print",
"(",
"(",
"'| {:<{width}}'",
"*",
"num_columns",
"+",
"'|'",
")",
".",
"format",
"(",
"*",
"column_names",
",",
"width",
"=",
"column_width",
"-",
"1",
")",
")",
"print",
"(",
"hr",
")",
"if",
"verbose",
"and",
"(",
"cur_time",
">",
"progress",
"[",
"'last_time'",
"]",
"+",
"10",
"or",
"iteration_base1",
"==",
"max_iterations",
")",
":",
"# Print progress table row",
"elapsed_time",
"=",
"cur_time",
"-",
"start_time",
"print",
"(",
"\"| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|\"",
".",
"format",
"(",
"cur_iter",
"=",
"iteration_base1",
",",
"loss",
"=",
"progress",
"[",
"'smoothed_loss'",
"]",
",",
"time",
"=",
"elapsed_time",
",",
"width",
"=",
"column_width",
"-",
"1",
")",
")",
"progress",
"[",
"'last_time'",
"]",
"=",
"cur_time",
"if",
"use_mps",
":",
"# Force initialization of net_params",
"# TODO: Do not rely on MXNet to initialize MPS-based network",
"net",
".",
"forward",
"(",
"_mx",
".",
"nd",
".",
"uniform",
"(",
"0",
",",
"1",
",",
"(",
"batch_size_each",
",",
")",
"+",
"input_image_shape",
")",
")",
"mps_net_params",
"=",
"{",
"}",
"keys",
"=",
"list",
"(",
"net_params",
")",
"for",
"k",
"in",
"keys",
":",
"mps_net_params",
"[",
"k",
"]",
"=",
"net_params",
"[",
"k",
"]",
".",
"data",
"(",
")",
".",
"asnumpy",
"(",
")",
"# Multiplies the loss to move the fp16 gradients away from subnormals",
"# and gradual underflow. The learning rate is correspondingly divided",
"# by the same multiple to make training mathematically equivalent. The",
"# update is done in fp32, which is why this trick works. Does not",
"# affect how loss is presented to the user.",
"mps_loss_mult",
"=",
"params",
"[",
"'mps_loss_mult'",
"]",
"mps_config",
"=",
"{",
"'mode'",
":",
"_MpsGraphMode",
".",
"Train",
",",
"'use_sgd'",
":",
"True",
",",
"'learning_rate'",
":",
"base_lr",
"/",
"params",
"[",
"'mps_loss_mult'",
"]",
",",
"'gradient_clipping'",
":",
"params",
".",
"get",
"(",
"'clip_gradients'",
",",
"0.0",
")",
"*",
"mps_loss_mult",
",",
"'weight_decay'",
":",
"params",
"[",
"'weight_decay'",
"]",
",",
"'od_include_network'",
":",
"True",
",",
"'od_include_loss'",
":",
"True",
",",
"'od_scale_xy'",
":",
"params",
"[",
"'lmb_coord_xy'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_wh'",
":",
"params",
"[",
"'lmb_coord_wh'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_no_object'",
":",
"params",
"[",
"'lmb_noobj'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_object'",
":",
"params",
"[",
"'lmb_obj'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_class'",
":",
"params",
"[",
"'lmb_class'",
"]",
"*",
"mps_loss_mult",
",",
"'od_max_iou_for_no_object'",
":",
"0.3",
",",
"'od_min_iou_for_object'",
":",
"0.7",
",",
"'od_rescore'",
":",
"params",
"[",
"'rescore'",
"]",
",",
"}",
"mps_net",
"=",
"_get_mps_od_net",
"(",
"input_image_shape",
"=",
"input_image_shape",
",",
"batch_size",
"=",
"batch_size",
",",
"output_size",
"=",
"output_size",
",",
"anchors",
"=",
"anchors",
",",
"config",
"=",
"mps_config",
",",
"weights",
"=",
"mps_net_params",
")",
"# Use worker threads to isolate different points of synchronization",
"# and/or waiting for non-Python tasks to finish. The",
"# sframe_worker_thread will spend most of its time waiting for SFrame",
"# operations, largely image I/O and decoding, along with scheduling",
"# MXNet data augmentation. The numpy_worker_thread will spend most of",
"# its time waiting for MXNet data augmentation to complete, along with",
"# copying the results into NumPy arrays. Finally, the main thread will",
"# spend most of its time copying NumPy data into MPS and waiting for the",
"# results. Note that using three threads here only makes sense because",
"# each thread spends time waiting for non-Python code to finish (so that",
"# no thread hogs the global interpreter lock).",
"mxnet_batch_queue",
"=",
"_Queue",
"(",
"1",
")",
"numpy_batch_queue",
"=",
"_Queue",
"(",
"1",
")",
"def",
"sframe_worker",
"(",
")",
":",
"# Once a batch is loaded into NumPy, pass it immediately to the",
"# numpy_worker so that we can start I/O and decoding for the next",
"# batch.",
"for",
"batch",
"in",
"loader",
":",
"mxnet_batch_queue",
".",
"put",
"(",
"batch",
")",
"mxnet_batch_queue",
".",
"put",
"(",
"None",
")",
"def",
"numpy_worker",
"(",
")",
":",
"while",
"True",
":",
"batch",
"=",
"mxnet_batch_queue",
".",
"get",
"(",
")",
"if",
"batch",
"is",
"None",
":",
"break",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"batch",
".",
"data",
",",
"batch",
".",
"label",
")",
":",
"# Convert to NumPy arrays with required shapes. Note that",
"# asnumpy waits for any pending MXNet operations to finish.",
"input_data",
"=",
"_mxnet_to_mps",
"(",
"x",
".",
"asnumpy",
"(",
")",
")",
"label_data",
"=",
"y",
".",
"asnumpy",
"(",
")",
".",
"reshape",
"(",
"y",
".",
"shape",
"[",
":",
"-",
"2",
"]",
"+",
"(",
"-",
"1",
",",
")",
")",
"# Convert to packed 32-bit arrays.",
"input_data",
"=",
"input_data",
".",
"astype",
"(",
"_np",
".",
"float32",
")",
"if",
"not",
"input_data",
".",
"flags",
".",
"c_contiguous",
":",
"input_data",
"=",
"input_data",
".",
"copy",
"(",
")",
"label_data",
"=",
"label_data",
".",
"astype",
"(",
"_np",
".",
"float32",
")",
"if",
"not",
"label_data",
".",
"flags",
".",
"c_contiguous",
":",
"label_data",
"=",
"label_data",
".",
"copy",
"(",
")",
"# Push this batch to the main thread.",
"numpy_batch_queue",
".",
"put",
"(",
"{",
"'input'",
":",
"input_data",
",",
"'label'",
":",
"label_data",
",",
"'iteration'",
":",
"batch",
".",
"iteration",
"}",
")",
"# Tell the main thread there's no more data.",
"numpy_batch_queue",
".",
"put",
"(",
"None",
")",
"sframe_worker_thread",
"=",
"_Thread",
"(",
"target",
"=",
"sframe_worker",
")",
"sframe_worker_thread",
".",
"start",
"(",
")",
"numpy_worker_thread",
"=",
"_Thread",
"(",
"target",
"=",
"numpy_worker",
")",
"numpy_worker_thread",
".",
"start",
"(",
")",
"batch_queue",
"=",
"[",
"]",
"def",
"wait_for_batch",
"(",
")",
":",
"pending_loss",
"=",
"batch_queue",
".",
"pop",
"(",
"0",
")",
"batch_loss",
"=",
"pending_loss",
".",
"asnumpy",
"(",
")",
"# Waits for the batch to finish",
"return",
"batch_loss",
".",
"sum",
"(",
")",
"/",
"mps_loss_mult",
"while",
"True",
":",
"batch",
"=",
"numpy_batch_queue",
".",
"get",
"(",
")",
"if",
"batch",
"is",
"None",
":",
"break",
"# Adjust learning rate according to our schedule.",
"if",
"batch",
"[",
"'iteration'",
"]",
"in",
"steps",
":",
"ii",
"=",
"steps",
".",
"index",
"(",
"batch",
"[",
"'iteration'",
"]",
")",
"+",
"1",
"new_lr",
"=",
"factors",
"[",
"ii",
"]",
"*",
"base_lr",
"mps_net",
".",
"set_learning_rate",
"(",
"new_lr",
"/",
"mps_loss_mult",
")",
"# Submit this match to MPS.",
"batch_queue",
".",
"append",
"(",
"mps_net",
".",
"train",
"(",
"batch",
"[",
"'input'",
"]",
",",
"batch",
"[",
"'label'",
"]",
")",
")",
"# If we have two batches in flight, wait for the first one.",
"if",
"len",
"(",
"batch_queue",
")",
">",
"1",
":",
"cur_loss",
"=",
"wait_for_batch",
"(",
")",
"# If we just submitted the first batch of an iteration, update",
"# progress for the iteration completed by the last batch we just",
"# waited for.",
"if",
"batch",
"[",
"'iteration'",
"]",
">",
"iteration",
":",
"update_progress",
"(",
"cur_loss",
",",
"iteration",
")",
"iteration",
"=",
"batch",
"[",
"'iteration'",
"]",
"# Wait for any pending batches and finalize our progress updates.",
"while",
"len",
"(",
"batch_queue",
")",
">",
"0",
":",
"cur_loss",
"=",
"wait_for_batch",
"(",
")",
"update_progress",
"(",
"cur_loss",
",",
"iteration",
")",
"sframe_worker_thread",
".",
"join",
"(",
")",
"numpy_worker_thread",
".",
"join",
"(",
")",
"# Load back into mxnet",
"mps_net_params",
"=",
"mps_net",
".",
"export",
"(",
")",
"keys",
"=",
"mps_net_params",
".",
"keys",
"(",
")",
"for",
"k",
"in",
"keys",
":",
"if",
"k",
"in",
"net_params",
":",
"net_params",
"[",
"k",
"]",
".",
"set_data",
"(",
"mps_net_params",
"[",
"k",
"]",
")",
"else",
":",
"# Use MxNet",
"net",
".",
"hybridize",
"(",
")",
"options",
"=",
"{",
"'learning_rate'",
":",
"base_lr",
",",
"'lr_scheduler'",
":",
"lr_scheduler",
",",
"'momentum'",
":",
"params",
"[",
"'sgd_momentum'",
"]",
",",
"'wd'",
":",
"params",
"[",
"'weight_decay'",
"]",
",",
"'rescale_grad'",
":",
"1.0",
"}",
"clip_grad",
"=",
"params",
".",
"get",
"(",
"'clip_gradients'",
")",
"if",
"clip_grad",
":",
"options",
"[",
"'clip_gradient'",
"]",
"=",
"clip_grad",
"trainer",
"=",
"_mx",
".",
"gluon",
".",
"Trainer",
"(",
"net",
".",
"collect_params",
"(",
")",
",",
"'sgd'",
",",
"options",
")",
"for",
"batch",
"in",
"loader",
":",
"data",
"=",
"_mx",
".",
"gluon",
".",
"utils",
".",
"split_and_load",
"(",
"batch",
".",
"data",
"[",
"0",
"]",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"label",
"=",
"_mx",
".",
"gluon",
".",
"utils",
".",
"split_and_load",
"(",
"batch",
".",
"label",
"[",
"0",
"]",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"Ls",
"=",
"[",
"]",
"Zs",
"=",
"[",
"]",
"with",
"_mx",
".",
"autograd",
".",
"record",
"(",
")",
":",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"data",
",",
"label",
")",
":",
"z",
"=",
"net",
"(",
"x",
")",
"z0",
"=",
"_mx",
".",
"nd",
".",
"transpose",
"(",
"z",
",",
"[",
"0",
",",
"2",
",",
"3",
",",
"1",
"]",
")",
".",
"reshape",
"(",
"ymap_shape",
")",
"L",
"=",
"loss",
"(",
"z0",
",",
"y",
")",
"Ls",
".",
"append",
"(",
"L",
")",
"for",
"L",
"in",
"Ls",
":",
"L",
".",
"backward",
"(",
")",
"trainer",
".",
"step",
"(",
"1",
")",
"cur_loss",
"=",
"_np",
".",
"mean",
"(",
"[",
"L",
".",
"asnumpy",
"(",
")",
"[",
"0",
"]",
"for",
"L",
"in",
"Ls",
"]",
")",
"update_progress",
"(",
"cur_loss",
",",
"batch",
".",
"iteration",
")",
"iteration",
"=",
"batch",
".",
"iteration",
"training_time",
"=",
"_time",
".",
"time",
"(",
")",
"-",
"start_time",
"if",
"verbose",
":",
"print",
"(",
"hr",
")",
"# progress table footer",
"# Save the model",
"training_iterations",
"=",
"iteration",
"+",
"1",
"state",
"=",
"{",
"'_model'",
":",
"net",
",",
"'_class_to_index'",
":",
"class_to_index",
",",
"'_training_time_as_string'",
":",
"_seconds_as_string",
"(",
"training_time",
")",
",",
"'_grid_shape'",
":",
"grid_shape",
",",
"'anchors'",
":",
"anchors",
",",
"'model'",
":",
"model",
",",
"'classes'",
":",
"classes",
",",
"'batch_size'",
":",
"batch_size",
",",
"'input_image_shape'",
":",
"input_image_shape",
",",
"'feature'",
":",
"feature",
",",
"'non_maximum_suppression_threshold'",
":",
"params",
"[",
"'non_maximum_suppression_threshold'",
"]",
",",
"'annotations'",
":",
"annotations",
",",
"'num_classes'",
":",
"num_classes",
",",
"'num_examples'",
":",
"num_images",
",",
"'num_bounding_boxes'",
":",
"num_instances",
",",
"'training_time'",
":",
"training_time",
",",
"'training_epochs'",
":",
"training_iterations",
"*",
"batch_size",
"//",
"num_images",
",",
"'training_iterations'",
":",
"training_iterations",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'training_loss'",
":",
"progress",
"[",
"'smoothed_loss'",
"]",
",",
"}",
"return",
"ObjectDetector",
"(",
"state",
")"
] | Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
model : string optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
classes : list optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
The number of training iterations. If 0, then it will be automatically
be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
# Make predictions on the training set and as column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions']) | [
"Create",
"a",
":",
"class",
":",
"ObjectDetector",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/object_detector.py#L98-L622 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/object_detector.py | ObjectDetector._predict_with_options | def _predict_with_options(self, dataset, with_ground_truth,
postprocess=True, confidence_threshold=0.001,
iou_threshold=None,
verbose=True):
"""
Predict with options for what kind of SFrame should be returned.
If postprocess is False, a single numpy array with raw unprocessed
results will be returned.
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
_raise_error_if_not_detection_sframe(dataset, self.feature, self.annotations,
require_annotations=with_ground_truth)
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._detection import (yolo_map_to_bounding_boxes as _yolo_map_to_bounding_boxes,
non_maximum_suppression as _non_maximum_suppression,
bbox_to_ybox as _bbox_to_ybox)
from .._mxnet import _mxnet_utils
import mxnet as _mx
loader = _SFrameDetectionIter(dataset,
batch_size=self.batch_size,
input_shape=self.input_image_shape[1:],
output_shape=self._grid_shape,
anchors=self.anchors,
class_to_index=self._class_to_index,
loader_type='stretched',
load_labels=with_ground_truth,
shuffle=False,
epochs=1,
feature_column=self.feature,
annotations_column=self.annotations)
num_anchors = len(self.anchors)
preds_per_box = 5 + len(self.classes)
output_size = preds_per_box * num_anchors
# If prediction is done with ground truth, two sframes of the same
# structure are returned, the second one containing ground truth labels
num_returns = 2 if with_ground_truth else 1
sf_builders = [
_tc.SFrameBuilder([int, str, float, float, float, float, float],
column_names=['row_id', 'label', 'confidence',
'x', 'y', 'width', 'height'])
for _ in range(num_returns)
]
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=self.batch_size)
use_mps = _use_mps() and num_mxnet_gpus == 0
if use_mps:
if not hasattr(self, '_mps_inference_net') or self._mps_inference_net is None:
mxnet_params = self._model.collect_params()
mps_net_params = { k : mxnet_params[k].data().asnumpy()
for k in mxnet_params }
mps_config = {
'mode': _MpsGraphMode.Inference,
'od_include_network': True,
'od_include_loss': False,
}
mps_net = _get_mps_od_net(input_image_shape=self.input_image_shape,
batch_size=self.batch_size,
output_size=output_size,
anchors=self.anchors,
config=mps_config,
weights=mps_net_params)
self._mps_inference_net = mps_net
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
done = False
last_time = 0
raw_results = []
for batch in loader:
if batch.pad is not None:
size = self.batch_size - batch.pad
b_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
b_indices = _mx.nd.slice_axis(batch.label[1], axis=0, begin=0, end=size)
b_oshapes = _mx.nd.slice_axis(batch.label[2], axis=0, begin=0, end=size)
else:
b_data = batch.data[0]
b_indices = batch.label[1]
b_oshapes = batch.label[2]
size = self.batch_size
if b_data.shape[0] < len(ctx):
ctx0 = ctx[:b_data.shape[0]]
else:
ctx0 = ctx
split_data = _mx.gluon.utils.split_and_load(b_data, ctx_list=ctx0, even_split=False)
split_indices = _mx.gluon.utils.split_data(b_indices, num_slice=len(ctx0), even_split=False)
split_oshapes = _mx.gluon.utils.split_data(b_oshapes, num_slice=len(ctx0), even_split=False)
for data, indices, oshapes in zip(split_data, split_indices, split_oshapes):
if use_mps:
mps_data = _mxnet_to_mps(data.asnumpy())
n_samples = mps_data.shape[0]
if mps_data.shape[0] != self.batch_size:
mps_data_padded = _np.zeros((self.batch_size,) + mps_data.shape[1:],
dtype=mps_data.dtype)
mps_data_padded[:mps_data.shape[0]] = mps_data
mps_data = mps_data_padded
mps_float_array = self._mps_inference_net.predict(mps_data)
mps_z = mps_float_array.asnumpy()[:n_samples]
z = _mps_to_mxnet(mps_z)
else:
z = self._model(data).asnumpy()
if not postprocess:
raw_results.append(z)
continue
ypred = z.transpose(0, 2, 3, 1)
ypred = ypred.reshape(ypred.shape[:-1] + (num_anchors, -1))
zipped = zip(indices.asnumpy(), ypred, oshapes.asnumpy())
for index0, output0, oshape0 in zipped:
index0 = int(index0)
x_boxes, x_classes, x_scores = _yolo_map_to_bounding_boxes(
output0[_np.newaxis], anchors=self.anchors,
confidence_threshold=confidence_threshold,
nms_thresh=None)
x_boxes0 = _np.array(x_boxes).reshape(-1, 4)
# Normalize
x_boxes0[:, 0::2] /= self.input_image_shape[1]
x_boxes0[:, 1::2] /= self.input_image_shape[2]
# Re-shape to original input size
x_boxes0[:, 0::2] *= oshape0[0]
x_boxes0[:, 1::2] *= oshape0[1]
# Clip the boxes to the original sizes
x_boxes0[:, 0::2] = _np.clip(x_boxes0[:, 0::2], 0, oshape0[0])
x_boxes0[:, 1::2] = _np.clip(x_boxes0[:, 1::2], 0, oshape0[1])
# Non-maximum suppression (also limit to 100 detection per
# image, inspired by the evaluation in COCO)
x_boxes0, x_classes, x_scores = _non_maximum_suppression(
x_boxes0, x_classes, x_scores,
num_classes=self.num_classes, threshold=iou_threshold,
limit=100)
for bbox, cls, s in zip(x_boxes0, x_classes, x_scores):
cls = int(cls)
values = [index0, self.classes[cls], s] + list(_bbox_to_ybox(bbox))
sf_builders[0].append(values)
if index0 == len(dataset) - 1:
done = True
cur_time = _time.time()
# Do not print process if only a few samples are predicted
if verbose and (dataset_size >= 5 and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n=index0 + 1, max_n=dataset_size, width=len(str(dataset_size))))
last_time = cur_time
if done:
break
# Ground truth
if with_ground_truth:
zipped = _itertools.islice(zip(batch.label[1].asnumpy(), batch.raw_bboxes, batch.raw_classes), size)
for index0, bbox0, cls0 in zipped:
index0 = int(index0)
for bbox, cls in zip(bbox0, cls0):
cls = int(cls)
if cls == -1:
break
values = [index0, self.classes[cls], 1.0] + list(bbox)
sf_builders[1].append(values)
if index0 == len(dataset) - 1:
break
if postprocess:
ret = tuple([sb.close() for sb in sf_builders])
if len(ret) == 1:
return ret[0]
else:
return ret
else:
return _np.concatenate(raw_results, axis=0) | python | def _predict_with_options(self, dataset, with_ground_truth,
postprocess=True, confidence_threshold=0.001,
iou_threshold=None,
verbose=True):
"""
Predict with options for what kind of SFrame should be returned.
If postprocess is False, a single numpy array with raw unprocessed
results will be returned.
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
_raise_error_if_not_detection_sframe(dataset, self.feature, self.annotations,
require_annotations=with_ground_truth)
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._detection import (yolo_map_to_bounding_boxes as _yolo_map_to_bounding_boxes,
non_maximum_suppression as _non_maximum_suppression,
bbox_to_ybox as _bbox_to_ybox)
from .._mxnet import _mxnet_utils
import mxnet as _mx
loader = _SFrameDetectionIter(dataset,
batch_size=self.batch_size,
input_shape=self.input_image_shape[1:],
output_shape=self._grid_shape,
anchors=self.anchors,
class_to_index=self._class_to_index,
loader_type='stretched',
load_labels=with_ground_truth,
shuffle=False,
epochs=1,
feature_column=self.feature,
annotations_column=self.annotations)
num_anchors = len(self.anchors)
preds_per_box = 5 + len(self.classes)
output_size = preds_per_box * num_anchors
# If prediction is done with ground truth, two sframes of the same
# structure are returned, the second one containing ground truth labels
num_returns = 2 if with_ground_truth else 1
sf_builders = [
_tc.SFrameBuilder([int, str, float, float, float, float, float],
column_names=['row_id', 'label', 'confidence',
'x', 'y', 'width', 'height'])
for _ in range(num_returns)
]
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=self.batch_size)
use_mps = _use_mps() and num_mxnet_gpus == 0
if use_mps:
if not hasattr(self, '_mps_inference_net') or self._mps_inference_net is None:
mxnet_params = self._model.collect_params()
mps_net_params = { k : mxnet_params[k].data().asnumpy()
for k in mxnet_params }
mps_config = {
'mode': _MpsGraphMode.Inference,
'od_include_network': True,
'od_include_loss': False,
}
mps_net = _get_mps_od_net(input_image_shape=self.input_image_shape,
batch_size=self.batch_size,
output_size=output_size,
anchors=self.anchors,
config=mps_config,
weights=mps_net_params)
self._mps_inference_net = mps_net
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
done = False
last_time = 0
raw_results = []
for batch in loader:
if batch.pad is not None:
size = self.batch_size - batch.pad
b_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
b_indices = _mx.nd.slice_axis(batch.label[1], axis=0, begin=0, end=size)
b_oshapes = _mx.nd.slice_axis(batch.label[2], axis=0, begin=0, end=size)
else:
b_data = batch.data[0]
b_indices = batch.label[1]
b_oshapes = batch.label[2]
size = self.batch_size
if b_data.shape[0] < len(ctx):
ctx0 = ctx[:b_data.shape[0]]
else:
ctx0 = ctx
split_data = _mx.gluon.utils.split_and_load(b_data, ctx_list=ctx0, even_split=False)
split_indices = _mx.gluon.utils.split_data(b_indices, num_slice=len(ctx0), even_split=False)
split_oshapes = _mx.gluon.utils.split_data(b_oshapes, num_slice=len(ctx0), even_split=False)
for data, indices, oshapes in zip(split_data, split_indices, split_oshapes):
if use_mps:
mps_data = _mxnet_to_mps(data.asnumpy())
n_samples = mps_data.shape[0]
if mps_data.shape[0] != self.batch_size:
mps_data_padded = _np.zeros((self.batch_size,) + mps_data.shape[1:],
dtype=mps_data.dtype)
mps_data_padded[:mps_data.shape[0]] = mps_data
mps_data = mps_data_padded
mps_float_array = self._mps_inference_net.predict(mps_data)
mps_z = mps_float_array.asnumpy()[:n_samples]
z = _mps_to_mxnet(mps_z)
else:
z = self._model(data).asnumpy()
if not postprocess:
raw_results.append(z)
continue
ypred = z.transpose(0, 2, 3, 1)
ypred = ypred.reshape(ypred.shape[:-1] + (num_anchors, -1))
zipped = zip(indices.asnumpy(), ypred, oshapes.asnumpy())
for index0, output0, oshape0 in zipped:
index0 = int(index0)
x_boxes, x_classes, x_scores = _yolo_map_to_bounding_boxes(
output0[_np.newaxis], anchors=self.anchors,
confidence_threshold=confidence_threshold,
nms_thresh=None)
x_boxes0 = _np.array(x_boxes).reshape(-1, 4)
# Normalize
x_boxes0[:, 0::2] /= self.input_image_shape[1]
x_boxes0[:, 1::2] /= self.input_image_shape[2]
# Re-shape to original input size
x_boxes0[:, 0::2] *= oshape0[0]
x_boxes0[:, 1::2] *= oshape0[1]
# Clip the boxes to the original sizes
x_boxes0[:, 0::2] = _np.clip(x_boxes0[:, 0::2], 0, oshape0[0])
x_boxes0[:, 1::2] = _np.clip(x_boxes0[:, 1::2], 0, oshape0[1])
# Non-maximum suppression (also limit to 100 detection per
# image, inspired by the evaluation in COCO)
x_boxes0, x_classes, x_scores = _non_maximum_suppression(
x_boxes0, x_classes, x_scores,
num_classes=self.num_classes, threshold=iou_threshold,
limit=100)
for bbox, cls, s in zip(x_boxes0, x_classes, x_scores):
cls = int(cls)
values = [index0, self.classes[cls], s] + list(_bbox_to_ybox(bbox))
sf_builders[0].append(values)
if index0 == len(dataset) - 1:
done = True
cur_time = _time.time()
# Do not print process if only a few samples are predicted
if verbose and (dataset_size >= 5 and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n=index0 + 1, max_n=dataset_size, width=len(str(dataset_size))))
last_time = cur_time
if done:
break
# Ground truth
if with_ground_truth:
zipped = _itertools.islice(zip(batch.label[1].asnumpy(), batch.raw_bboxes, batch.raw_classes), size)
for index0, bbox0, cls0 in zipped:
index0 = int(index0)
for bbox, cls in zip(bbox0, cls0):
cls = int(cls)
if cls == -1:
break
values = [index0, self.classes[cls], 1.0] + list(bbox)
sf_builders[1].append(values)
if index0 == len(dataset) - 1:
break
if postprocess:
ret = tuple([sb.close() for sb in sf_builders])
if len(ret) == 1:
return ret[0]
else:
return ret
else:
return _np.concatenate(raw_results, axis=0) | [
"def",
"_predict_with_options",
"(",
"self",
",",
"dataset",
",",
"with_ground_truth",
",",
"postprocess",
"=",
"True",
",",
"confidence_threshold",
"=",
"0.001",
",",
"iou_threshold",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"iou_threshold",
"is",
"None",
":",
"iou_threshold",
"=",
"self",
".",
"non_maximum_suppression_threshold",
"_raise_error_if_not_detection_sframe",
"(",
"dataset",
",",
"self",
".",
"feature",
",",
"self",
".",
"annotations",
",",
"require_annotations",
"=",
"with_ground_truth",
")",
"from",
".",
"_sframe_loader",
"import",
"SFrameDetectionIter",
"as",
"_SFrameDetectionIter",
"from",
".",
"_detection",
"import",
"(",
"yolo_map_to_bounding_boxes",
"as",
"_yolo_map_to_bounding_boxes",
",",
"non_maximum_suppression",
"as",
"_non_maximum_suppression",
",",
"bbox_to_ybox",
"as",
"_bbox_to_ybox",
")",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"import",
"mxnet",
"as",
"_mx",
"loader",
"=",
"_SFrameDetectionIter",
"(",
"dataset",
",",
"batch_size",
"=",
"self",
".",
"batch_size",
",",
"input_shape",
"=",
"self",
".",
"input_image_shape",
"[",
"1",
":",
"]",
",",
"output_shape",
"=",
"self",
".",
"_grid_shape",
",",
"anchors",
"=",
"self",
".",
"anchors",
",",
"class_to_index",
"=",
"self",
".",
"_class_to_index",
",",
"loader_type",
"=",
"'stretched'",
",",
"load_labels",
"=",
"with_ground_truth",
",",
"shuffle",
"=",
"False",
",",
"epochs",
"=",
"1",
",",
"feature_column",
"=",
"self",
".",
"feature",
",",
"annotations_column",
"=",
"self",
".",
"annotations",
")",
"num_anchors",
"=",
"len",
"(",
"self",
".",
"anchors",
")",
"preds_per_box",
"=",
"5",
"+",
"len",
"(",
"self",
".",
"classes",
")",
"output_size",
"=",
"preds_per_box",
"*",
"num_anchors",
"# If prediction is done with ground truth, two sframes of the same",
"# structure are returned, the second one containing ground truth labels",
"num_returns",
"=",
"2",
"if",
"with_ground_truth",
"else",
"1",
"sf_builders",
"=",
"[",
"_tc",
".",
"SFrameBuilder",
"(",
"[",
"int",
",",
"str",
",",
"float",
",",
"float",
",",
"float",
",",
"float",
",",
"float",
"]",
",",
"column_names",
"=",
"[",
"'row_id'",
",",
"'label'",
",",
"'confidence'",
",",
"'x'",
",",
"'y'",
",",
"'width'",
",",
"'height'",
"]",
")",
"for",
"_",
"in",
"range",
"(",
"num_returns",
")",
"]",
"num_mxnet_gpus",
"=",
"_mxnet_utils",
".",
"get_num_gpus_in_use",
"(",
"max_devices",
"=",
"self",
".",
"batch_size",
")",
"use_mps",
"=",
"_use_mps",
"(",
")",
"and",
"num_mxnet_gpus",
"==",
"0",
"if",
"use_mps",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_mps_inference_net'",
")",
"or",
"self",
".",
"_mps_inference_net",
"is",
"None",
":",
"mxnet_params",
"=",
"self",
".",
"_model",
".",
"collect_params",
"(",
")",
"mps_net_params",
"=",
"{",
"k",
":",
"mxnet_params",
"[",
"k",
"]",
".",
"data",
"(",
")",
".",
"asnumpy",
"(",
")",
"for",
"k",
"in",
"mxnet_params",
"}",
"mps_config",
"=",
"{",
"'mode'",
":",
"_MpsGraphMode",
".",
"Inference",
",",
"'od_include_network'",
":",
"True",
",",
"'od_include_loss'",
":",
"False",
",",
"}",
"mps_net",
"=",
"_get_mps_od_net",
"(",
"input_image_shape",
"=",
"self",
".",
"input_image_shape",
",",
"batch_size",
"=",
"self",
".",
"batch_size",
",",
"output_size",
"=",
"output_size",
",",
"anchors",
"=",
"self",
".",
"anchors",
",",
"config",
"=",
"mps_config",
",",
"weights",
"=",
"mps_net_params",
")",
"self",
".",
"_mps_inference_net",
"=",
"mps_net",
"dataset_size",
"=",
"len",
"(",
"dataset",
")",
"ctx",
"=",
"_mxnet_utils",
".",
"get_mxnet_context",
"(",
")",
"done",
"=",
"False",
"last_time",
"=",
"0",
"raw_results",
"=",
"[",
"]",
"for",
"batch",
"in",
"loader",
":",
"if",
"batch",
".",
"pad",
"is",
"not",
"None",
":",
"size",
"=",
"self",
".",
"batch_size",
"-",
"batch",
".",
"pad",
"b_data",
"=",
"_mx",
".",
"nd",
".",
"slice_axis",
"(",
"batch",
".",
"data",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
",",
"begin",
"=",
"0",
",",
"end",
"=",
"size",
")",
"b_indices",
"=",
"_mx",
".",
"nd",
".",
"slice_axis",
"(",
"batch",
".",
"label",
"[",
"1",
"]",
",",
"axis",
"=",
"0",
",",
"begin",
"=",
"0",
",",
"end",
"=",
"size",
")",
"b_oshapes",
"=",
"_mx",
".",
"nd",
".",
"slice_axis",
"(",
"batch",
".",
"label",
"[",
"2",
"]",
",",
"axis",
"=",
"0",
",",
"begin",
"=",
"0",
",",
"end",
"=",
"size",
")",
"else",
":",
"b_data",
"=",
"batch",
".",
"data",
"[",
"0",
"]",
"b_indices",
"=",
"batch",
".",
"label",
"[",
"1",
"]",
"b_oshapes",
"=",
"batch",
".",
"label",
"[",
"2",
"]",
"size",
"=",
"self",
".",
"batch_size",
"if",
"b_data",
".",
"shape",
"[",
"0",
"]",
"<",
"len",
"(",
"ctx",
")",
":",
"ctx0",
"=",
"ctx",
"[",
":",
"b_data",
".",
"shape",
"[",
"0",
"]",
"]",
"else",
":",
"ctx0",
"=",
"ctx",
"split_data",
"=",
"_mx",
".",
"gluon",
".",
"utils",
".",
"split_and_load",
"(",
"b_data",
",",
"ctx_list",
"=",
"ctx0",
",",
"even_split",
"=",
"False",
")",
"split_indices",
"=",
"_mx",
".",
"gluon",
".",
"utils",
".",
"split_data",
"(",
"b_indices",
",",
"num_slice",
"=",
"len",
"(",
"ctx0",
")",
",",
"even_split",
"=",
"False",
")",
"split_oshapes",
"=",
"_mx",
".",
"gluon",
".",
"utils",
".",
"split_data",
"(",
"b_oshapes",
",",
"num_slice",
"=",
"len",
"(",
"ctx0",
")",
",",
"even_split",
"=",
"False",
")",
"for",
"data",
",",
"indices",
",",
"oshapes",
"in",
"zip",
"(",
"split_data",
",",
"split_indices",
",",
"split_oshapes",
")",
":",
"if",
"use_mps",
":",
"mps_data",
"=",
"_mxnet_to_mps",
"(",
"data",
".",
"asnumpy",
"(",
")",
")",
"n_samples",
"=",
"mps_data",
".",
"shape",
"[",
"0",
"]",
"if",
"mps_data",
".",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"batch_size",
":",
"mps_data_padded",
"=",
"_np",
".",
"zeros",
"(",
"(",
"self",
".",
"batch_size",
",",
")",
"+",
"mps_data",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"mps_data",
".",
"dtype",
")",
"mps_data_padded",
"[",
":",
"mps_data",
".",
"shape",
"[",
"0",
"]",
"]",
"=",
"mps_data",
"mps_data",
"=",
"mps_data_padded",
"mps_float_array",
"=",
"self",
".",
"_mps_inference_net",
".",
"predict",
"(",
"mps_data",
")",
"mps_z",
"=",
"mps_float_array",
".",
"asnumpy",
"(",
")",
"[",
":",
"n_samples",
"]",
"z",
"=",
"_mps_to_mxnet",
"(",
"mps_z",
")",
"else",
":",
"z",
"=",
"self",
".",
"_model",
"(",
"data",
")",
".",
"asnumpy",
"(",
")",
"if",
"not",
"postprocess",
":",
"raw_results",
".",
"append",
"(",
"z",
")",
"continue",
"ypred",
"=",
"z",
".",
"transpose",
"(",
"0",
",",
"2",
",",
"3",
",",
"1",
")",
"ypred",
"=",
"ypred",
".",
"reshape",
"(",
"ypred",
".",
"shape",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"num_anchors",
",",
"-",
"1",
")",
")",
"zipped",
"=",
"zip",
"(",
"indices",
".",
"asnumpy",
"(",
")",
",",
"ypred",
",",
"oshapes",
".",
"asnumpy",
"(",
")",
")",
"for",
"index0",
",",
"output0",
",",
"oshape0",
"in",
"zipped",
":",
"index0",
"=",
"int",
"(",
"index0",
")",
"x_boxes",
",",
"x_classes",
",",
"x_scores",
"=",
"_yolo_map_to_bounding_boxes",
"(",
"output0",
"[",
"_np",
".",
"newaxis",
"]",
",",
"anchors",
"=",
"self",
".",
"anchors",
",",
"confidence_threshold",
"=",
"confidence_threshold",
",",
"nms_thresh",
"=",
"None",
")",
"x_boxes0",
"=",
"_np",
".",
"array",
"(",
"x_boxes",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"4",
")",
"# Normalize",
"x_boxes0",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
"/=",
"self",
".",
"input_image_shape",
"[",
"1",
"]",
"x_boxes0",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
"/=",
"self",
".",
"input_image_shape",
"[",
"2",
"]",
"# Re-shape to original input size",
"x_boxes0",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
"*=",
"oshape0",
"[",
"0",
"]",
"x_boxes0",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
"*=",
"oshape0",
"[",
"1",
"]",
"# Clip the boxes to the original sizes",
"x_boxes0",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
"=",
"_np",
".",
"clip",
"(",
"x_boxes0",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
",",
"0",
",",
"oshape0",
"[",
"0",
"]",
")",
"x_boxes0",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
"=",
"_np",
".",
"clip",
"(",
"x_boxes0",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
",",
"0",
",",
"oshape0",
"[",
"1",
"]",
")",
"# Non-maximum suppression (also limit to 100 detection per",
"# image, inspired by the evaluation in COCO)",
"x_boxes0",
",",
"x_classes",
",",
"x_scores",
"=",
"_non_maximum_suppression",
"(",
"x_boxes0",
",",
"x_classes",
",",
"x_scores",
",",
"num_classes",
"=",
"self",
".",
"num_classes",
",",
"threshold",
"=",
"iou_threshold",
",",
"limit",
"=",
"100",
")",
"for",
"bbox",
",",
"cls",
",",
"s",
"in",
"zip",
"(",
"x_boxes0",
",",
"x_classes",
",",
"x_scores",
")",
":",
"cls",
"=",
"int",
"(",
"cls",
")",
"values",
"=",
"[",
"index0",
",",
"self",
".",
"classes",
"[",
"cls",
"]",
",",
"s",
"]",
"+",
"list",
"(",
"_bbox_to_ybox",
"(",
"bbox",
")",
")",
"sf_builders",
"[",
"0",
"]",
".",
"append",
"(",
"values",
")",
"if",
"index0",
"==",
"len",
"(",
"dataset",
")",
"-",
"1",
":",
"done",
"=",
"True",
"cur_time",
"=",
"_time",
".",
"time",
"(",
")",
"# Do not print process if only a few samples are predicted",
"if",
"verbose",
"and",
"(",
"dataset_size",
">=",
"5",
"and",
"cur_time",
">",
"last_time",
"+",
"10",
"or",
"done",
")",
":",
"print",
"(",
"'Predicting {cur_n:{width}d}/{max_n:{width}d}'",
".",
"format",
"(",
"cur_n",
"=",
"index0",
"+",
"1",
",",
"max_n",
"=",
"dataset_size",
",",
"width",
"=",
"len",
"(",
"str",
"(",
"dataset_size",
")",
")",
")",
")",
"last_time",
"=",
"cur_time",
"if",
"done",
":",
"break",
"# Ground truth",
"if",
"with_ground_truth",
":",
"zipped",
"=",
"_itertools",
".",
"islice",
"(",
"zip",
"(",
"batch",
".",
"label",
"[",
"1",
"]",
".",
"asnumpy",
"(",
")",
",",
"batch",
".",
"raw_bboxes",
",",
"batch",
".",
"raw_classes",
")",
",",
"size",
")",
"for",
"index0",
",",
"bbox0",
",",
"cls0",
"in",
"zipped",
":",
"index0",
"=",
"int",
"(",
"index0",
")",
"for",
"bbox",
",",
"cls",
"in",
"zip",
"(",
"bbox0",
",",
"cls0",
")",
":",
"cls",
"=",
"int",
"(",
"cls",
")",
"if",
"cls",
"==",
"-",
"1",
":",
"break",
"values",
"=",
"[",
"index0",
",",
"self",
".",
"classes",
"[",
"cls",
"]",
",",
"1.0",
"]",
"+",
"list",
"(",
"bbox",
")",
"sf_builders",
"[",
"1",
"]",
".",
"append",
"(",
"values",
")",
"if",
"index0",
"==",
"len",
"(",
"dataset",
")",
"-",
"1",
":",
"break",
"if",
"postprocess",
":",
"ret",
"=",
"tuple",
"(",
"[",
"sb",
".",
"close",
"(",
")",
"for",
"sb",
"in",
"sf_builders",
"]",
")",
"if",
"len",
"(",
"ret",
")",
"==",
"1",
":",
"return",
"ret",
"[",
"0",
"]",
"else",
":",
"return",
"ret",
"else",
":",
"return",
"_np",
".",
"concatenate",
"(",
"raw_results",
",",
"axis",
"=",
"0",
")"
] | Predict with options for what kind of SFrame should be returned.
If postprocess is False, a single numpy array with raw unprocessed
results will be returned. | [
"Predict",
"with",
"options",
"for",
"what",
"kind",
"of",
"SFrame",
"should",
"be",
"returned",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/object_detector.py#L730-L914 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/object_detector.py | ObjectDetector._canonize_input | def _canonize_input(self, dataset):
"""
Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization.
"""
unpack = lambda x: x
if isinstance(dataset, _tc.SArray):
dataset = _tc.SFrame({self.feature: dataset})
elif isinstance(dataset, _tc.Image):
dataset = _tc.SFrame({self.feature: [dataset]})
unpack = lambda x: x[0]
return dataset, unpack | python | def _canonize_input(self, dataset):
"""
Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization.
"""
unpack = lambda x: x
if isinstance(dataset, _tc.SArray):
dataset = _tc.SFrame({self.feature: dataset})
elif isinstance(dataset, _tc.Image):
dataset = _tc.SFrame({self.feature: [dataset]})
unpack = lambda x: x[0]
return dataset, unpack | [
"def",
"_canonize_input",
"(",
"self",
",",
"dataset",
")",
":",
"unpack",
"=",
"lambda",
"x",
":",
"x",
"if",
"isinstance",
"(",
"dataset",
",",
"_tc",
".",
"SArray",
")",
":",
"dataset",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"self",
".",
"feature",
":",
"dataset",
"}",
")",
"elif",
"isinstance",
"(",
"dataset",
",",
"_tc",
".",
"Image",
")",
":",
"dataset",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"self",
".",
"feature",
":",
"[",
"dataset",
"]",
"}",
")",
"unpack",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
"return",
"dataset",
",",
"unpack"
] | Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization. | [
"Takes",
"input",
"and",
"returns",
"tuple",
"of",
"the",
"input",
"in",
"canonical",
"form",
"(",
"SFrame",
")",
"along",
"with",
"an",
"unpack",
"callback",
"function",
"that",
"can",
"be",
"applied",
"to",
"prediction",
"results",
"to",
"undo",
"the",
"canonization",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/object_detector.py#L920-L932 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/object_detector.py | ObjectDetector.predict | def predict(self, dataset, confidence_threshold=0.25, iou_threshold=None, verbose=True):
"""
Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
describes an object instances that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
dataset, unpack = self._canonize_input(dataset)
stacked_pred = self._predict_with_options(dataset, with_ground_truth=False,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
from . import util
return unpack(util.unstack_annotations(stacked_pred, num_rows=len(dataset))) | python | def predict(self, dataset, confidence_threshold=0.25, iou_threshold=None, verbose=True):
"""
Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
describes an object instances that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
dataset, unpack = self._canonize_input(dataset)
stacked_pred = self._predict_with_options(dataset, with_ground_truth=False,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
from . import util
return unpack(util.unstack_annotations(stacked_pred, num_rows=len(dataset))) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"confidence_threshold",
"=",
"0.25",
",",
"iou_threshold",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"_numeric_param_check_range",
"(",
"'confidence_threshold'",
",",
"confidence_threshold",
",",
"0.0",
",",
"1.0",
")",
"dataset",
",",
"unpack",
"=",
"self",
".",
"_canonize_input",
"(",
"dataset",
")",
"stacked_pred",
"=",
"self",
".",
"_predict_with_options",
"(",
"dataset",
",",
"with_ground_truth",
"=",
"False",
",",
"confidence_threshold",
"=",
"confidence_threshold",
",",
"iou_threshold",
"=",
"iou_threshold",
",",
"verbose",
"=",
"verbose",
")",
"from",
".",
"import",
"util",
"return",
"unpack",
"(",
"util",
".",
"unstack_annotations",
"(",
"stacked_pred",
",",
"num_rows",
"=",
"len",
"(",
"dataset",
")",
")",
")"
] | Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
describes an object instances that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions']) | [
"Predict",
"object",
"instances",
"in",
"an",
"sframe",
"of",
"images",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/object_detector.py#L934-L1004 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/object_detector.py | ObjectDetector.evaluate | def evaluate(self, dataset, metric='auto',
output_type='dict', iou_threshold=None,
confidence_threshold=None, verbose=True):
"""
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2%
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.001
AP = 'average_precision'
MAP = 'mean_average_precision'
AP50 = 'average_precision_50'
MAP50 = 'mean_average_precision_50'
ALL_METRICS = {AP, MAP, AP50, MAP50}
if isinstance(metric, (list, tuple, set)):
metrics = metric
elif metric == 'all':
metrics = ALL_METRICS
elif metric == 'auto':
metrics = {AP50, MAP50}
elif metric in ALL_METRICS:
metrics = {metric}
else:
raise _ToolkitError("Metric '{}' not supported".format(metric))
pred, gt = self._predict_with_options(dataset, with_ground_truth=True,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
pred_df = pred.to_dataframe()
gt_df = gt.to_dataframe()
thresholds = _np.arange(0.5, 1.0, 0.05)
all_th_aps = _average_precision(pred_df, gt_df,
class_to_index=self._class_to_index,
iou_thresholds=thresholds)
def class_dict(aps):
return {classname: aps[index]
for classname, index in self._class_to_index.items()}
if output_type == 'dict':
ret = {}
if AP50 in metrics:
ret[AP50] = class_dict(all_th_aps[0])
if AP in metrics:
ret[AP] = class_dict(all_th_aps.mean(0))
if MAP50 in metrics:
ret[MAP50] = all_th_aps[0].mean()
if MAP in metrics:
ret[MAP] = all_th_aps.mean()
elif output_type == 'sframe':
ret = _tc.SFrame({'label': self.classes})
if AP50 in metrics:
ret[AP50] = all_th_aps[0]
if AP in metrics:
ret[AP] = all_th_aps.mean(0)
else:
raise _ToolkitError("Output type '{}' not supported".format(output_type))
return ret | python | def evaluate(self, dataset, metric='auto',
output_type='dict', iou_threshold=None,
confidence_threshold=None, verbose=True):
"""
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2%
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.001
AP = 'average_precision'
MAP = 'mean_average_precision'
AP50 = 'average_precision_50'
MAP50 = 'mean_average_precision_50'
ALL_METRICS = {AP, MAP, AP50, MAP50}
if isinstance(metric, (list, tuple, set)):
metrics = metric
elif metric == 'all':
metrics = ALL_METRICS
elif metric == 'auto':
metrics = {AP50, MAP50}
elif metric in ALL_METRICS:
metrics = {metric}
else:
raise _ToolkitError("Metric '{}' not supported".format(metric))
pred, gt = self._predict_with_options(dataset, with_ground_truth=True,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
pred_df = pred.to_dataframe()
gt_df = gt.to_dataframe()
thresholds = _np.arange(0.5, 1.0, 0.05)
all_th_aps = _average_precision(pred_df, gt_df,
class_to_index=self._class_to_index,
iou_thresholds=thresholds)
def class_dict(aps):
return {classname: aps[index]
for classname, index in self._class_to_index.items()}
if output_type == 'dict':
ret = {}
if AP50 in metrics:
ret[AP50] = class_dict(all_th_aps[0])
if AP in metrics:
ret[AP] = class_dict(all_th_aps.mean(0))
if MAP50 in metrics:
ret[MAP50] = all_th_aps[0].mean()
if MAP in metrics:
ret[MAP] = all_th_aps.mean()
elif output_type == 'sframe':
ret = _tc.SFrame({'label': self.classes})
if AP50 in metrics:
ret[AP50] = all_th_aps[0]
if AP in metrics:
ret[AP] = all_th_aps.mean(0)
else:
raise _ToolkitError("Output type '{}' not supported".format(output_type))
return ret | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"output_type",
"=",
"'dict'",
",",
"iou_threshold",
"=",
"None",
",",
"confidence_threshold",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"iou_threshold",
"is",
"None",
":",
"iou_threshold",
"=",
"self",
".",
"non_maximum_suppression_threshold",
"if",
"confidence_threshold",
"is",
"None",
":",
"confidence_threshold",
"=",
"0.001",
"AP",
"=",
"'average_precision'",
"MAP",
"=",
"'mean_average_precision'",
"AP50",
"=",
"'average_precision_50'",
"MAP50",
"=",
"'mean_average_precision_50'",
"ALL_METRICS",
"=",
"{",
"AP",
",",
"MAP",
",",
"AP50",
",",
"MAP50",
"}",
"if",
"isinstance",
"(",
"metric",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"metrics",
"=",
"metric",
"elif",
"metric",
"==",
"'all'",
":",
"metrics",
"=",
"ALL_METRICS",
"elif",
"metric",
"==",
"'auto'",
":",
"metrics",
"=",
"{",
"AP50",
",",
"MAP50",
"}",
"elif",
"metric",
"in",
"ALL_METRICS",
":",
"metrics",
"=",
"{",
"metric",
"}",
"else",
":",
"raise",
"_ToolkitError",
"(",
"\"Metric '{}' not supported\"",
".",
"format",
"(",
"metric",
")",
")",
"pred",
",",
"gt",
"=",
"self",
".",
"_predict_with_options",
"(",
"dataset",
",",
"with_ground_truth",
"=",
"True",
",",
"confidence_threshold",
"=",
"confidence_threshold",
",",
"iou_threshold",
"=",
"iou_threshold",
",",
"verbose",
"=",
"verbose",
")",
"pred_df",
"=",
"pred",
".",
"to_dataframe",
"(",
")",
"gt_df",
"=",
"gt",
".",
"to_dataframe",
"(",
")",
"thresholds",
"=",
"_np",
".",
"arange",
"(",
"0.5",
",",
"1.0",
",",
"0.05",
")",
"all_th_aps",
"=",
"_average_precision",
"(",
"pred_df",
",",
"gt_df",
",",
"class_to_index",
"=",
"self",
".",
"_class_to_index",
",",
"iou_thresholds",
"=",
"thresholds",
")",
"def",
"class_dict",
"(",
"aps",
")",
":",
"return",
"{",
"classname",
":",
"aps",
"[",
"index",
"]",
"for",
"classname",
",",
"index",
"in",
"self",
".",
"_class_to_index",
".",
"items",
"(",
")",
"}",
"if",
"output_type",
"==",
"'dict'",
":",
"ret",
"=",
"{",
"}",
"if",
"AP50",
"in",
"metrics",
":",
"ret",
"[",
"AP50",
"]",
"=",
"class_dict",
"(",
"all_th_aps",
"[",
"0",
"]",
")",
"if",
"AP",
"in",
"metrics",
":",
"ret",
"[",
"AP",
"]",
"=",
"class_dict",
"(",
"all_th_aps",
".",
"mean",
"(",
"0",
")",
")",
"if",
"MAP50",
"in",
"metrics",
":",
"ret",
"[",
"MAP50",
"]",
"=",
"all_th_aps",
"[",
"0",
"]",
".",
"mean",
"(",
")",
"if",
"MAP",
"in",
"metrics",
":",
"ret",
"[",
"MAP",
"]",
"=",
"all_th_aps",
".",
"mean",
"(",
")",
"elif",
"output_type",
"==",
"'sframe'",
":",
"ret",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'label'",
":",
"self",
".",
"classes",
"}",
")",
"if",
"AP50",
"in",
"metrics",
":",
"ret",
"[",
"AP50",
"]",
"=",
"all_th_aps",
"[",
"0",
"]",
"if",
"AP",
"in",
"metrics",
":",
"ret",
"[",
"AP",
"]",
"=",
"all_th_aps",
".",
"mean",
"(",
"0",
")",
"else",
":",
"raise",
"_ToolkitError",
"(",
"\"Output type '{}' not supported\"",
".",
"format",
"(",
"output_type",
")",
")",
"return",
"ret"
] | Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2% | [
"Evaluate",
"the",
"model",
"by",
"making",
"predictions",
"and",
"comparing",
"these",
"to",
"ground",
"truth",
"bounding",
"box",
"annotations",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/object_detector.py#L1006-L1134 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/object_detector.py | ObjectDetector.export_coreml | def export_coreml(self, filename,
include_non_maximum_suppression = True,
iou_threshold = None,
confidence_threshold = None):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size as input and produces two output arrays: `confidence` and
`coordinates`.
The first one, `confidence` is an `N`-by-`C` array, where `N` is the
number of instances predicted and `C` is the number of classes. The
number `N` is fixed and will include many low-confidence predictions.
The instances are not sorted by confidence, so the first one will
generally not have the highest confidence (unlike in `predict`). Also
unlike the `predict` function, the instances have not undergone
what is called `non-maximum suppression`, which means there could be
several instances close in location and size that have all discovered
the same object instance. Confidences do not need to sum to 1 over the
classes; any remaining probability is implied as confidence there is no
object instance present at all at the given coordinates. The classes
appear in the array alphabetically sorted.
The second array `coordinates` is of size `N`-by-4, where the first
dimension `N` again represents instances and corresponds to the
`confidence` array. The second dimension represents `x`, `y`, `width`,
`height`, in that order. The values are represented in relative
coordinates, so (0.5, 0.5) represents the center of the image and (1,
1) the bottom right corner. You will need to multiply the relative
values with the original image size before you resized it to the fixed
input size to get pixel-value coordinates similar to `predict`.
See Also
--------
save
Parameters
----------
filename : string
The path of the file where we want to save the Core ML model.
include_non_maximum_suppression : bool
Non-maximum suppression is only available in iOS 12+.
A boolean parameter to indicate whether the Core ML model should be
saved with built-in non-maximum suppression or not.
This parameter is set to True by default.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
Examples
--------
>>> model.export_coreml('detector.mlmodel')
"""
import mxnet as _mx
from .._mxnet._mxnet_to_coreml import _mxnet_converter
import coremltools
from coremltools.models import datatypes, neural_network
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.25
preds_per_box = 5 + self.num_classes
num_anchors = len(self.anchors)
num_classes = self.num_classes
batch_size = 1
image_shape = (batch_size,) + tuple(self.input_image_shape)
s_image_uint8 = _mx.sym.Variable(self.feature, shape=image_shape, dtype=_np.float32)
s_image = s_image_uint8 / 255
# Swap a maxpool+slice in mxnet to a coreml natively supported layer
from copy import copy
net = copy(self._model)
net._children = copy(self._model._children)
from ._model import _SpecialDarknetMaxpoolBlock
op = _SpecialDarknetMaxpoolBlock(name='pool5')
# Make sure we are removing the right layers
assert (self._model[23].name == 'pool5' and
self._model[24].name == 'specialcrop5')
del net._children[24]
net._children[23] = op
s_ymap = net(s_image)
mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
# Copy over params from net
mod.init_params()
arg_params, aux_params = mod.get_params()
net_params = net.collect_params()
new_arg_params = {}
for k, param in arg_params.items():
new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
new_aux_params = {}
for k, param in aux_params.items():
new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
mod.set_params(new_arg_params, new_aux_params)
input_names = [self.feature]
input_dims = [list(self.input_image_shape)]
input_types = [datatypes.Array(*dim) for dim in input_dims]
input_features = list(zip(input_names, input_types))
num_spatial = self._grid_shape[0] * self._grid_shape[1]
num_bounding_boxes = num_anchors * num_spatial
CONFIDENCE_STR = ("raw_confidence" if include_non_maximum_suppression
else "confidence")
COORDINATES_STR = ("raw_coordinates" if include_non_maximum_suppression
else "coordinates")
output_names = [
CONFIDENCE_STR,
COORDINATES_STR
]
output_dims = [
(num_bounding_boxes, num_classes),
(num_bounding_boxes, 4),
]
output_types = [datatypes.Array(*dim) for dim in output_dims]
output_features = list(zip(output_names, output_types))
mode = None
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
_mxnet_converter.convert(mod, mode=None,
input_shape=[(self.feature, image_shape)],
builder=builder, verbose=False)
prefix = '__tc__internal__'
# (1, B, C+5, S*S)
builder.add_reshape(name=prefix + 'ymap_sp_pre',
target_shape=[batch_size, num_anchors, preds_per_box, num_spatial],
mode=0,
input_name='conv8_fwd_output',
output_name=prefix + 'ymap_sp_pre')
# (1, C+5, B, S*S)
builder.add_permute(name=prefix + 'ymap_sp',
dim=[0, 2, 1, 3],
input_name=prefix + 'ymap_sp_pre',
output_name=prefix + 'ymap_sp')
# POSITION: X/Y
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_xy_sp',
axis='channel',
start_index=0,
end_index=2,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_xy_sp')
# (1, 2, B, S*S)
builder.add_activation(name=prefix + 'rel_xy_sp',
non_linearity='SIGMOID',
input_name=prefix + 'raw_rel_xy_sp',
output_name=prefix + 'rel_xy_sp')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'rel_xy',
target_shape=[batch_size, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'rel_xy_sp',
output_name=prefix + 'rel_xy')
c_xy = _np.array(_np.meshgrid(_np.arange(self._grid_shape[1]),
_np.arange(self._grid_shape[0])), dtype=_np.float32)
c_xy_reshaped = (_np.tile(c_xy[:, _np.newaxis], (num_anchors, 1, 1))
.reshape(2, -1))[_np.newaxis, ..., _np.newaxis]
# (1, 2, B*H*W, 1)
builder.add_load_constant(prefix + 'constant_xy',
constant_value=c_xy_reshaped,
shape=c_xy_reshaped.shape[1:],
output_name=prefix + 'constant_xy')
# (1, 2, B*H*W, 1)
builder.add_elementwise(name=prefix + 'xy',
mode='ADD',
input_names=[prefix + 'constant_xy', prefix + 'rel_xy'],
output_name=prefix + 'xy')
# SHAPE: WIDTH/HEIGHT
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_wh_sp',
axis='channel',
start_index=2,
end_index=4,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_wh_sp')
# (1, 2, B, S*S)
builder.add_unary(name=prefix + 'rel_wh_sp',
mode='exp',
input_name=prefix + 'raw_rel_wh_sp',
output_name=prefix + 'rel_wh_sp')
# (1, 2*B, S, S)
builder.add_reshape(name=prefix + 'rel_wh',
target_shape=[batch_size, 2 * num_anchors] + list(self._grid_shape),
mode=0,
input_name=prefix + 'rel_wh_sp',
output_name=prefix + 'rel_wh')
np_anchors = _np.asarray(self.anchors, dtype=_np.float32).T
anchors_0 = _np.tile(np_anchors.reshape([2 * num_anchors, 1, 1]), self._grid_shape)
# (1, 2*B, S, S)
builder.add_load_constant(name=prefix + 'c_anchors',
constant_value=anchors_0,
shape=anchors_0.shape,
output_name=prefix + 'c_anchors')
# (1, 2*B, S, S)
builder.add_elementwise(name=prefix + 'wh_pre',
mode='MULTIPLY',
input_names=[prefix + 'c_anchors', prefix + 'rel_wh'],
output_name=prefix + 'wh_pre')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'wh',
target_shape=[1, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'wh_pre',
output_name=prefix + 'wh')
# (1, 4, B*H*W, 1)
builder.add_elementwise(name=prefix + 'boxes_out_transposed',
mode='CONCAT',
input_names=[prefix + 'xy', prefix + 'wh'],
output_name=prefix + 'boxes_out_transposed')
# (1, B*H*W, 4, 1)
builder.add_permute(name=prefix + 'boxes_out',
dim=[0, 2, 1, 3],
input_name=prefix + 'boxes_out_transposed',
output_name=prefix + 'boxes_out')
scale = _np.zeros((num_bounding_boxes, 4, 1))
scale[:, 0::2] = 1.0 / self._grid_shape[1]
scale[:, 1::2] = 1.0 / self._grid_shape[0]
# (1, B*H*W, 4, 1)
builder.add_scale(name=COORDINATES_STR,
W=scale,
b=0,
has_bias=False,
shape_scale=(num_bounding_boxes, 4, 1),
input_name=prefix + 'boxes_out',
output_name=COORDINATES_STR)
# CLASS PROBABILITIES AND OBJECT CONFIDENCE
# (1, C, B, H*W)
builder.add_slice(name=prefix + 'scores_sp',
axis='channel',
start_index=5,
end_index=preds_per_box,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'scores_sp')
# (1, C, B, H*W)
builder.add_softmax(name=prefix + 'probs_sp',
input_name=prefix + 'scores_sp',
output_name=prefix + 'probs_sp')
# (1, 1, B, H*W)
builder.add_slice(name=prefix + 'logit_conf_sp',
axis='channel',
start_index=4,
end_index=5,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'logit_conf_sp')
# (1, 1, B, H*W)
builder.add_activation(name=prefix + 'conf_sp',
non_linearity='SIGMOID',
input_name=prefix + 'logit_conf_sp',
output_name=prefix + 'conf_sp')
# (1, C, B, H*W)
if num_classes > 1:
conf = prefix + 'conf_tiled_sp'
builder.add_elementwise(name=prefix + 'conf_tiled_sp',
mode='CONCAT',
input_names=[prefix+'conf_sp']*num_classes,
output_name=conf)
else:
conf = prefix + 'conf_sp'
# (1, C, B, H*W)
builder.add_elementwise(name=prefix + 'confprobs_sp',
mode='MULTIPLY',
input_names=[conf, prefix + 'probs_sp'],
output_name=prefix + 'confprobs_sp')
# (1, C, B*H*W, 1)
builder.add_reshape(name=prefix + 'confprobs_transposed',
target_shape=[1, num_classes, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'confprobs_sp',
output_name=prefix + 'confprobs_transposed')
# (1, B*H*W, C, 1)
builder.add_permute(name=CONFIDENCE_STR,
dim=[0, 2, 1, 3],
input_name=prefix + 'confprobs_transposed',
output_name=CONFIDENCE_STR)
_mxnet_converter._set_input_output_layers(
builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
builder.set_pre_processing_parameters(image_input_names=self.feature)
model = builder.spec
if include_non_maximum_suppression:
# Non-Maximum Suppression is a post-processing algorithm
# responsible for merging all detections that belong to the
# same object.
# Core ML schematic
# +------------------------------------+
# | Pipeline |
# | |
# | +------------+ +-------------+ |
# | | Neural | | Non-maximum | |
# | | network +---> suppression +-----> confidences
# Image +----> | | | |
# | | +---> +-----> coordinates
# | | | | | |
# Optional inputs: | +------------+ +-^---^-------+ |
# | | | |
# IOU threshold +-----------------------+ | |
# | | |
# Confidence threshold +---------------------------+ |
# +------------------------------------+
model_neural_network = model.neuralNetwork
model.specificationVersion = 3
model.pipeline.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[0].neuralNetwork.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[1].nonMaximumSuppression.ParseFromString(b'')
# begin: Neural network model
nn_model = model.pipeline.models[0]
nn_model.description.ParseFromString(b'')
input_image = model.description.input[0]
input_image.type.imageType.width = self.input_image_shape[1]
input_image.type.imageType.height = self.input_image_shape[2]
nn_model.description.input.add()
nn_model.description.input[0].ParseFromString(
input_image.SerializeToString())
for i in range(2):
del model.description.output[i].type.multiArrayType.shape[:]
names = ["raw_confidence", "raw_coordinates"]
bounds = [self.num_classes, 4]
for i in range(2):
output_i = model.description.output[i]
output_i.name = names[i]
for j in range(2):
ma_type = output_i.type.multiArrayType
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[j].lowerBound = (
bounds[i] if j == 1 else 0)
ma_type.shapeRange.sizeRanges[j].upperBound = (
bounds[i] if j == 1 else -1)
nn_model.description.output.add()
nn_model.description.output[i].ParseFromString(
output_i.SerializeToString())
ma_type = nn_model.description.output[i].type.multiArrayType
ma_type.shape.append(num_bounding_boxes)
ma_type.shape.append(bounds[i])
# Think more about this line
nn_model.neuralNetwork.ParseFromString(
model_neural_network.SerializeToString())
nn_model.specificationVersion = model.specificationVersion
# end: Neural network model
# begin: Non maximum suppression model
nms_model = model.pipeline.models[1]
nms_model_nonMaxSup = nms_model.nonMaximumSuppression
for i in range(2):
output_i = model.description.output[i]
nms_model.description.input.add()
nms_model.description.input[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output.add()
nms_model.description.output[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output[i].name = (
'confidence' if i==0 else 'coordinates')
nms_model_nonMaxSup.iouThreshold = iou_threshold
nms_model_nonMaxSup.confidenceThreshold = confidence_threshold
nms_model_nonMaxSup.confidenceInputFeatureName = 'raw_confidence'
nms_model_nonMaxSup.coordinatesInputFeatureName = 'raw_coordinates'
nms_model_nonMaxSup.confidenceOutputFeatureName = 'confidence'
nms_model_nonMaxSup.coordinatesOutputFeatureName = 'coordinates'
nms_model.specificationVersion = model.specificationVersion
nms_model_nonMaxSup.stringClassLabels.vector.extend(self.classes)
for i in range(2):
nms_model.description.input[i].ParseFromString(
nn_model.description.output[i].SerializeToString()
)
if include_non_maximum_suppression:
# Iou Threshold
IOU_THRESHOLD_STRING = 'iouThreshold'
model.description.input.add()
model.description.input[1].type.doubleType.ParseFromString(b'')
model.description.input[1].name = IOU_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[2].ParseFromString(
model.description.input[1].SerializeToString()
)
nms_model_nonMaxSup.iouThresholdInputFeatureName = IOU_THRESHOLD_STRING
# Confidence Threshold
CONFIDENCE_THRESHOLD_STRING = 'confidenceThreshold'
model.description.input.add()
model.description.input[2].type.doubleType.ParseFromString(b'')
model.description.input[2].name = CONFIDENCE_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[3].ParseFromString(
model.description.input[2].SerializeToString())
nms_model_nonMaxSup.confidenceThresholdInputFeatureName = \
CONFIDENCE_THRESHOLD_STRING
# end: Non maximum suppression model
model.description.output[0].name = 'confidence'
model.description.output[1].name = 'coordinates'
iouThresholdString = '(optional) IOU Threshold override (default: {})'
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model_type = 'object detector (%s)' % self.model
if include_non_maximum_suppression:
model_type += ' with non-maximum suppression'
model.description.metadata.shortDescription = \
_coreml_utils._mlmodel_short_description(model_type)
model.description.input[0].shortDescription = 'Input image'
if include_non_maximum_suppression:
iouThresholdString = '(optional) IOU Threshold override (default: {})'
model.description.input[1].shortDescription = \
iouThresholdString.format(iou_threshold)
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model.description.input[2].shortDescription = \
confidenceThresholdString.format(confidence_threshold)
model.description.output[0].shortDescription = \
u'Boxes \xd7 Class confidence (see user-defined metadata "classes")'
model.description.output[1].shortDescription = \
u'Boxes \xd7 [x, y, width, height] (relative to image size)'
version = ObjectDetector._PYTHON_OBJECT_DETECTOR_VERSION
partial_user_defined_metadata = {
'model': self.model,
'max_iterations': str(self.max_iterations),
'training_iterations': str(self.training_iterations),
'include_non_maximum_suppression': str(
include_non_maximum_suppression),
'non_maximum_suppression_threshold': str(
iou_threshold),
'confidence_threshold': str(confidence_threshold),
'iou_threshold': str(iou_threshold),
'feature': self.feature,
'annotations': self.annotations,
'classes': ','.join(self.classes)
}
user_defined_metadata = _coreml_utils._get_model_metadata(
self.__class__.__name__,
partial_user_defined_metadata,
version)
model.description.metadata.userDefined.update(user_defined_metadata)
from coremltools.models.utils import save_spec as _save_spec
_save_spec(model, filename) | python | def export_coreml(self, filename,
include_non_maximum_suppression = True,
iou_threshold = None,
confidence_threshold = None):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size as input and produces two output arrays: `confidence` and
`coordinates`.
The first one, `confidence` is an `N`-by-`C` array, where `N` is the
number of instances predicted and `C` is the number of classes. The
number `N` is fixed and will include many low-confidence predictions.
The instances are not sorted by confidence, so the first one will
generally not have the highest confidence (unlike in `predict`). Also
unlike the `predict` function, the instances have not undergone
what is called `non-maximum suppression`, which means there could be
several instances close in location and size that have all discovered
the same object instance. Confidences do not need to sum to 1 over the
classes; any remaining probability is implied as confidence there is no
object instance present at all at the given coordinates. The classes
appear in the array alphabetically sorted.
The second array `coordinates` is of size `N`-by-4, where the first
dimension `N` again represents instances and corresponds to the
`confidence` array. The second dimension represents `x`, `y`, `width`,
`height`, in that order. The values are represented in relative
coordinates, so (0.5, 0.5) represents the center of the image and (1,
1) the bottom right corner. You will need to multiply the relative
values with the original image size before you resized it to the fixed
input size to get pixel-value coordinates similar to `predict`.
See Also
--------
save
Parameters
----------
filename : string
The path of the file where we want to save the Core ML model.
include_non_maximum_suppression : bool
Non-maximum suppression is only available in iOS 12+.
A boolean parameter to indicate whether the Core ML model should be
saved with built-in non-maximum suppression or not.
This parameter is set to True by default.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
Examples
--------
>>> model.export_coreml('detector.mlmodel')
"""
import mxnet as _mx
from .._mxnet._mxnet_to_coreml import _mxnet_converter
import coremltools
from coremltools.models import datatypes, neural_network
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.25
preds_per_box = 5 + self.num_classes
num_anchors = len(self.anchors)
num_classes = self.num_classes
batch_size = 1
image_shape = (batch_size,) + tuple(self.input_image_shape)
s_image_uint8 = _mx.sym.Variable(self.feature, shape=image_shape, dtype=_np.float32)
s_image = s_image_uint8 / 255
# Swap a maxpool+slice in mxnet to a coreml natively supported layer
from copy import copy
net = copy(self._model)
net._children = copy(self._model._children)
from ._model import _SpecialDarknetMaxpoolBlock
op = _SpecialDarknetMaxpoolBlock(name='pool5')
# Make sure we are removing the right layers
assert (self._model[23].name == 'pool5' and
self._model[24].name == 'specialcrop5')
del net._children[24]
net._children[23] = op
s_ymap = net(s_image)
mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
# Copy over params from net
mod.init_params()
arg_params, aux_params = mod.get_params()
net_params = net.collect_params()
new_arg_params = {}
for k, param in arg_params.items():
new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
new_aux_params = {}
for k, param in aux_params.items():
new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
mod.set_params(new_arg_params, new_aux_params)
input_names = [self.feature]
input_dims = [list(self.input_image_shape)]
input_types = [datatypes.Array(*dim) for dim in input_dims]
input_features = list(zip(input_names, input_types))
num_spatial = self._grid_shape[0] * self._grid_shape[1]
num_bounding_boxes = num_anchors * num_spatial
CONFIDENCE_STR = ("raw_confidence" if include_non_maximum_suppression
else "confidence")
COORDINATES_STR = ("raw_coordinates" if include_non_maximum_suppression
else "coordinates")
output_names = [
CONFIDENCE_STR,
COORDINATES_STR
]
output_dims = [
(num_bounding_boxes, num_classes),
(num_bounding_boxes, 4),
]
output_types = [datatypes.Array(*dim) for dim in output_dims]
output_features = list(zip(output_names, output_types))
mode = None
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
_mxnet_converter.convert(mod, mode=None,
input_shape=[(self.feature, image_shape)],
builder=builder, verbose=False)
prefix = '__tc__internal__'
# (1, B, C+5, S*S)
builder.add_reshape(name=prefix + 'ymap_sp_pre',
target_shape=[batch_size, num_anchors, preds_per_box, num_spatial],
mode=0,
input_name='conv8_fwd_output',
output_name=prefix + 'ymap_sp_pre')
# (1, C+5, B, S*S)
builder.add_permute(name=prefix + 'ymap_sp',
dim=[0, 2, 1, 3],
input_name=prefix + 'ymap_sp_pre',
output_name=prefix + 'ymap_sp')
# POSITION: X/Y
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_xy_sp',
axis='channel',
start_index=0,
end_index=2,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_xy_sp')
# (1, 2, B, S*S)
builder.add_activation(name=prefix + 'rel_xy_sp',
non_linearity='SIGMOID',
input_name=prefix + 'raw_rel_xy_sp',
output_name=prefix + 'rel_xy_sp')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'rel_xy',
target_shape=[batch_size, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'rel_xy_sp',
output_name=prefix + 'rel_xy')
c_xy = _np.array(_np.meshgrid(_np.arange(self._grid_shape[1]),
_np.arange(self._grid_shape[0])), dtype=_np.float32)
c_xy_reshaped = (_np.tile(c_xy[:, _np.newaxis], (num_anchors, 1, 1))
.reshape(2, -1))[_np.newaxis, ..., _np.newaxis]
# (1, 2, B*H*W, 1)
builder.add_load_constant(prefix + 'constant_xy',
constant_value=c_xy_reshaped,
shape=c_xy_reshaped.shape[1:],
output_name=prefix + 'constant_xy')
# (1, 2, B*H*W, 1)
builder.add_elementwise(name=prefix + 'xy',
mode='ADD',
input_names=[prefix + 'constant_xy', prefix + 'rel_xy'],
output_name=prefix + 'xy')
# SHAPE: WIDTH/HEIGHT
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_wh_sp',
axis='channel',
start_index=2,
end_index=4,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_wh_sp')
# (1, 2, B, S*S)
builder.add_unary(name=prefix + 'rel_wh_sp',
mode='exp',
input_name=prefix + 'raw_rel_wh_sp',
output_name=prefix + 'rel_wh_sp')
# (1, 2*B, S, S)
builder.add_reshape(name=prefix + 'rel_wh',
target_shape=[batch_size, 2 * num_anchors] + list(self._grid_shape),
mode=0,
input_name=prefix + 'rel_wh_sp',
output_name=prefix + 'rel_wh')
np_anchors = _np.asarray(self.anchors, dtype=_np.float32).T
anchors_0 = _np.tile(np_anchors.reshape([2 * num_anchors, 1, 1]), self._grid_shape)
# (1, 2*B, S, S)
builder.add_load_constant(name=prefix + 'c_anchors',
constant_value=anchors_0,
shape=anchors_0.shape,
output_name=prefix + 'c_anchors')
# (1, 2*B, S, S)
builder.add_elementwise(name=prefix + 'wh_pre',
mode='MULTIPLY',
input_names=[prefix + 'c_anchors', prefix + 'rel_wh'],
output_name=prefix + 'wh_pre')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'wh',
target_shape=[1, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'wh_pre',
output_name=prefix + 'wh')
# (1, 4, B*H*W, 1)
builder.add_elementwise(name=prefix + 'boxes_out_transposed',
mode='CONCAT',
input_names=[prefix + 'xy', prefix + 'wh'],
output_name=prefix + 'boxes_out_transposed')
# (1, B*H*W, 4, 1)
builder.add_permute(name=prefix + 'boxes_out',
dim=[0, 2, 1, 3],
input_name=prefix + 'boxes_out_transposed',
output_name=prefix + 'boxes_out')
scale = _np.zeros((num_bounding_boxes, 4, 1))
scale[:, 0::2] = 1.0 / self._grid_shape[1]
scale[:, 1::2] = 1.0 / self._grid_shape[0]
# (1, B*H*W, 4, 1)
builder.add_scale(name=COORDINATES_STR,
W=scale,
b=0,
has_bias=False,
shape_scale=(num_bounding_boxes, 4, 1),
input_name=prefix + 'boxes_out',
output_name=COORDINATES_STR)
# CLASS PROBABILITIES AND OBJECT CONFIDENCE
# (1, C, B, H*W)
builder.add_slice(name=prefix + 'scores_sp',
axis='channel',
start_index=5,
end_index=preds_per_box,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'scores_sp')
# (1, C, B, H*W)
builder.add_softmax(name=prefix + 'probs_sp',
input_name=prefix + 'scores_sp',
output_name=prefix + 'probs_sp')
# (1, 1, B, H*W)
builder.add_slice(name=prefix + 'logit_conf_sp',
axis='channel',
start_index=4,
end_index=5,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'logit_conf_sp')
# (1, 1, B, H*W)
builder.add_activation(name=prefix + 'conf_sp',
non_linearity='SIGMOID',
input_name=prefix + 'logit_conf_sp',
output_name=prefix + 'conf_sp')
# (1, C, B, H*W)
if num_classes > 1:
conf = prefix + 'conf_tiled_sp'
builder.add_elementwise(name=prefix + 'conf_tiled_sp',
mode='CONCAT',
input_names=[prefix+'conf_sp']*num_classes,
output_name=conf)
else:
conf = prefix + 'conf_sp'
# (1, C, B, H*W)
builder.add_elementwise(name=prefix + 'confprobs_sp',
mode='MULTIPLY',
input_names=[conf, prefix + 'probs_sp'],
output_name=prefix + 'confprobs_sp')
# (1, C, B*H*W, 1)
builder.add_reshape(name=prefix + 'confprobs_transposed',
target_shape=[1, num_classes, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'confprobs_sp',
output_name=prefix + 'confprobs_transposed')
# (1, B*H*W, C, 1)
builder.add_permute(name=CONFIDENCE_STR,
dim=[0, 2, 1, 3],
input_name=prefix + 'confprobs_transposed',
output_name=CONFIDENCE_STR)
_mxnet_converter._set_input_output_layers(
builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
builder.set_pre_processing_parameters(image_input_names=self.feature)
model = builder.spec
if include_non_maximum_suppression:
# Non-Maximum Suppression is a post-processing algorithm
# responsible for merging all detections that belong to the
# same object.
# Core ML schematic
# +------------------------------------+
# | Pipeline |
# | |
# | +------------+ +-------------+ |
# | | Neural | | Non-maximum | |
# | | network +---> suppression +-----> confidences
# Image +----> | | | |
# | | +---> +-----> coordinates
# | | | | | |
# Optional inputs: | +------------+ +-^---^-------+ |
# | | | |
# IOU threshold +-----------------------+ | |
# | | |
# Confidence threshold +---------------------------+ |
# +------------------------------------+
model_neural_network = model.neuralNetwork
model.specificationVersion = 3
model.pipeline.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[0].neuralNetwork.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[1].nonMaximumSuppression.ParseFromString(b'')
# begin: Neural network model
nn_model = model.pipeline.models[0]
nn_model.description.ParseFromString(b'')
input_image = model.description.input[0]
input_image.type.imageType.width = self.input_image_shape[1]
input_image.type.imageType.height = self.input_image_shape[2]
nn_model.description.input.add()
nn_model.description.input[0].ParseFromString(
input_image.SerializeToString())
for i in range(2):
del model.description.output[i].type.multiArrayType.shape[:]
names = ["raw_confidence", "raw_coordinates"]
bounds = [self.num_classes, 4]
for i in range(2):
output_i = model.description.output[i]
output_i.name = names[i]
for j in range(2):
ma_type = output_i.type.multiArrayType
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[j].lowerBound = (
bounds[i] if j == 1 else 0)
ma_type.shapeRange.sizeRanges[j].upperBound = (
bounds[i] if j == 1 else -1)
nn_model.description.output.add()
nn_model.description.output[i].ParseFromString(
output_i.SerializeToString())
ma_type = nn_model.description.output[i].type.multiArrayType
ma_type.shape.append(num_bounding_boxes)
ma_type.shape.append(bounds[i])
# Think more about this line
nn_model.neuralNetwork.ParseFromString(
model_neural_network.SerializeToString())
nn_model.specificationVersion = model.specificationVersion
# end: Neural network model
# begin: Non maximum suppression model
nms_model = model.pipeline.models[1]
nms_model_nonMaxSup = nms_model.nonMaximumSuppression
for i in range(2):
output_i = model.description.output[i]
nms_model.description.input.add()
nms_model.description.input[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output.add()
nms_model.description.output[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output[i].name = (
'confidence' if i==0 else 'coordinates')
nms_model_nonMaxSup.iouThreshold = iou_threshold
nms_model_nonMaxSup.confidenceThreshold = confidence_threshold
nms_model_nonMaxSup.confidenceInputFeatureName = 'raw_confidence'
nms_model_nonMaxSup.coordinatesInputFeatureName = 'raw_coordinates'
nms_model_nonMaxSup.confidenceOutputFeatureName = 'confidence'
nms_model_nonMaxSup.coordinatesOutputFeatureName = 'coordinates'
nms_model.specificationVersion = model.specificationVersion
nms_model_nonMaxSup.stringClassLabels.vector.extend(self.classes)
for i in range(2):
nms_model.description.input[i].ParseFromString(
nn_model.description.output[i].SerializeToString()
)
if include_non_maximum_suppression:
# Iou Threshold
IOU_THRESHOLD_STRING = 'iouThreshold'
model.description.input.add()
model.description.input[1].type.doubleType.ParseFromString(b'')
model.description.input[1].name = IOU_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[2].ParseFromString(
model.description.input[1].SerializeToString()
)
nms_model_nonMaxSup.iouThresholdInputFeatureName = IOU_THRESHOLD_STRING
# Confidence Threshold
CONFIDENCE_THRESHOLD_STRING = 'confidenceThreshold'
model.description.input.add()
model.description.input[2].type.doubleType.ParseFromString(b'')
model.description.input[2].name = CONFIDENCE_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[3].ParseFromString(
model.description.input[2].SerializeToString())
nms_model_nonMaxSup.confidenceThresholdInputFeatureName = \
CONFIDENCE_THRESHOLD_STRING
# end: Non maximum suppression model
model.description.output[0].name = 'confidence'
model.description.output[1].name = 'coordinates'
iouThresholdString = '(optional) IOU Threshold override (default: {})'
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model_type = 'object detector (%s)' % self.model
if include_non_maximum_suppression:
model_type += ' with non-maximum suppression'
model.description.metadata.shortDescription = \
_coreml_utils._mlmodel_short_description(model_type)
model.description.input[0].shortDescription = 'Input image'
if include_non_maximum_suppression:
iouThresholdString = '(optional) IOU Threshold override (default: {})'
model.description.input[1].shortDescription = \
iouThresholdString.format(iou_threshold)
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model.description.input[2].shortDescription = \
confidenceThresholdString.format(confidence_threshold)
model.description.output[0].shortDescription = \
u'Boxes \xd7 Class confidence (see user-defined metadata "classes")'
model.description.output[1].shortDescription = \
u'Boxes \xd7 [x, y, width, height] (relative to image size)'
version = ObjectDetector._PYTHON_OBJECT_DETECTOR_VERSION
partial_user_defined_metadata = {
'model': self.model,
'max_iterations': str(self.max_iterations),
'training_iterations': str(self.training_iterations),
'include_non_maximum_suppression': str(
include_non_maximum_suppression),
'non_maximum_suppression_threshold': str(
iou_threshold),
'confidence_threshold': str(confidence_threshold),
'iou_threshold': str(iou_threshold),
'feature': self.feature,
'annotations': self.annotations,
'classes': ','.join(self.classes)
}
user_defined_metadata = _coreml_utils._get_model_metadata(
self.__class__.__name__,
partial_user_defined_metadata,
version)
model.description.metadata.userDefined.update(user_defined_metadata)
from coremltools.models.utils import save_spec as _save_spec
_save_spec(model, filename) | [
"def",
"export_coreml",
"(",
"self",
",",
"filename",
",",
"include_non_maximum_suppression",
"=",
"True",
",",
"iou_threshold",
"=",
"None",
",",
"confidence_threshold",
"=",
"None",
")",
":",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
".",
"_mxnet",
".",
"_mxnet_to_coreml",
"import",
"_mxnet_converter",
"import",
"coremltools",
"from",
"coremltools",
".",
"models",
"import",
"datatypes",
",",
"neural_network",
"if",
"iou_threshold",
"is",
"None",
":",
"iou_threshold",
"=",
"self",
".",
"non_maximum_suppression_threshold",
"if",
"confidence_threshold",
"is",
"None",
":",
"confidence_threshold",
"=",
"0.25",
"preds_per_box",
"=",
"5",
"+",
"self",
".",
"num_classes",
"num_anchors",
"=",
"len",
"(",
"self",
".",
"anchors",
")",
"num_classes",
"=",
"self",
".",
"num_classes",
"batch_size",
"=",
"1",
"image_shape",
"=",
"(",
"batch_size",
",",
")",
"+",
"tuple",
"(",
"self",
".",
"input_image_shape",
")",
"s_image_uint8",
"=",
"_mx",
".",
"sym",
".",
"Variable",
"(",
"self",
".",
"feature",
",",
"shape",
"=",
"image_shape",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
"s_image",
"=",
"s_image_uint8",
"/",
"255",
"# Swap a maxpool+slice in mxnet to a coreml natively supported layer",
"from",
"copy",
"import",
"copy",
"net",
"=",
"copy",
"(",
"self",
".",
"_model",
")",
"net",
".",
"_children",
"=",
"copy",
"(",
"self",
".",
"_model",
".",
"_children",
")",
"from",
".",
"_model",
"import",
"_SpecialDarknetMaxpoolBlock",
"op",
"=",
"_SpecialDarknetMaxpoolBlock",
"(",
"name",
"=",
"'pool5'",
")",
"# Make sure we are removing the right layers",
"assert",
"(",
"self",
".",
"_model",
"[",
"23",
"]",
".",
"name",
"==",
"'pool5'",
"and",
"self",
".",
"_model",
"[",
"24",
"]",
".",
"name",
"==",
"'specialcrop5'",
")",
"del",
"net",
".",
"_children",
"[",
"24",
"]",
"net",
".",
"_children",
"[",
"23",
"]",
"=",
"op",
"s_ymap",
"=",
"net",
"(",
"s_image",
")",
"mod",
"=",
"_mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
"=",
"s_ymap",
",",
"label_names",
"=",
"None",
",",
"data_names",
"=",
"[",
"self",
".",
"feature",
"]",
")",
"mod",
".",
"bind",
"(",
"for_training",
"=",
"False",
",",
"data_shapes",
"=",
"[",
"(",
"self",
".",
"feature",
",",
"image_shape",
")",
"]",
")",
"# Copy over params from net",
"mod",
".",
"init_params",
"(",
")",
"arg_params",
",",
"aux_params",
"=",
"mod",
".",
"get_params",
"(",
")",
"net_params",
"=",
"net",
".",
"collect_params",
"(",
")",
"new_arg_params",
"=",
"{",
"}",
"for",
"k",
",",
"param",
"in",
"arg_params",
".",
"items",
"(",
")",
":",
"new_arg_params",
"[",
"k",
"]",
"=",
"net_params",
"[",
"k",
"]",
".",
"data",
"(",
"net_params",
"[",
"k",
"]",
".",
"list_ctx",
"(",
")",
"[",
"0",
"]",
")",
"new_aux_params",
"=",
"{",
"}",
"for",
"k",
",",
"param",
"in",
"aux_params",
".",
"items",
"(",
")",
":",
"new_aux_params",
"[",
"k",
"]",
"=",
"net_params",
"[",
"k",
"]",
".",
"data",
"(",
"net_params",
"[",
"k",
"]",
".",
"list_ctx",
"(",
")",
"[",
"0",
"]",
")",
"mod",
".",
"set_params",
"(",
"new_arg_params",
",",
"new_aux_params",
")",
"input_names",
"=",
"[",
"self",
".",
"feature",
"]",
"input_dims",
"=",
"[",
"list",
"(",
"self",
".",
"input_image_shape",
")",
"]",
"input_types",
"=",
"[",
"datatypes",
".",
"Array",
"(",
"*",
"dim",
")",
"for",
"dim",
"in",
"input_dims",
"]",
"input_features",
"=",
"list",
"(",
"zip",
"(",
"input_names",
",",
"input_types",
")",
")",
"num_spatial",
"=",
"self",
".",
"_grid_shape",
"[",
"0",
"]",
"*",
"self",
".",
"_grid_shape",
"[",
"1",
"]",
"num_bounding_boxes",
"=",
"num_anchors",
"*",
"num_spatial",
"CONFIDENCE_STR",
"=",
"(",
"\"raw_confidence\"",
"if",
"include_non_maximum_suppression",
"else",
"\"confidence\"",
")",
"COORDINATES_STR",
"=",
"(",
"\"raw_coordinates\"",
"if",
"include_non_maximum_suppression",
"else",
"\"coordinates\"",
")",
"output_names",
"=",
"[",
"CONFIDENCE_STR",
",",
"COORDINATES_STR",
"]",
"output_dims",
"=",
"[",
"(",
"num_bounding_boxes",
",",
"num_classes",
")",
",",
"(",
"num_bounding_boxes",
",",
"4",
")",
",",
"]",
"output_types",
"=",
"[",
"datatypes",
".",
"Array",
"(",
"*",
"dim",
")",
"for",
"dim",
"in",
"output_dims",
"]",
"output_features",
"=",
"list",
"(",
"zip",
"(",
"output_names",
",",
"output_types",
")",
")",
"mode",
"=",
"None",
"builder",
"=",
"neural_network",
".",
"NeuralNetworkBuilder",
"(",
"input_features",
",",
"output_features",
",",
"mode",
")",
"_mxnet_converter",
".",
"convert",
"(",
"mod",
",",
"mode",
"=",
"None",
",",
"input_shape",
"=",
"[",
"(",
"self",
".",
"feature",
",",
"image_shape",
")",
"]",
",",
"builder",
"=",
"builder",
",",
"verbose",
"=",
"False",
")",
"prefix",
"=",
"'__tc__internal__'",
"# (1, B, C+5, S*S)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'ymap_sp_pre'",
",",
"target_shape",
"=",
"[",
"batch_size",
",",
"num_anchors",
",",
"preds_per_box",
",",
"num_spatial",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"'conv8_fwd_output'",
",",
"output_name",
"=",
"prefix",
"+",
"'ymap_sp_pre'",
")",
"# (1, C+5, B, S*S)",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"dim",
"=",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp_pre'",
",",
"output_name",
"=",
"prefix",
"+",
"'ymap_sp'",
")",
"# POSITION: X/Y",
"# (1, 2, B, S*S)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'raw_rel_xy_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"0",
",",
"end_index",
"=",
"2",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'raw_rel_xy_sp'",
")",
"# (1, 2, B, S*S)",
"builder",
".",
"add_activation",
"(",
"name",
"=",
"prefix",
"+",
"'rel_xy_sp'",
",",
"non_linearity",
"=",
"'SIGMOID'",
",",
"input_name",
"=",
"prefix",
"+",
"'raw_rel_xy_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_xy_sp'",
")",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'rel_xy'",
",",
"target_shape",
"=",
"[",
"batch_size",
",",
"2",
",",
"num_bounding_boxes",
",",
"1",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'rel_xy_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_xy'",
")",
"c_xy",
"=",
"_np",
".",
"array",
"(",
"_np",
".",
"meshgrid",
"(",
"_np",
".",
"arange",
"(",
"self",
".",
"_grid_shape",
"[",
"1",
"]",
")",
",",
"_np",
".",
"arange",
"(",
"self",
".",
"_grid_shape",
"[",
"0",
"]",
")",
")",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
"c_xy_reshaped",
"=",
"(",
"_np",
".",
"tile",
"(",
"c_xy",
"[",
":",
",",
"_np",
".",
"newaxis",
"]",
",",
"(",
"num_anchors",
",",
"1",
",",
"1",
")",
")",
".",
"reshape",
"(",
"2",
",",
"-",
"1",
")",
")",
"[",
"_np",
".",
"newaxis",
",",
"...",
",",
"_np",
".",
"newaxis",
"]",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_load_constant",
"(",
"prefix",
"+",
"'constant_xy'",
",",
"constant_value",
"=",
"c_xy_reshaped",
",",
"shape",
"=",
"c_xy_reshaped",
".",
"shape",
"[",
"1",
":",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'constant_xy'",
")",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'xy'",
",",
"mode",
"=",
"'ADD'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'constant_xy'",
",",
"prefix",
"+",
"'rel_xy'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'xy'",
")",
"# SHAPE: WIDTH/HEIGHT",
"# (1, 2, B, S*S)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'raw_rel_wh_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"2",
",",
"end_index",
"=",
"4",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'raw_rel_wh_sp'",
")",
"# (1, 2, B, S*S)",
"builder",
".",
"add_unary",
"(",
"name",
"=",
"prefix",
"+",
"'rel_wh_sp'",
",",
"mode",
"=",
"'exp'",
",",
"input_name",
"=",
"prefix",
"+",
"'raw_rel_wh_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_wh_sp'",
")",
"# (1, 2*B, S, S)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'rel_wh'",
",",
"target_shape",
"=",
"[",
"batch_size",
",",
"2",
"*",
"num_anchors",
"]",
"+",
"list",
"(",
"self",
".",
"_grid_shape",
")",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'rel_wh_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_wh'",
")",
"np_anchors",
"=",
"_np",
".",
"asarray",
"(",
"self",
".",
"anchors",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
".",
"T",
"anchors_0",
"=",
"_np",
".",
"tile",
"(",
"np_anchors",
".",
"reshape",
"(",
"[",
"2",
"*",
"num_anchors",
",",
"1",
",",
"1",
"]",
")",
",",
"self",
".",
"_grid_shape",
")",
"# (1, 2*B, S, S)",
"builder",
".",
"add_load_constant",
"(",
"name",
"=",
"prefix",
"+",
"'c_anchors'",
",",
"constant_value",
"=",
"anchors_0",
",",
"shape",
"=",
"anchors_0",
".",
"shape",
",",
"output_name",
"=",
"prefix",
"+",
"'c_anchors'",
")",
"# (1, 2*B, S, S)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'wh_pre'",
",",
"mode",
"=",
"'MULTIPLY'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'c_anchors'",
",",
"prefix",
"+",
"'rel_wh'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'wh_pre'",
")",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'wh'",
",",
"target_shape",
"=",
"[",
"1",
",",
"2",
",",
"num_bounding_boxes",
",",
"1",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'wh_pre'",
",",
"output_name",
"=",
"prefix",
"+",
"'wh'",
")",
"# (1, 4, B*H*W, 1)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'boxes_out_transposed'",
",",
"mode",
"=",
"'CONCAT'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'xy'",
",",
"prefix",
"+",
"'wh'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'boxes_out_transposed'",
")",
"# (1, B*H*W, 4, 1)",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"prefix",
"+",
"'boxes_out'",
",",
"dim",
"=",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
",",
"input_name",
"=",
"prefix",
"+",
"'boxes_out_transposed'",
",",
"output_name",
"=",
"prefix",
"+",
"'boxes_out'",
")",
"scale",
"=",
"_np",
".",
"zeros",
"(",
"(",
"num_bounding_boxes",
",",
"4",
",",
"1",
")",
")",
"scale",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
"=",
"1.0",
"/",
"self",
".",
"_grid_shape",
"[",
"1",
"]",
"scale",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
"=",
"1.0",
"/",
"self",
".",
"_grid_shape",
"[",
"0",
"]",
"# (1, B*H*W, 4, 1)",
"builder",
".",
"add_scale",
"(",
"name",
"=",
"COORDINATES_STR",
",",
"W",
"=",
"scale",
",",
"b",
"=",
"0",
",",
"has_bias",
"=",
"False",
",",
"shape_scale",
"=",
"(",
"num_bounding_boxes",
",",
"4",
",",
"1",
")",
",",
"input_name",
"=",
"prefix",
"+",
"'boxes_out'",
",",
"output_name",
"=",
"COORDINATES_STR",
")",
"# CLASS PROBABILITIES AND OBJECT CONFIDENCE",
"# (1, C, B, H*W)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'scores_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"5",
",",
"end_index",
"=",
"preds_per_box",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'scores_sp'",
")",
"# (1, C, B, H*W)",
"builder",
".",
"add_softmax",
"(",
"name",
"=",
"prefix",
"+",
"'probs_sp'",
",",
"input_name",
"=",
"prefix",
"+",
"'scores_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'probs_sp'",
")",
"# (1, 1, B, H*W)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'logit_conf_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"4",
",",
"end_index",
"=",
"5",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'logit_conf_sp'",
")",
"# (1, 1, B, H*W)",
"builder",
".",
"add_activation",
"(",
"name",
"=",
"prefix",
"+",
"'conf_sp'",
",",
"non_linearity",
"=",
"'SIGMOID'",
",",
"input_name",
"=",
"prefix",
"+",
"'logit_conf_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'conf_sp'",
")",
"# (1, C, B, H*W)",
"if",
"num_classes",
">",
"1",
":",
"conf",
"=",
"prefix",
"+",
"'conf_tiled_sp'",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'conf_tiled_sp'",
",",
"mode",
"=",
"'CONCAT'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'conf_sp'",
"]",
"*",
"num_classes",
",",
"output_name",
"=",
"conf",
")",
"else",
":",
"conf",
"=",
"prefix",
"+",
"'conf_sp'",
"# (1, C, B, H*W)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'confprobs_sp'",
",",
"mode",
"=",
"'MULTIPLY'",
",",
"input_names",
"=",
"[",
"conf",
",",
"prefix",
"+",
"'probs_sp'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'confprobs_sp'",
")",
"# (1, C, B*H*W, 1)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'confprobs_transposed'",
",",
"target_shape",
"=",
"[",
"1",
",",
"num_classes",
",",
"num_bounding_boxes",
",",
"1",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'confprobs_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'confprobs_transposed'",
")",
"# (1, B*H*W, C, 1)",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"CONFIDENCE_STR",
",",
"dim",
"=",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
",",
"input_name",
"=",
"prefix",
"+",
"'confprobs_transposed'",
",",
"output_name",
"=",
"CONFIDENCE_STR",
")",
"_mxnet_converter",
".",
"_set_input_output_layers",
"(",
"builder",
",",
"input_names",
",",
"output_names",
")",
"builder",
".",
"set_input",
"(",
"input_names",
",",
"input_dims",
")",
"builder",
".",
"set_output",
"(",
"output_names",
",",
"output_dims",
")",
"builder",
".",
"set_pre_processing_parameters",
"(",
"image_input_names",
"=",
"self",
".",
"feature",
")",
"model",
"=",
"builder",
".",
"spec",
"if",
"include_non_maximum_suppression",
":",
"# Non-Maximum Suppression is a post-processing algorithm",
"# responsible for merging all detections that belong to the",
"# same object.",
"# Core ML schematic ",
"# +------------------------------------+",
"# | Pipeline |",
"# | |",
"# | +------------+ +-------------+ |",
"# | | Neural | | Non-maximum | |",
"# | | network +---> suppression +-----> confidences",
"# Image +----> | | | |",
"# | | +---> +-----> coordinates",
"# | | | | | |",
"# Optional inputs: | +------------+ +-^---^-------+ |",
"# | | | |",
"# IOU threshold +-----------------------+ | |",
"# | | |",
"# Confidence threshold +---------------------------+ |",
"# +------------------------------------+",
"model_neural_network",
"=",
"model",
".",
"neuralNetwork",
"model",
".",
"specificationVersion",
"=",
"3",
"model",
".",
"pipeline",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"pipeline",
".",
"models",
".",
"add",
"(",
")",
"model",
".",
"pipeline",
".",
"models",
"[",
"0",
"]",
".",
"neuralNetwork",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"pipeline",
".",
"models",
".",
"add",
"(",
")",
"model",
".",
"pipeline",
".",
"models",
"[",
"1",
"]",
".",
"nonMaximumSuppression",
".",
"ParseFromString",
"(",
"b''",
")",
"# begin: Neural network model",
"nn_model",
"=",
"model",
".",
"pipeline",
".",
"models",
"[",
"0",
"]",
"nn_model",
".",
"description",
".",
"ParseFromString",
"(",
"b''",
")",
"input_image",
"=",
"model",
".",
"description",
".",
"input",
"[",
"0",
"]",
"input_image",
".",
"type",
".",
"imageType",
".",
"width",
"=",
"self",
".",
"input_image_shape",
"[",
"1",
"]",
"input_image",
".",
"type",
".",
"imageType",
".",
"height",
"=",
"self",
".",
"input_image_shape",
"[",
"2",
"]",
"nn_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nn_model",
".",
"description",
".",
"input",
"[",
"0",
"]",
".",
"ParseFromString",
"(",
"input_image",
".",
"SerializeToString",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"del",
"model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"type",
".",
"multiArrayType",
".",
"shape",
"[",
":",
"]",
"names",
"=",
"[",
"\"raw_confidence\"",
",",
"\"raw_coordinates\"",
"]",
"bounds",
"=",
"[",
"self",
".",
"num_classes",
",",
"4",
"]",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"output_i",
"=",
"model",
".",
"description",
".",
"output",
"[",
"i",
"]",
"output_i",
".",
"name",
"=",
"names",
"[",
"i",
"]",
"for",
"j",
"in",
"range",
"(",
"2",
")",
":",
"ma_type",
"=",
"output_i",
".",
"type",
".",
"multiArrayType",
"ma_type",
".",
"shapeRange",
".",
"sizeRanges",
".",
"add",
"(",
")",
"ma_type",
".",
"shapeRange",
".",
"sizeRanges",
"[",
"j",
"]",
".",
"lowerBound",
"=",
"(",
"bounds",
"[",
"i",
"]",
"if",
"j",
"==",
"1",
"else",
"0",
")",
"ma_type",
".",
"shapeRange",
".",
"sizeRanges",
"[",
"j",
"]",
".",
"upperBound",
"=",
"(",
"bounds",
"[",
"i",
"]",
"if",
"j",
"==",
"1",
"else",
"-",
"1",
")",
"nn_model",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"nn_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"output_i",
".",
"SerializeToString",
"(",
")",
")",
"ma_type",
"=",
"nn_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"type",
".",
"multiArrayType",
"ma_type",
".",
"shape",
".",
"append",
"(",
"num_bounding_boxes",
")",
"ma_type",
".",
"shape",
".",
"append",
"(",
"bounds",
"[",
"i",
"]",
")",
"# Think more about this line",
"nn_model",
".",
"neuralNetwork",
".",
"ParseFromString",
"(",
"model_neural_network",
".",
"SerializeToString",
"(",
")",
")",
"nn_model",
".",
"specificationVersion",
"=",
"model",
".",
"specificationVersion",
"# end: Neural network model",
"# begin: Non maximum suppression model",
"nms_model",
"=",
"model",
".",
"pipeline",
".",
"models",
"[",
"1",
"]",
"nms_model_nonMaxSup",
"=",
"nms_model",
".",
"nonMaximumSuppression",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"output_i",
"=",
"model",
".",
"description",
".",
"output",
"[",
"i",
"]",
"nms_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"input",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"output_i",
".",
"SerializeToString",
"(",
")",
")",
"nms_model",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"output_i",
".",
"SerializeToString",
"(",
")",
")",
"nms_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"name",
"=",
"(",
"'confidence'",
"if",
"i",
"==",
"0",
"else",
"'coordinates'",
")",
"nms_model_nonMaxSup",
".",
"iouThreshold",
"=",
"iou_threshold",
"nms_model_nonMaxSup",
".",
"confidenceThreshold",
"=",
"confidence_threshold",
"nms_model_nonMaxSup",
".",
"confidenceInputFeatureName",
"=",
"'raw_confidence'",
"nms_model_nonMaxSup",
".",
"coordinatesInputFeatureName",
"=",
"'raw_coordinates'",
"nms_model_nonMaxSup",
".",
"confidenceOutputFeatureName",
"=",
"'confidence'",
"nms_model_nonMaxSup",
".",
"coordinatesOutputFeatureName",
"=",
"'coordinates'",
"nms_model",
".",
"specificationVersion",
"=",
"model",
".",
"specificationVersion",
"nms_model_nonMaxSup",
".",
"stringClassLabels",
".",
"vector",
".",
"extend",
"(",
"self",
".",
"classes",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"nms_model",
".",
"description",
".",
"input",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"nn_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"SerializeToString",
"(",
")",
")",
"if",
"include_non_maximum_suppression",
":",
"# Iou Threshold",
"IOU_THRESHOLD_STRING",
"=",
"'iouThreshold'",
"model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"type",
".",
"doubleType",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"name",
"=",
"IOU_THRESHOLD_STRING",
"nms_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"ParseFromString",
"(",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"SerializeToString",
"(",
")",
")",
"nms_model_nonMaxSup",
".",
"iouThresholdInputFeatureName",
"=",
"IOU_THRESHOLD_STRING",
"# Confidence Threshold",
"CONFIDENCE_THRESHOLD_STRING",
"=",
"'confidenceThreshold'",
"model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"type",
".",
"doubleType",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"name",
"=",
"CONFIDENCE_THRESHOLD_STRING",
"nms_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"input",
"[",
"3",
"]",
".",
"ParseFromString",
"(",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"SerializeToString",
"(",
")",
")",
"nms_model_nonMaxSup",
".",
"confidenceThresholdInputFeatureName",
"=",
"CONFIDENCE_THRESHOLD_STRING",
"# end: Non maximum suppression model",
"model",
".",
"description",
".",
"output",
"[",
"0",
"]",
".",
"name",
"=",
"'confidence'",
"model",
".",
"description",
".",
"output",
"[",
"1",
"]",
".",
"name",
"=",
"'coordinates'",
"iouThresholdString",
"=",
"'(optional) IOU Threshold override (default: {})'",
"confidenceThresholdString",
"=",
"(",
"'(optional)'",
"+",
"' Confidence Threshold override (default: {})'",
")",
"model_type",
"=",
"'object detector (%s)'",
"%",
"self",
".",
"model",
"if",
"include_non_maximum_suppression",
":",
"model_type",
"+=",
"' with non-maximum suppression'",
"model",
".",
"description",
".",
"metadata",
".",
"shortDescription",
"=",
"_coreml_utils",
".",
"_mlmodel_short_description",
"(",
"model_type",
")",
"model",
".",
"description",
".",
"input",
"[",
"0",
"]",
".",
"shortDescription",
"=",
"'Input image'",
"if",
"include_non_maximum_suppression",
":",
"iouThresholdString",
"=",
"'(optional) IOU Threshold override (default: {})'",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"shortDescription",
"=",
"iouThresholdString",
".",
"format",
"(",
"iou_threshold",
")",
"confidenceThresholdString",
"=",
"(",
"'(optional)'",
"+",
"' Confidence Threshold override (default: {})'",
")",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"shortDescription",
"=",
"confidenceThresholdString",
".",
"format",
"(",
"confidence_threshold",
")",
"model",
".",
"description",
".",
"output",
"[",
"0",
"]",
".",
"shortDescription",
"=",
"u'Boxes \\xd7 Class confidence (see user-defined metadata \"classes\")'",
"model",
".",
"description",
".",
"output",
"[",
"1",
"]",
".",
"shortDescription",
"=",
"u'Boxes \\xd7 [x, y, width, height] (relative to image size)'",
"version",
"=",
"ObjectDetector",
".",
"_PYTHON_OBJECT_DETECTOR_VERSION",
"partial_user_defined_metadata",
"=",
"{",
"'model'",
":",
"self",
".",
"model",
",",
"'max_iterations'",
":",
"str",
"(",
"self",
".",
"max_iterations",
")",
",",
"'training_iterations'",
":",
"str",
"(",
"self",
".",
"training_iterations",
")",
",",
"'include_non_maximum_suppression'",
":",
"str",
"(",
"include_non_maximum_suppression",
")",
",",
"'non_maximum_suppression_threshold'",
":",
"str",
"(",
"iou_threshold",
")",
",",
"'confidence_threshold'",
":",
"str",
"(",
"confidence_threshold",
")",
",",
"'iou_threshold'",
":",
"str",
"(",
"iou_threshold",
")",
",",
"'feature'",
":",
"self",
".",
"feature",
",",
"'annotations'",
":",
"self",
".",
"annotations",
",",
"'classes'",
":",
"','",
".",
"join",
"(",
"self",
".",
"classes",
")",
"}",
"user_defined_metadata",
"=",
"_coreml_utils",
".",
"_get_model_metadata",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"partial_user_defined_metadata",
",",
"version",
")",
"model",
".",
"description",
".",
"metadata",
".",
"userDefined",
".",
"update",
"(",
"user_defined_metadata",
")",
"from",
"coremltools",
".",
"models",
".",
"utils",
"import",
"save_spec",
"as",
"_save_spec",
"_save_spec",
"(",
"model",
",",
"filename",
")"
] | Save the model in Core ML format. The Core ML model takes an image of
fixed size as input and produces two output arrays: `confidence` and
`coordinates`.
The first one, `confidence` is an `N`-by-`C` array, where `N` is the
number of instances predicted and `C` is the number of classes. The
number `N` is fixed and will include many low-confidence predictions.
The instances are not sorted by confidence, so the first one will
generally not have the highest confidence (unlike in `predict`). Also
unlike the `predict` function, the instances have not undergone
what is called `non-maximum suppression`, which means there could be
several instances close in location and size that have all discovered
the same object instance. Confidences do not need to sum to 1 over the
classes; any remaining probability is implied as confidence there is no
object instance present at all at the given coordinates. The classes
appear in the array alphabetically sorted.
The second array `coordinates` is of size `N`-by-4, where the first
dimension `N` again represents instances and corresponds to the
`confidence` array. The second dimension represents `x`, `y`, `width`,
`height`, in that order. The values are represented in relative
coordinates, so (0.5, 0.5) represents the center of the image and (1,
1) the bottom right corner. You will need to multiply the relative
values with the original image size before you resized it to the fixed
input size to get pixel-value coordinates similar to `predict`.
See Also
--------
save
Parameters
----------
filename : string
The path of the file where we want to save the Core ML model.
include_non_maximum_suppression : bool
Non-maximum suppression is only available in iOS 12+.
A boolean parameter to indicate whether the Core ML model should be
saved with built-in non-maximum suppression or not.
This parameter is set to True by default.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
Examples
--------
>>> model.export_coreml('detector.mlmodel') | [
"Save",
"the",
"model",
"in",
"Core",
"ML",
"format",
".",
"The",
"Core",
"ML",
"model",
"takes",
"an",
"image",
"of",
"fixed",
"size",
"as",
"input",
"and",
"produces",
"two",
"output",
"arrays",
":",
"confidence",
"and",
"coordinates",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/object_detector.py#L1136-L1631 | train |
apple/turicreate | deps/src/cmake-3.13.4/Utilities/Sphinx/cmake.py | CMakeTransform.parse_title | def parse_title(self, docname):
"""Parse a document title as the first line starting in [A-Za-z0-9<]
or fall back to the document basename if no such line exists.
The cmake --help-*-list commands also depend on this convention.
Return the title or False if the document file does not exist.
"""
env = self.document.settings.env
title = self.titles.get(docname)
if title is None:
fname = os.path.join(env.srcdir, docname+'.rst')
try:
f = open(fname, 'r')
except IOError:
title = False
else:
for line in f:
if len(line) > 0 and (line[0].isalnum() or line[0] == '<'):
title = line.rstrip()
break
f.close()
if title is None:
title = os.path.basename(docname)
self.titles[docname] = title
return title | python | def parse_title(self, docname):
"""Parse a document title as the first line starting in [A-Za-z0-9<]
or fall back to the document basename if no such line exists.
The cmake --help-*-list commands also depend on this convention.
Return the title or False if the document file does not exist.
"""
env = self.document.settings.env
title = self.titles.get(docname)
if title is None:
fname = os.path.join(env.srcdir, docname+'.rst')
try:
f = open(fname, 'r')
except IOError:
title = False
else:
for line in f:
if len(line) > 0 and (line[0].isalnum() or line[0] == '<'):
title = line.rstrip()
break
f.close()
if title is None:
title = os.path.basename(docname)
self.titles[docname] = title
return title | [
"def",
"parse_title",
"(",
"self",
",",
"docname",
")",
":",
"env",
"=",
"self",
".",
"document",
".",
"settings",
".",
"env",
"title",
"=",
"self",
".",
"titles",
".",
"get",
"(",
"docname",
")",
"if",
"title",
"is",
"None",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env",
".",
"srcdir",
",",
"docname",
"+",
"'.rst'",
")",
"try",
":",
"f",
"=",
"open",
"(",
"fname",
",",
"'r'",
")",
"except",
"IOError",
":",
"title",
"=",
"False",
"else",
":",
"for",
"line",
"in",
"f",
":",
"if",
"len",
"(",
"line",
")",
">",
"0",
"and",
"(",
"line",
"[",
"0",
"]",
".",
"isalnum",
"(",
")",
"or",
"line",
"[",
"0",
"]",
"==",
"'<'",
")",
":",
"title",
"=",
"line",
".",
"rstrip",
"(",
")",
"break",
"f",
".",
"close",
"(",
")",
"if",
"title",
"is",
"None",
":",
"title",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"docname",
")",
"self",
".",
"titles",
"[",
"docname",
"]",
"=",
"title",
"return",
"title"
] | Parse a document title as the first line starting in [A-Za-z0-9<]
or fall back to the document basename if no such line exists.
The cmake --help-*-list commands also depend on this convention.
Return the title or False if the document file does not exist. | [
"Parse",
"a",
"document",
"title",
"as",
"the",
"first",
"line",
"starting",
"in",
"[",
"A",
"-",
"Za",
"-",
"z0",
"-",
"9<",
"]",
"or",
"fall",
"back",
"to",
"the",
"document",
"basename",
"if",
"no",
"such",
"line",
"exists",
".",
"The",
"cmake",
"--",
"help",
"-",
"*",
"-",
"list",
"commands",
"also",
"depend",
"on",
"this",
"convention",
".",
"Return",
"the",
"title",
"or",
"False",
"if",
"the",
"document",
"file",
"does",
"not",
"exist",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Utilities/Sphinx/cmake.py#L181-L204 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | registerErrorHandler | def registerErrorHandler(f, ctx):
"""Register a Python written function to for error reporting.
The function is called back as f(ctx, error). """
import sys
if 'libxslt' not in sys.modules:
# normal behaviour when libxslt is not imported
ret = libxml2mod.xmlRegisterErrorHandler(f,ctx)
else:
# when libxslt is already imported, one must
# use libxst's error handler instead
import libxslt
ret = libxslt.registerErrorHandler(f,ctx)
return ret | python | def registerErrorHandler(f, ctx):
"""Register a Python written function to for error reporting.
The function is called back as f(ctx, error). """
import sys
if 'libxslt' not in sys.modules:
# normal behaviour when libxslt is not imported
ret = libxml2mod.xmlRegisterErrorHandler(f,ctx)
else:
# when libxslt is already imported, one must
# use libxst's error handler instead
import libxslt
ret = libxslt.registerErrorHandler(f,ctx)
return ret | [
"def",
"registerErrorHandler",
"(",
"f",
",",
"ctx",
")",
":",
"import",
"sys",
"if",
"'libxslt'",
"not",
"in",
"sys",
".",
"modules",
":",
"# normal behaviour when libxslt is not imported",
"ret",
"=",
"libxml2mod",
".",
"xmlRegisterErrorHandler",
"(",
"f",
",",
"ctx",
")",
"else",
":",
"# when libxslt is already imported, one must",
"# use libxst's error handler instead",
"import",
"libxslt",
"ret",
"=",
"libxslt",
".",
"registerErrorHandler",
"(",
"f",
",",
"ctx",
")",
"return",
"ret"
] | Register a Python written function to for error reporting.
The function is called back as f(ctx, error). | [
"Register",
"a",
"Python",
"written",
"function",
"to",
"for",
"error",
"reporting",
".",
"The",
"function",
"is",
"called",
"back",
"as",
"f",
"(",
"ctx",
"error",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L630-L642 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | _xmlTextReaderErrorFunc | def _xmlTextReaderErrorFunc(xxx_todo_changeme,msg,severity,locator):
"""Intermediate callback to wrap the locator"""
(f,arg) = xxx_todo_changeme
return f(arg,msg,severity,xmlTextReaderLocator(locator)) | python | def _xmlTextReaderErrorFunc(xxx_todo_changeme,msg,severity,locator):
"""Intermediate callback to wrap the locator"""
(f,arg) = xxx_todo_changeme
return f(arg,msg,severity,xmlTextReaderLocator(locator)) | [
"def",
"_xmlTextReaderErrorFunc",
"(",
"xxx_todo_changeme",
",",
"msg",
",",
"severity",
",",
"locator",
")",
":",
"(",
"f",
",",
"arg",
")",
"=",
"xxx_todo_changeme",
"return",
"f",
"(",
"arg",
",",
"msg",
",",
"severity",
",",
"xmlTextReaderLocator",
"(",
"locator",
")",
")"
] | Intermediate callback to wrap the locator | [
"Intermediate",
"callback",
"to",
"wrap",
"the",
"locator"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L713-L716 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlCreateMemoryParserCtxt | def htmlCreateMemoryParserCtxt(buffer, size):
"""Create a parser context for an HTML in-memory document. """
ret = libxml2mod.htmlCreateMemoryParserCtxt(buffer, size)
if ret is None:raise parserError('htmlCreateMemoryParserCtxt() failed')
return parserCtxt(_obj=ret) | python | def htmlCreateMemoryParserCtxt(buffer, size):
"""Create a parser context for an HTML in-memory document. """
ret = libxml2mod.htmlCreateMemoryParserCtxt(buffer, size)
if ret is None:raise parserError('htmlCreateMemoryParserCtxt() failed')
return parserCtxt(_obj=ret) | [
"def",
"htmlCreateMemoryParserCtxt",
"(",
"buffer",
",",
"size",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlCreateMemoryParserCtxt",
"(",
"buffer",
",",
"size",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'htmlCreateMemoryParserCtxt() failed'",
")",
"return",
"parserCtxt",
"(",
"_obj",
"=",
"ret",
")"
] | Create a parser context for an HTML in-memory document. | [
"Create",
"a",
"parser",
"context",
"for",
"an",
"HTML",
"in",
"-",
"memory",
"document",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L791-L795 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlParseDoc | def htmlParseDoc(cur, encoding):
"""parse an HTML in-memory document and build a tree. """
ret = libxml2mod.htmlParseDoc(cur, encoding)
if ret is None:raise parserError('htmlParseDoc() failed')
return xmlDoc(_obj=ret) | python | def htmlParseDoc(cur, encoding):
"""parse an HTML in-memory document and build a tree. """
ret = libxml2mod.htmlParseDoc(cur, encoding)
if ret is None:raise parserError('htmlParseDoc() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlParseDoc",
"(",
"cur",
",",
"encoding",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlParseDoc",
"(",
"cur",
",",
"encoding",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'htmlParseDoc() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an HTML in-memory document and build a tree. | [
"parse",
"an",
"HTML",
"in",
"-",
"memory",
"document",
"and",
"build",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L814-L818 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlParseFile | def htmlParseFile(filename, encoding):
"""parse an HTML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. """
ret = libxml2mod.htmlParseFile(filename, encoding)
if ret is None:raise parserError('htmlParseFile() failed')
return xmlDoc(_obj=ret) | python | def htmlParseFile(filename, encoding):
"""parse an HTML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. """
ret = libxml2mod.htmlParseFile(filename, encoding)
if ret is None:raise parserError('htmlParseFile() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlParseFile",
"(",
"filename",
",",
"encoding",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlParseFile",
"(",
"filename",
",",
"encoding",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'htmlParseFile() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an HTML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. | [
"parse",
"an",
"HTML",
"file",
"and",
"build",
"a",
"tree",
".",
"Automatic",
"support",
"for",
"ZLIB",
"/",
"Compress",
"compressed",
"document",
"is",
"provided",
"by",
"default",
"if",
"found",
"at",
"compile",
"-",
"time",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L820-L826 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlReadDoc | def htmlReadDoc(cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.htmlReadDoc(cur, URL, encoding, options)
if ret is None:raise treeError('htmlReadDoc() failed')
return xmlDoc(_obj=ret) | python | def htmlReadDoc(cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.htmlReadDoc(cur, URL, encoding, options)
if ret is None:raise treeError('htmlReadDoc() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlReadDoc",
"(",
"cur",
",",
"URL",
",",
"encoding",
",",
"options",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlReadDoc",
"(",
"cur",
",",
"URL",
",",
"encoding",
",",
"options",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'htmlReadDoc() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML in-memory document and build a tree. | [
"parse",
"an",
"XML",
"in",
"-",
"memory",
"document",
"and",
"build",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L828-L832 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlReadFd | def htmlReadFd(fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. """
ret = libxml2mod.htmlReadFd(fd, URL, encoding, options)
if ret is None:raise treeError('htmlReadFd() failed')
return xmlDoc(_obj=ret) | python | def htmlReadFd(fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. """
ret = libxml2mod.htmlReadFd(fd, URL, encoding, options)
if ret is None:raise treeError('htmlReadFd() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlReadFd",
"(",
"fd",
",",
"URL",
",",
"encoding",
",",
"options",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlReadFd",
"(",
"fd",
",",
"URL",
",",
"encoding",
",",
"options",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'htmlReadFd() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML from a file descriptor and build a tree. | [
"parse",
"an",
"XML",
"from",
"a",
"file",
"descriptor",
"and",
"build",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L834-L838 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlReadFile | def htmlReadFile(filename, encoding, options):
"""parse an XML file from the filesystem or the network. """
ret = libxml2mod.htmlReadFile(filename, encoding, options)
if ret is None:raise treeError('htmlReadFile() failed')
return xmlDoc(_obj=ret) | python | def htmlReadFile(filename, encoding, options):
"""parse an XML file from the filesystem or the network. """
ret = libxml2mod.htmlReadFile(filename, encoding, options)
if ret is None:raise treeError('htmlReadFile() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlReadFile",
"(",
"filename",
",",
"encoding",
",",
"options",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlReadFile",
"(",
"filename",
",",
"encoding",
",",
"options",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'htmlReadFile() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML file from the filesystem or the network. | [
"parse",
"an",
"XML",
"file",
"from",
"the",
"filesystem",
"or",
"the",
"network",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L840-L844 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlReadMemory | def htmlReadMemory(buffer, size, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.htmlReadMemory(buffer, size, URL, encoding, options)
if ret is None:raise treeError('htmlReadMemory() failed')
return xmlDoc(_obj=ret) | python | def htmlReadMemory(buffer, size, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.htmlReadMemory(buffer, size, URL, encoding, options)
if ret is None:raise treeError('htmlReadMemory() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlReadMemory",
"(",
"buffer",
",",
"size",
",",
"URL",
",",
"encoding",
",",
"options",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlReadMemory",
"(",
"buffer",
",",
"size",
",",
"URL",
",",
"encoding",
",",
"options",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'htmlReadMemory() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML in-memory document and build a tree. | [
"parse",
"an",
"XML",
"in",
"-",
"memory",
"document",
"and",
"build",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L846-L850 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlNewDoc | def htmlNewDoc(URI, ExternalID):
"""Creates a new HTML document """
ret = libxml2mod.htmlNewDoc(URI, ExternalID)
if ret is None:raise treeError('htmlNewDoc() failed')
return xmlDoc(_obj=ret) | python | def htmlNewDoc(URI, ExternalID):
"""Creates a new HTML document """
ret = libxml2mod.htmlNewDoc(URI, ExternalID)
if ret is None:raise treeError('htmlNewDoc() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlNewDoc",
"(",
"URI",
",",
"ExternalID",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlNewDoc",
"(",
"URI",
",",
"ExternalID",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'htmlNewDoc() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | Creates a new HTML document | [
"Creates",
"a",
"new",
"HTML",
"document"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L861-L865 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | htmlNewDocNoDtD | def htmlNewDocNoDtD(URI, ExternalID):
"""Creates a new HTML document without a DTD node if @URI and
@ExternalID are None """
ret = libxml2mod.htmlNewDocNoDtD(URI, ExternalID)
if ret is None:raise treeError('htmlNewDocNoDtD() failed')
return xmlDoc(_obj=ret) | python | def htmlNewDocNoDtD(URI, ExternalID):
"""Creates a new HTML document without a DTD node if @URI and
@ExternalID are None """
ret = libxml2mod.htmlNewDocNoDtD(URI, ExternalID)
if ret is None:raise treeError('htmlNewDocNoDtD() failed')
return xmlDoc(_obj=ret) | [
"def",
"htmlNewDocNoDtD",
"(",
"URI",
",",
"ExternalID",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlNewDocNoDtD",
"(",
"URI",
",",
"ExternalID",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'htmlNewDocNoDtD() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | Creates a new HTML document without a DTD node if @URI and
@ExternalID are None | [
"Creates",
"a",
"new",
"HTML",
"document",
"without",
"a",
"DTD",
"node",
"if"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L867-L872 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | catalogAdd | def catalogAdd(type, orig, replace):
"""Add an entry in the catalog, it may overwrite existing but
different entries. If called before any other catalog
routine, allows to override the default shared catalog put
in place by xmlInitializeCatalog(); """
ret = libxml2mod.xmlCatalogAdd(type, orig, replace)
return ret | python | def catalogAdd(type, orig, replace):
"""Add an entry in the catalog, it may overwrite existing but
different entries. If called before any other catalog
routine, allows to override the default shared catalog put
in place by xmlInitializeCatalog(); """
ret = libxml2mod.xmlCatalogAdd(type, orig, replace)
return ret | [
"def",
"catalogAdd",
"(",
"type",
",",
"orig",
",",
"replace",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlCatalogAdd",
"(",
"type",
",",
"orig",
",",
"replace",
")",
"return",
"ret"
] | Add an entry in the catalog, it may overwrite existing but
different entries. If called before any other catalog
routine, allows to override the default shared catalog put
in place by xmlInitializeCatalog(); | [
"Add",
"an",
"entry",
"in",
"the",
"catalog",
"it",
"may",
"overwrite",
"existing",
"but",
"different",
"entries",
".",
"If",
"called",
"before",
"any",
"other",
"catalog",
"routine",
"allows",
"to",
"override",
"the",
"default",
"shared",
"catalog",
"put",
"in",
"place",
"by",
"xmlInitializeCatalog",
"()",
";"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L903-L909 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | loadACatalog | def loadACatalog(filename):
"""Load the catalog and build the associated data structures.
This can be either an XML Catalog or an SGML Catalog It
will recurse in SGML CATALOG entries. On the other hand XML
Catalogs are not handled recursively. """
ret = libxml2mod.xmlLoadACatalog(filename)
if ret is None:raise treeError('xmlLoadACatalog() failed')
return catalog(_obj=ret) | python | def loadACatalog(filename):
"""Load the catalog and build the associated data structures.
This can be either an XML Catalog or an SGML Catalog It
will recurse in SGML CATALOG entries. On the other hand XML
Catalogs are not handled recursively. """
ret = libxml2mod.xmlLoadACatalog(filename)
if ret is None:raise treeError('xmlLoadACatalog() failed')
return catalog(_obj=ret) | [
"def",
"loadACatalog",
"(",
"filename",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlLoadACatalog",
"(",
"filename",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlLoadACatalog() failed'",
")",
"return",
"catalog",
"(",
"_obj",
"=",
"ret",
")"
] | Load the catalog and build the associated data structures.
This can be either an XML Catalog or an SGML Catalog It
will recurse in SGML CATALOG entries. On the other hand XML
Catalogs are not handled recursively. | [
"Load",
"the",
"catalog",
"and",
"build",
"the",
"associated",
"data",
"structures",
".",
"This",
"can",
"be",
"either",
"an",
"XML",
"Catalog",
"or",
"an",
"SGML",
"Catalog",
"It",
"will",
"recurse",
"in",
"SGML",
"CATALOG",
"entries",
".",
"On",
"the",
"other",
"hand",
"XML",
"Catalogs",
"are",
"not",
"handled",
"recursively",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L975-L982 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | loadSGMLSuperCatalog | def loadSGMLSuperCatalog(filename):
"""Load an SGML super catalog. It won't expand CATALOG or
DELEGATE references. This is only needed for manipulating
SGML Super Catalogs like adding and removing CATALOG or
DELEGATE entries. """
ret = libxml2mod.xmlLoadSGMLSuperCatalog(filename)
if ret is None:raise treeError('xmlLoadSGMLSuperCatalog() failed')
return catalog(_obj=ret) | python | def loadSGMLSuperCatalog(filename):
"""Load an SGML super catalog. It won't expand CATALOG or
DELEGATE references. This is only needed for manipulating
SGML Super Catalogs like adding and removing CATALOG or
DELEGATE entries. """
ret = libxml2mod.xmlLoadSGMLSuperCatalog(filename)
if ret is None:raise treeError('xmlLoadSGMLSuperCatalog() failed')
return catalog(_obj=ret) | [
"def",
"loadSGMLSuperCatalog",
"(",
"filename",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlLoadSGMLSuperCatalog",
"(",
"filename",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlLoadSGMLSuperCatalog() failed'",
")",
"return",
"catalog",
"(",
"_obj",
"=",
"ret",
")"
] | Load an SGML super catalog. It won't expand CATALOG or
DELEGATE references. This is only needed for manipulating
SGML Super Catalogs like adding and removing CATALOG or
DELEGATE entries. | [
"Load",
"an",
"SGML",
"super",
"catalog",
".",
"It",
"won",
"t",
"expand",
"CATALOG",
"or",
"DELEGATE",
"references",
".",
"This",
"is",
"only",
"needed",
"for",
"manipulating",
"SGML",
"Super",
"Catalogs",
"like",
"adding",
"and",
"removing",
"CATALOG",
"or",
"DELEGATE",
"entries",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L999-L1006 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | newCatalog | def newCatalog(sgml):
"""create a new Catalog. """
ret = libxml2mod.xmlNewCatalog(sgml)
if ret is None:raise treeError('xmlNewCatalog() failed')
return catalog(_obj=ret) | python | def newCatalog(sgml):
"""create a new Catalog. """
ret = libxml2mod.xmlNewCatalog(sgml)
if ret is None:raise treeError('xmlNewCatalog() failed')
return catalog(_obj=ret) | [
"def",
"newCatalog",
"(",
"sgml",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlNewCatalog",
"(",
"sgml",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlNewCatalog() failed'",
")",
"return",
"catalog",
"(",
"_obj",
"=",
"ret",
")"
] | create a new Catalog. | [
"create",
"a",
"new",
"Catalog",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1008-L1012 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | parseCatalogFile | def parseCatalogFile(filename):
"""parse an XML file and build a tree. It's like
xmlParseFile() except it bypass all catalog lookups. """
ret = libxml2mod.xmlParseCatalogFile(filename)
if ret is None:raise parserError('xmlParseCatalogFile() failed')
return xmlDoc(_obj=ret) | python | def parseCatalogFile(filename):
"""parse an XML file and build a tree. It's like
xmlParseFile() except it bypass all catalog lookups. """
ret = libxml2mod.xmlParseCatalogFile(filename)
if ret is None:raise parserError('xmlParseCatalogFile() failed')
return xmlDoc(_obj=ret) | [
"def",
"parseCatalogFile",
"(",
"filename",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlParseCatalogFile",
"(",
"filename",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlParseCatalogFile() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML file and build a tree. It's like
xmlParseFile() except it bypass all catalog lookups. | [
"parse",
"an",
"XML",
"file",
"and",
"build",
"a",
"tree",
".",
"It",
"s",
"like",
"xmlParseFile",
"()",
"except",
"it",
"bypass",
"all",
"catalog",
"lookups",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1014-L1019 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | debugDumpString | def debugDumpString(output, str):
"""Dumps informations about the string, shorten it if necessary """
if output is not None: output.flush()
libxml2mod.xmlDebugDumpString(output, str) | python | def debugDumpString(output, str):
"""Dumps informations about the string, shorten it if necessary """
if output is not None: output.flush()
libxml2mod.xmlDebugDumpString(output, str) | [
"def",
"debugDumpString",
"(",
"output",
",",
"str",
")",
":",
"if",
"output",
"is",
"not",
"None",
":",
"output",
".",
"flush",
"(",
")",
"libxml2mod",
".",
"xmlDebugDumpString",
"(",
"output",
",",
"str",
")"
] | Dumps informations about the string, shorten it if necessary | [
"Dumps",
"informations",
"about",
"the",
"string",
"shorten",
"it",
"if",
"necessary"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1080-L1083 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | predefinedEntity | def predefinedEntity(name):
"""Check whether this name is an predefined entity. """
ret = libxml2mod.xmlGetPredefinedEntity(name)
if ret is None:raise treeError('xmlGetPredefinedEntity() failed')
return xmlEntity(_obj=ret) | python | def predefinedEntity(name):
"""Check whether this name is an predefined entity. """
ret = libxml2mod.xmlGetPredefinedEntity(name)
if ret is None:raise treeError('xmlGetPredefinedEntity() failed')
return xmlEntity(_obj=ret) | [
"def",
"predefinedEntity",
"(",
"name",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlGetPredefinedEntity",
"(",
"name",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlGetPredefinedEntity() failed'",
")",
"return",
"xmlEntity",
"(",
"_obj",
"=",
"ret",
")"
] | Check whether this name is an predefined entity. | [
"Check",
"whether",
"this",
"name",
"is",
"an",
"predefined",
"entity",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1152-L1156 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | nanoFTPProxy | def nanoFTPProxy(host, port, user, passwd, type):
"""Setup the FTP proxy informations. This can also be done by
using ftp_proxy ftp_proxy_user and ftp_proxy_password
environment variables. """
libxml2mod.xmlNanoFTPProxy(host, port, user, passwd, type) | python | def nanoFTPProxy(host, port, user, passwd, type):
"""Setup the FTP proxy informations. This can also be done by
using ftp_proxy ftp_proxy_user and ftp_proxy_password
environment variables. """
libxml2mod.xmlNanoFTPProxy(host, port, user, passwd, type) | [
"def",
"nanoFTPProxy",
"(",
"host",
",",
"port",
",",
"user",
",",
"passwd",
",",
"type",
")",
":",
"libxml2mod",
".",
"xmlNanoFTPProxy",
"(",
"host",
",",
"port",
",",
"user",
",",
"passwd",
",",
"type",
")"
] | Setup the FTP proxy informations. This can also be done by
using ftp_proxy ftp_proxy_user and ftp_proxy_password
environment variables. | [
"Setup",
"the",
"FTP",
"proxy",
"informations",
".",
"This",
"can",
"also",
"be",
"done",
"by",
"using",
"ftp_proxy",
"ftp_proxy_user",
"and",
"ftp_proxy_password",
"environment",
"variables",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1232-L1236 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | createDocParserCtxt | def createDocParserCtxt(cur):
"""Creates a parser context for an XML in-memory document. """
ret = libxml2mod.xmlCreateDocParserCtxt(cur)
if ret is None:raise parserError('xmlCreateDocParserCtxt() failed')
return parserCtxt(_obj=ret) | python | def createDocParserCtxt(cur):
"""Creates a parser context for an XML in-memory document. """
ret = libxml2mod.xmlCreateDocParserCtxt(cur)
if ret is None:raise parserError('xmlCreateDocParserCtxt() failed')
return parserCtxt(_obj=ret) | [
"def",
"createDocParserCtxt",
"(",
"cur",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlCreateDocParserCtxt",
"(",
"cur",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlCreateDocParserCtxt() failed'",
")",
"return",
"parserCtxt",
"(",
"_obj",
"=",
"ret",
")"
] | Creates a parser context for an XML in-memory document. | [
"Creates",
"a",
"parser",
"context",
"for",
"an",
"XML",
"in",
"-",
"memory",
"document",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1269-L1273 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | parseDTD | def parseDTD(ExternalID, SystemID):
"""Load and parse an external subset. """
ret = libxml2mod.xmlParseDTD(ExternalID, SystemID)
if ret is None:raise parserError('xmlParseDTD() failed')
return xmlDtd(_obj=ret) | python | def parseDTD(ExternalID, SystemID):
"""Load and parse an external subset. """
ret = libxml2mod.xmlParseDTD(ExternalID, SystemID)
if ret is None:raise parserError('xmlParseDTD() failed')
return xmlDtd(_obj=ret) | [
"def",
"parseDTD",
"(",
"ExternalID",
",",
"SystemID",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlParseDTD",
"(",
"ExternalID",
",",
"SystemID",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlParseDTD() failed'",
")",
"return",
"xmlDtd",
"(",
"_obj",
"=",
"ret",
")"
] | Load and parse an external subset. | [
"Load",
"and",
"parse",
"an",
"external",
"subset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1316-L1320 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | parseDoc | def parseDoc(cur):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.xmlParseDoc(cur)
if ret is None:raise parserError('xmlParseDoc() failed')
return xmlDoc(_obj=ret) | python | def parseDoc(cur):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.xmlParseDoc(cur)
if ret is None:raise parserError('xmlParseDoc() failed')
return xmlDoc(_obj=ret) | [
"def",
"parseDoc",
"(",
"cur",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlParseDoc",
"(",
"cur",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlParseDoc() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML in-memory document and build a tree. | [
"parse",
"an",
"XML",
"in",
"-",
"memory",
"document",
"and",
"build",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1322-L1326 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | parseEntity | def parseEntity(filename):
"""parse an XML external entity out of context and build a
tree. [78] extParsedEnt ::= TextDecl? content This
correspond to a "Well Balanced" chunk """
ret = libxml2mod.xmlParseEntity(filename)
if ret is None:raise parserError('xmlParseEntity() failed')
return xmlDoc(_obj=ret) | python | def parseEntity(filename):
"""parse an XML external entity out of context and build a
tree. [78] extParsedEnt ::= TextDecl? content This
correspond to a "Well Balanced" chunk """
ret = libxml2mod.xmlParseEntity(filename)
if ret is None:raise parserError('xmlParseEntity() failed')
return xmlDoc(_obj=ret) | [
"def",
"parseEntity",
"(",
"filename",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlParseEntity",
"(",
"filename",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlParseEntity() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML external entity out of context and build a
tree. [78] extParsedEnt ::= TextDecl? content This
correspond to a "Well Balanced" chunk | [
"parse",
"an",
"XML",
"external",
"entity",
"out",
"of",
"context",
"and",
"build",
"a",
"tree",
".",
"[",
"78",
"]",
"extParsedEnt",
"::",
"=",
"TextDecl?",
"content",
"This",
"correspond",
"to",
"a",
"Well",
"Balanced",
"chunk"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1328-L1334 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | parseFile | def parseFile(filename):
"""parse an XML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. """
ret = libxml2mod.xmlParseFile(filename)
if ret is None:raise parserError('xmlParseFile() failed')
return xmlDoc(_obj=ret) | python | def parseFile(filename):
"""parse an XML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. """
ret = libxml2mod.xmlParseFile(filename)
if ret is None:raise parserError('xmlParseFile() failed')
return xmlDoc(_obj=ret) | [
"def",
"parseFile",
"(",
"filename",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlParseFile",
"(",
"filename",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlParseFile() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. | [
"parse",
"an",
"XML",
"file",
"and",
"build",
"a",
"tree",
".",
"Automatic",
"support",
"for",
"ZLIB",
"/",
"Compress",
"compressed",
"document",
"is",
"provided",
"by",
"default",
"if",
"found",
"at",
"compile",
"-",
"time",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1336-L1342 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | parseMemory | def parseMemory(buffer, size):
"""parse an XML in-memory block and build a tree. """
ret = libxml2mod.xmlParseMemory(buffer, size)
if ret is None:raise parserError('xmlParseMemory() failed')
return xmlDoc(_obj=ret) | python | def parseMemory(buffer, size):
"""parse an XML in-memory block and build a tree. """
ret = libxml2mod.xmlParseMemory(buffer, size)
if ret is None:raise parserError('xmlParseMemory() failed')
return xmlDoc(_obj=ret) | [
"def",
"parseMemory",
"(",
"buffer",
",",
"size",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlParseMemory",
"(",
"buffer",
",",
"size",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"parserError",
"(",
"'xmlParseMemory() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML in-memory block and build a tree. | [
"parse",
"an",
"XML",
"in",
"-",
"memory",
"block",
"and",
"build",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1344-L1348 | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | readDoc | def readDoc(cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.xmlReadDoc(cur, URL, encoding, options)
if ret is None:raise treeError('xmlReadDoc() failed')
return xmlDoc(_obj=ret) | python | def readDoc(cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.xmlReadDoc(cur, URL, encoding, options)
if ret is None:raise treeError('xmlReadDoc() failed')
return xmlDoc(_obj=ret) | [
"def",
"readDoc",
"(",
"cur",
",",
"URL",
",",
"encoding",
",",
"options",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlReadDoc",
"(",
"cur",
",",
"URL",
",",
"encoding",
",",
"options",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlReadDoc() failed'",
")",
"return",
"xmlDoc",
"(",
"_obj",
"=",
"ret",
")"
] | parse an XML in-memory document and build a tree. | [
"parse",
"an",
"XML",
"in",
"-",
"memory",
"document",
"and",
"build",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1356-L1360 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.