repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apple/turicreate | src/unity/python/turicreate/toolkits/_supervised_learning.py | create_classification_with_model_selector | def create_classification_with_model_selector(dataset, target, model_selector,
features=None, validation_set='auto', verbose=True):
"""
Create a :class:`~turicreate.toolkits.SupervisedLearningModel`,
This is generic function that allows you to create any model that
implements SupervisedLearningModel. This function is normally not called, call
specific model's create function instead.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be 0 or 1, of integer type.
model_name : string
Name of the model
model_selector: function
Provide a model selector.
features : list[string], optional
List of feature names used by feature column
verbose : boolean
whether print out messages during training
"""
# Perform error-checking and trim inputs to specified columns
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
# Sample the data
features_sframe = dataset
if features_sframe.num_rows() > 1e5:
fraction = 1.0 * 1e5 / features_sframe.num_rows()
features_sframe = features_sframe.sample(fraction, seed = 0)
# Get available models for this dataset
num_classes = len(dataset[target].unique())
selected_model_names = model_selector(num_classes, features_sframe)
# Create a validation set
if isinstance(validation_set, str):
if validation_set == 'auto':
if dataset.num_rows() >= 100:
if verbose:
print_validation_track_notification()
dataset, validation_set = dataset.random_split(.95, exact=True)
else:
validation_set = None
else:
raise TypeError('Unrecognized value for validation_set.')
# Match C++ model names with user model names
python_names = {'boosted_trees_classifier': 'BoostedTreesClassifier',
'random_forest_classifier': 'RandomForestClassifier',
'decision_tree_classifier': 'DecisionTreeClassifier',
'classifier_logistic_regression': 'LogisticClassifier',
'classifier_svm': 'SVMClassifier'}
# Print useful user-facing progress messages
if verbose:
print('PROGRESS: The following methods are available for this type of problem.')
print('PROGRESS: ' + ', '.join([python_names[x] for x in selected_model_names]))
if len(selected_model_names) > 1:
print('PROGRESS: The returned model will be chosen according to validation accuracy.')
models = {}
metrics = {}
for model_name in selected_model_names:
# Fit each of the available models
m = create_selected(model_name, dataset, target, features, validation_set, verbose)
models[model_name] = m
if 'validation_accuracy' in m._list_fields():
metrics[model_name] = m.validation_accuracy
elif 'training_accuracy' in m._list_fields():
metrics[model_name] = m.training_accuracy
# Most models have this.
elif 'progress' in m._list_fields():
prog = m.progress
validation_column = 'Validation Accuracy'
accuracy_column = 'Training Accuracy'
if validation_column in prog.column_names():
metrics[model_name] = float(prog[validation_column].tail(1)[0])
else:
metrics[model_name] = float(prog[accuracy_column].tail(1)[0])
else:
raise ValueError("Model does not have metrics that can be used for model selection.")
# Choose model based on either validation, if available.
best_model = None
best_acc = None
for model_name in selected_model_names:
if best_acc is None:
best_model = model_name
best_acc = metrics[model_name]
if best_acc is not None and best_acc < metrics[model_name]:
best_model = model_name
best_acc = metrics[model_name]
ret = []
width = 32
if len(selected_model_names) > 1:
ret.append('PROGRESS: Model selection based on validation accuracy:')
ret.append('---------------------------------------------')
key_str = '{:<{}}: {}'
for model_name in selected_model_names:
name = python_names[model_name]
row = key_str.format(name, width, str(metrics[model_name]))
ret.append(row)
ret.append('---------------------------------------------')
ret.append('Selecting ' + python_names[best_model] + ' based on validation set performance.')
if verbose:
print('\nPROGRESS: '.join(ret))
return models[best_model] | python | def create_classification_with_model_selector(dataset, target, model_selector,
features=None, validation_set='auto', verbose=True):
"""
Create a :class:`~turicreate.toolkits.SupervisedLearningModel`,
This is generic function that allows you to create any model that
implements SupervisedLearningModel. This function is normally not called, call
specific model's create function instead.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be 0 or 1, of integer type.
model_name : string
Name of the model
model_selector: function
Provide a model selector.
features : list[string], optional
List of feature names used by feature column
verbose : boolean
whether print out messages during training
"""
# Perform error-checking and trim inputs to specified columns
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
# Sample the data
features_sframe = dataset
if features_sframe.num_rows() > 1e5:
fraction = 1.0 * 1e5 / features_sframe.num_rows()
features_sframe = features_sframe.sample(fraction, seed = 0)
# Get available models for this dataset
num_classes = len(dataset[target].unique())
selected_model_names = model_selector(num_classes, features_sframe)
# Create a validation set
if isinstance(validation_set, str):
if validation_set == 'auto':
if dataset.num_rows() >= 100:
if verbose:
print_validation_track_notification()
dataset, validation_set = dataset.random_split(.95, exact=True)
else:
validation_set = None
else:
raise TypeError('Unrecognized value for validation_set.')
# Match C++ model names with user model names
python_names = {'boosted_trees_classifier': 'BoostedTreesClassifier',
'random_forest_classifier': 'RandomForestClassifier',
'decision_tree_classifier': 'DecisionTreeClassifier',
'classifier_logistic_regression': 'LogisticClassifier',
'classifier_svm': 'SVMClassifier'}
# Print useful user-facing progress messages
if verbose:
print('PROGRESS: The following methods are available for this type of problem.')
print('PROGRESS: ' + ', '.join([python_names[x] for x in selected_model_names]))
if len(selected_model_names) > 1:
print('PROGRESS: The returned model will be chosen according to validation accuracy.')
models = {}
metrics = {}
for model_name in selected_model_names:
# Fit each of the available models
m = create_selected(model_name, dataset, target, features, validation_set, verbose)
models[model_name] = m
if 'validation_accuracy' in m._list_fields():
metrics[model_name] = m.validation_accuracy
elif 'training_accuracy' in m._list_fields():
metrics[model_name] = m.training_accuracy
# Most models have this.
elif 'progress' in m._list_fields():
prog = m.progress
validation_column = 'Validation Accuracy'
accuracy_column = 'Training Accuracy'
if validation_column in prog.column_names():
metrics[model_name] = float(prog[validation_column].tail(1)[0])
else:
metrics[model_name] = float(prog[accuracy_column].tail(1)[0])
else:
raise ValueError("Model does not have metrics that can be used for model selection.")
# Choose model based on either validation, if available.
best_model = None
best_acc = None
for model_name in selected_model_names:
if best_acc is None:
best_model = model_name
best_acc = metrics[model_name]
if best_acc is not None and best_acc < metrics[model_name]:
best_model = model_name
best_acc = metrics[model_name]
ret = []
width = 32
if len(selected_model_names) > 1:
ret.append('PROGRESS: Model selection based on validation accuracy:')
ret.append('---------------------------------------------')
key_str = '{:<{}}: {}'
for model_name in selected_model_names:
name = python_names[model_name]
row = key_str.format(name, width, str(metrics[model_name]))
ret.append(row)
ret.append('---------------------------------------------')
ret.append('Selecting ' + python_names[best_model] + ' based on validation set performance.')
if verbose:
print('\nPROGRESS: '.join(ret))
return models[best_model] | [
"def",
"create_classification_with_model_selector",
"(",
"dataset",
",",
"target",
",",
"model_selector",
",",
"features",
"=",
"None",
",",
"validation_set",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
")",
":",
"# Perform error-checking and trim inputs to specified columns",
"dataset",
",",
"validation_set",
"=",
"_validate_data",
"(",
"dataset",
",",
"target",
",",
"features",
",",
"validation_set",
")",
"# Sample the data",
"features_sframe",
"=",
"dataset",
"if",
"features_sframe",
".",
"num_rows",
"(",
")",
">",
"1e5",
":",
"fraction",
"=",
"1.0",
"*",
"1e5",
"/",
"features_sframe",
".",
"num_rows",
"(",
")",
"features_sframe",
"=",
"features_sframe",
".",
"sample",
"(",
"fraction",
",",
"seed",
"=",
"0",
")",
"# Get available models for this dataset",
"num_classes",
"=",
"len",
"(",
"dataset",
"[",
"target",
"]",
".",
"unique",
"(",
")",
")",
"selected_model_names",
"=",
"model_selector",
"(",
"num_classes",
",",
"features_sframe",
")",
"# Create a validation set",
"if",
"isinstance",
"(",
"validation_set",
",",
"str",
")",
":",
"if",
"validation_set",
"==",
"'auto'",
":",
"if",
"dataset",
".",
"num_rows",
"(",
")",
">=",
"100",
":",
"if",
"verbose",
":",
"print_validation_track_notification",
"(",
")",
"dataset",
",",
"validation_set",
"=",
"dataset",
".",
"random_split",
"(",
".95",
",",
"exact",
"=",
"True",
")",
"else",
":",
"validation_set",
"=",
"None",
"else",
":",
"raise",
"TypeError",
"(",
"'Unrecognized value for validation_set.'",
")",
"# Match C++ model names with user model names",
"python_names",
"=",
"{",
"'boosted_trees_classifier'",
":",
"'BoostedTreesClassifier'",
",",
"'random_forest_classifier'",
":",
"'RandomForestClassifier'",
",",
"'decision_tree_classifier'",
":",
"'DecisionTreeClassifier'",
",",
"'classifier_logistic_regression'",
":",
"'LogisticClassifier'",
",",
"'classifier_svm'",
":",
"'SVMClassifier'",
"}",
"# Print useful user-facing progress messages",
"if",
"verbose",
":",
"print",
"(",
"'PROGRESS: The following methods are available for this type of problem.'",
")",
"print",
"(",
"'PROGRESS: '",
"+",
"', '",
".",
"join",
"(",
"[",
"python_names",
"[",
"x",
"]",
"for",
"x",
"in",
"selected_model_names",
"]",
")",
")",
"if",
"len",
"(",
"selected_model_names",
")",
">",
"1",
":",
"print",
"(",
"'PROGRESS: The returned model will be chosen according to validation accuracy.'",
")",
"models",
"=",
"{",
"}",
"metrics",
"=",
"{",
"}",
"for",
"model_name",
"in",
"selected_model_names",
":",
"# Fit each of the available models",
"m",
"=",
"create_selected",
"(",
"model_name",
",",
"dataset",
",",
"target",
",",
"features",
",",
"validation_set",
",",
"verbose",
")",
"models",
"[",
"model_name",
"]",
"=",
"m",
"if",
"'validation_accuracy'",
"in",
"m",
".",
"_list_fields",
"(",
")",
":",
"metrics",
"[",
"model_name",
"]",
"=",
"m",
".",
"validation_accuracy",
"elif",
"'training_accuracy'",
"in",
"m",
".",
"_list_fields",
"(",
")",
":",
"metrics",
"[",
"model_name",
"]",
"=",
"m",
".",
"training_accuracy",
"# Most models have this.",
"elif",
"'progress'",
"in",
"m",
".",
"_list_fields",
"(",
")",
":",
"prog",
"=",
"m",
".",
"progress",
"validation_column",
"=",
"'Validation Accuracy'",
"accuracy_column",
"=",
"'Training Accuracy'",
"if",
"validation_column",
"in",
"prog",
".",
"column_names",
"(",
")",
":",
"metrics",
"[",
"model_name",
"]",
"=",
"float",
"(",
"prog",
"[",
"validation_column",
"]",
".",
"tail",
"(",
"1",
")",
"[",
"0",
"]",
")",
"else",
":",
"metrics",
"[",
"model_name",
"]",
"=",
"float",
"(",
"prog",
"[",
"accuracy_column",
"]",
".",
"tail",
"(",
"1",
")",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Model does not have metrics that can be used for model selection.\"",
")",
"# Choose model based on either validation, if available.",
"best_model",
"=",
"None",
"best_acc",
"=",
"None",
"for",
"model_name",
"in",
"selected_model_names",
":",
"if",
"best_acc",
"is",
"None",
":",
"best_model",
"=",
"model_name",
"best_acc",
"=",
"metrics",
"[",
"model_name",
"]",
"if",
"best_acc",
"is",
"not",
"None",
"and",
"best_acc",
"<",
"metrics",
"[",
"model_name",
"]",
":",
"best_model",
"=",
"model_name",
"best_acc",
"=",
"metrics",
"[",
"model_name",
"]",
"ret",
"=",
"[",
"]",
"width",
"=",
"32",
"if",
"len",
"(",
"selected_model_names",
")",
">",
"1",
":",
"ret",
".",
"append",
"(",
"'PROGRESS: Model selection based on validation accuracy:'",
")",
"ret",
".",
"append",
"(",
"'---------------------------------------------'",
")",
"key_str",
"=",
"'{:<{}}: {}'",
"for",
"model_name",
"in",
"selected_model_names",
":",
"name",
"=",
"python_names",
"[",
"model_name",
"]",
"row",
"=",
"key_str",
".",
"format",
"(",
"name",
",",
"width",
",",
"str",
"(",
"metrics",
"[",
"model_name",
"]",
")",
")",
"ret",
".",
"append",
"(",
"row",
")",
"ret",
".",
"append",
"(",
"'---------------------------------------------'",
")",
"ret",
".",
"append",
"(",
"'Selecting '",
"+",
"python_names",
"[",
"best_model",
"]",
"+",
"' based on validation set performance.'",
")",
"if",
"verbose",
":",
"print",
"(",
"'\\nPROGRESS: '",
".",
"join",
"(",
"ret",
")",
")",
"return",
"models",
"[",
"best_model",
"]"
] | Create a :class:`~turicreate.toolkits.SupervisedLearningModel`,
This is generic function that allows you to create any model that
implements SupervisedLearningModel. This function is normally not called, call
specific model's create function instead.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be 0 or 1, of integer type.
model_name : string
Name of the model
model_selector: function
Provide a model selector.
features : list[string], optional
List of feature names used by feature column
verbose : boolean
whether print out messages during training | [
"Create",
"a",
":",
"class",
":",
"~turicreate",
".",
"toolkits",
".",
"SupervisedLearningModel"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_supervised_learning.py#L337-L460 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_supervised_learning.py | SupervisedLearningModel.predict | def predict(self, dataset, missing_value_action='auto',
output_type='', options={}, **kwargs):
"""
Return predictions for ``dataset``, using the trained supervised_learning
model. Predictions are generated as class labels (0 or
1).
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
output_type : str, optional
output type that maybe needed by some of the toolkits
options : dict
additional options to be passed in to prediction
kwargs : dict
additional options to be passed into prediction
Returns
-------
out : SArray
An SArray with model predictions.
"""
if missing_value_action == 'auto':
missing_value_action = select_default_missing_value_policy(self, 'predict')
# Low latency path
if isinstance(dataset, list):
return self.__proxy__.fast_predict(
dataset, missing_value_action, output_type)
if isinstance(dataset, dict):
return self.__proxy__.fast_predict(
[dataset], missing_value_action, output_type)
# Batch predictions path
else:
_raise_error_if_not_sframe(dataset, "dataset")
return self.__proxy__.predict(
dataset, missing_value_action, output_type) | python | def predict(self, dataset, missing_value_action='auto',
output_type='', options={}, **kwargs):
"""
Return predictions for ``dataset``, using the trained supervised_learning
model. Predictions are generated as class labels (0 or
1).
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
output_type : str, optional
output type that maybe needed by some of the toolkits
options : dict
additional options to be passed in to prediction
kwargs : dict
additional options to be passed into prediction
Returns
-------
out : SArray
An SArray with model predictions.
"""
if missing_value_action == 'auto':
missing_value_action = select_default_missing_value_policy(self, 'predict')
# Low latency path
if isinstance(dataset, list):
return self.__proxy__.fast_predict(
dataset, missing_value_action, output_type)
if isinstance(dataset, dict):
return self.__proxy__.fast_predict(
[dataset], missing_value_action, output_type)
# Batch predictions path
else:
_raise_error_if_not_sframe(dataset, "dataset")
return self.__proxy__.predict(
dataset, missing_value_action, output_type) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
",",
"output_type",
"=",
"''",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"missing_value_action",
"==",
"'auto'",
":",
"missing_value_action",
"=",
"select_default_missing_value_policy",
"(",
"self",
",",
"'predict'",
")",
"# Low latency path",
"if",
"isinstance",
"(",
"dataset",
",",
"list",
")",
":",
"return",
"self",
".",
"__proxy__",
".",
"fast_predict",
"(",
"dataset",
",",
"missing_value_action",
",",
"output_type",
")",
"if",
"isinstance",
"(",
"dataset",
",",
"dict",
")",
":",
"return",
"self",
".",
"__proxy__",
".",
"fast_predict",
"(",
"[",
"dataset",
"]",
",",
"missing_value_action",
",",
"output_type",
")",
"# Batch predictions path",
"else",
":",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"return",
"self",
".",
"__proxy__",
".",
"predict",
"(",
"dataset",
",",
"missing_value_action",
",",
"output_type",
")"
] | Return predictions for ``dataset``, using the trained supervised_learning
model. Predictions are generated as class labels (0 or
1).
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
output_type : str, optional
output type that maybe needed by some of the toolkits
options : dict
additional options to be passed in to prediction
kwargs : dict
additional options to be passed into prediction
Returns
-------
out : SArray
An SArray with model predictions. | [
"Return",
"predictions",
"for",
"dataset",
"using",
"the",
"trained",
"supervised_learning",
"model",
".",
"Predictions",
"are",
"generated",
"as",
"class",
"labels",
"(",
"0",
"or",
"1",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_supervised_learning.py#L59-L116 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_supervised_learning.py | SupervisedLearningModel.evaluate | def evaluate(self, dataset, metric="auto",
missing_value_action='auto', with_predictions=False, options={}, **kwargs):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, list[str]
Evaluation metric(s) to be computed.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
options : dict
additional options to be passed in to prediction
kwargs : dict
additional options to be passed into prediction
"""
if missing_value_action == 'auto':
missing_value_action = select_default_missing_value_policy(
self, 'evaluate')
_raise_error_if_not_sframe(dataset, "dataset")
results = self.__proxy__.evaluate(
dataset, missing_value_action, metric, with_predictions=with_predictions);
return results | python | def evaluate(self, dataset, metric="auto",
missing_value_action='auto', with_predictions=False, options={}, **kwargs):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, list[str]
Evaluation metric(s) to be computed.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
options : dict
additional options to be passed in to prediction
kwargs : dict
additional options to be passed into prediction
"""
if missing_value_action == 'auto':
missing_value_action = select_default_missing_value_policy(
self, 'evaluate')
_raise_error_if_not_sframe(dataset, "dataset")
results = self.__proxy__.evaluate(
dataset, missing_value_action, metric, with_predictions=with_predictions);
return results | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"\"auto\"",
",",
"missing_value_action",
"=",
"'auto'",
",",
"with_predictions",
"=",
"False",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"missing_value_action",
"==",
"'auto'",
":",
"missing_value_action",
"=",
"select_default_missing_value_policy",
"(",
"self",
",",
"'evaluate'",
")",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"results",
"=",
"self",
".",
"__proxy__",
".",
"evaluate",
"(",
"dataset",
",",
"missing_value_action",
",",
"metric",
",",
"with_predictions",
"=",
"with_predictions",
")",
"return",
"results"
] | Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, list[str]
Evaluation metric(s) to be computed.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
options : dict
additional options to be passed in to prediction
kwargs : dict
additional options to be passed into prediction | [
"Evaluate",
"the",
"model",
"by",
"making",
"predictions",
"of",
"target",
"values",
"and",
"comparing",
"these",
"to",
"actual",
"values",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_supervised_learning.py#L118-L159 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_supervised_learning.py | Classifier.classify | def classify(self, dataset, missing_value_action='auto'):
"""
Return predictions for ``dataset``, using the trained supervised_learning
model. Predictions are generated as class labels (0 or
1).
Parameters
----------
dataset: SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose model dependent missing value action
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
"""
if (missing_value_action == 'auto'):
missing_value_action = select_default_missing_value_policy(self, 'classify')
# Low latency path
if isinstance(dataset, list):
return self.__proxy__.fast_classify(dataset, missing_value_action)
if isinstance(dataset, dict):
return self.__proxy__.fast_classify([dataset], missing_value_action)
_raise_error_if_not_sframe(dataset, "dataset")
return self.__proxy__.classify(dataset, missing_value_action) | python | def classify(self, dataset, missing_value_action='auto'):
"""
Return predictions for ``dataset``, using the trained supervised_learning
model. Predictions are generated as class labels (0 or
1).
Parameters
----------
dataset: SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose model dependent missing value action
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
"""
if (missing_value_action == 'auto'):
missing_value_action = select_default_missing_value_policy(self, 'classify')
# Low latency path
if isinstance(dataset, list):
return self.__proxy__.fast_classify(dataset, missing_value_action)
if isinstance(dataset, dict):
return self.__proxy__.fast_classify([dataset], missing_value_action)
_raise_error_if_not_sframe(dataset, "dataset")
return self.__proxy__.classify(dataset, missing_value_action) | [
"def",
"classify",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"if",
"(",
"missing_value_action",
"==",
"'auto'",
")",
":",
"missing_value_action",
"=",
"select_default_missing_value_policy",
"(",
"self",
",",
"'classify'",
")",
"# Low latency path",
"if",
"isinstance",
"(",
"dataset",
",",
"list",
")",
":",
"return",
"self",
".",
"__proxy__",
".",
"fast_classify",
"(",
"dataset",
",",
"missing_value_action",
")",
"if",
"isinstance",
"(",
"dataset",
",",
"dict",
")",
":",
"return",
"self",
".",
"__proxy__",
".",
"fast_classify",
"(",
"[",
"dataset",
"]",
",",
"missing_value_action",
")",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"return",
"self",
".",
"__proxy__",
".",
"classify",
"(",
"dataset",
",",
"missing_value_action",
")"
] | Return predictions for ``dataset``, using the trained supervised_learning
model. Predictions are generated as class labels (0 or
1).
Parameters
----------
dataset: SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose model dependent missing value action
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions. | [
"Return",
"predictions",
"for",
"dataset",
"using",
"the",
"trained",
"supervised_learning",
"model",
".",
"Predictions",
"are",
"generated",
"as",
"class",
"labels",
"(",
"0",
"or",
"1",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_supervised_learning.py#L206-L245 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/boosted_trees_regression.py | BoostedTreesRegression.evaluate | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Can be one of:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
----------
create, predict
Examples
--------
..sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(BoostedTreesRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | python | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Can be one of:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
----------
create, predict
Examples
--------
..sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(BoostedTreesRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"_raise_error_evaluation_metric_is_valid",
"(",
"metric",
",",
"[",
"'auto'",
",",
"'rmse'",
",",
"'max_error'",
"]",
")",
"return",
"super",
"(",
"BoostedTreesRegression",
",",
"self",
")",
".",
"evaluate",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
",",
"metric",
"=",
"metric",
")"
] | Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Can be one of:
- 'auto': Compute all metrics.
- 'rmse': Rooted mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
----------
create, predict
Examples
--------
..sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse') | [
"Evaluate",
"the",
"model",
"on",
"the",
"given",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/boosted_trees_regression.py#L152-L201 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/boosted_trees_regression.py | BoostedTreesRegression.predict | def predict(self, dataset, missing_value_action='auto'):
"""
Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.boosted_trees_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata)
"""
return super(BoostedTreesRegression, self).predict(dataset, output_type='margin',
missing_value_action=missing_value_action) | python | def predict(self, dataset, missing_value_action='auto'):
"""
Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.boosted_trees_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata)
"""
return super(BoostedTreesRegression, self).predict(dataset, output_type='margin',
missing_value_action=missing_value_action) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"return",
"super",
"(",
"BoostedTreesRegression",
",",
"self",
")",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'margin'",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] | Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.boosted_trees_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata) | [
"Predict",
"the",
"target",
"column",
"of",
"the",
"given",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/boosted_trees_regression.py#L230-L271 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/disassemble.py | print_code | def print_code(co, lasti= -1, level=0):
"""Disassemble a code object."""
code = co.co_code
for constant in co.co_consts:
print( '| |' * level, end=' ')
print( 'constant:', constant)
labels = findlabels(code)
linestarts = dict(findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
have_inner = False
c = code[i]
op = co_ord(c)
if i in linestarts:
if i > 0:
print()
print( '| |' * level, end=' ')
print( "%3d" % linestarts[i], end=' ')
else:
print( '| |' * level, end=' ')
print(' ', end=' ')
if i == lasti: print( '-->',end=' ')
else: print( ' ', end=' ')
if i in labels: print( '>>', end=' ')
else: print( ' ',end=' ')
print(repr(i).rjust(4), end=' ')
print(opcode.opname[op].ljust(20), end=' ')
i = i + 1
if op >= opcode.HAVE_ARGUMENT:
oparg = co_ord(code[i]) + co_ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == opcode.EXTENDED_ARG:
extended_arg = oparg * 65536
print( repr(oparg).rjust(5), end=' ')
if op in opcode.hasconst:
print( '(' + repr(co.co_consts[oparg]) + ')', end=' ')
if type(co.co_consts[oparg]) == types.CodeType:
have_inner = co.co_consts[oparg]
elif op in opcode.hasname:
print( '(' + co.co_names[oparg] + ')',end=' ')
elif op in opcode.hasjrel:
print('(to ' + repr(i + oparg) + ')', end=' ')
elif op in opcode.haslocal:
print('(' + co.co_varnames[oparg] + ')', end=' ')
elif op in opcode.hascompare:
print('(' + opcode.cmp_op[oparg] + ')', end=' ')
elif op in opcode.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print('(' + free[oparg] + ')', end=' ')
print()
if have_inner is not False:
print_code(have_inner, level=level + 1) | python | def print_code(co, lasti= -1, level=0):
"""Disassemble a code object."""
code = co.co_code
for constant in co.co_consts:
print( '| |' * level, end=' ')
print( 'constant:', constant)
labels = findlabels(code)
linestarts = dict(findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
have_inner = False
c = code[i]
op = co_ord(c)
if i in linestarts:
if i > 0:
print()
print( '| |' * level, end=' ')
print( "%3d" % linestarts[i], end=' ')
else:
print( '| |' * level, end=' ')
print(' ', end=' ')
if i == lasti: print( '-->',end=' ')
else: print( ' ', end=' ')
if i in labels: print( '>>', end=' ')
else: print( ' ',end=' ')
print(repr(i).rjust(4), end=' ')
print(opcode.opname[op].ljust(20), end=' ')
i = i + 1
if op >= opcode.HAVE_ARGUMENT:
oparg = co_ord(code[i]) + co_ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == opcode.EXTENDED_ARG:
extended_arg = oparg * 65536
print( repr(oparg).rjust(5), end=' ')
if op in opcode.hasconst:
print( '(' + repr(co.co_consts[oparg]) + ')', end=' ')
if type(co.co_consts[oparg]) == types.CodeType:
have_inner = co.co_consts[oparg]
elif op in opcode.hasname:
print( '(' + co.co_names[oparg] + ')',end=' ')
elif op in opcode.hasjrel:
print('(to ' + repr(i + oparg) + ')', end=' ')
elif op in opcode.haslocal:
print('(' + co.co_varnames[oparg] + ')', end=' ')
elif op in opcode.hascompare:
print('(' + opcode.cmp_op[oparg] + ')', end=' ')
elif op in opcode.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print('(' + free[oparg] + ')', end=' ')
print()
if have_inner is not False:
print_code(have_inner, level=level + 1) | [
"def",
"print_code",
"(",
"co",
",",
"lasti",
"=",
"-",
"1",
",",
"level",
"=",
"0",
")",
":",
"code",
"=",
"co",
".",
"co_code",
"for",
"constant",
"in",
"co",
".",
"co_consts",
":",
"print",
"(",
"'| |'",
"*",
"level",
",",
"end",
"=",
"' '",
")",
"print",
"(",
"'constant:'",
",",
"constant",
")",
"labels",
"=",
"findlabels",
"(",
"code",
")",
"linestarts",
"=",
"dict",
"(",
"findlinestarts",
"(",
"co",
")",
")",
"n",
"=",
"len",
"(",
"code",
")",
"i",
"=",
"0",
"extended_arg",
"=",
"0",
"free",
"=",
"None",
"while",
"i",
"<",
"n",
":",
"have_inner",
"=",
"False",
"c",
"=",
"code",
"[",
"i",
"]",
"op",
"=",
"co_ord",
"(",
"c",
")",
"if",
"i",
"in",
"linestarts",
":",
"if",
"i",
">",
"0",
":",
"print",
"(",
")",
"print",
"(",
"'| |'",
"*",
"level",
",",
"end",
"=",
"' '",
")",
"print",
"(",
"\"%3d\"",
"%",
"linestarts",
"[",
"i",
"]",
",",
"end",
"=",
"' '",
")",
"else",
":",
"print",
"(",
"'| |'",
"*",
"level",
",",
"end",
"=",
"' '",
")",
"print",
"(",
"' '",
",",
"end",
"=",
"' '",
")",
"if",
"i",
"==",
"lasti",
":",
"print",
"(",
"'-->'",
",",
"end",
"=",
"' '",
")",
"else",
":",
"print",
"(",
"' '",
",",
"end",
"=",
"' '",
")",
"if",
"i",
"in",
"labels",
":",
"print",
"(",
"'>>'",
",",
"end",
"=",
"' '",
")",
"else",
":",
"print",
"(",
"' '",
",",
"end",
"=",
"' '",
")",
"print",
"(",
"repr",
"(",
"i",
")",
".",
"rjust",
"(",
"4",
")",
",",
"end",
"=",
"' '",
")",
"print",
"(",
"opcode",
".",
"opname",
"[",
"op",
"]",
".",
"ljust",
"(",
"20",
")",
",",
"end",
"=",
"' '",
")",
"i",
"=",
"i",
"+",
"1",
"if",
"op",
">=",
"opcode",
".",
"HAVE_ARGUMENT",
":",
"oparg",
"=",
"co_ord",
"(",
"code",
"[",
"i",
"]",
")",
"+",
"co_ord",
"(",
"code",
"[",
"i",
"+",
"1",
"]",
")",
"*",
"256",
"+",
"extended_arg",
"extended_arg",
"=",
"0",
"i",
"=",
"i",
"+",
"2",
"if",
"op",
"==",
"opcode",
".",
"EXTENDED_ARG",
":",
"extended_arg",
"=",
"oparg",
"*",
"65536",
"print",
"(",
"repr",
"(",
"oparg",
")",
".",
"rjust",
"(",
"5",
")",
",",
"end",
"=",
"' '",
")",
"if",
"op",
"in",
"opcode",
".",
"hasconst",
":",
"print",
"(",
"'('",
"+",
"repr",
"(",
"co",
".",
"co_consts",
"[",
"oparg",
"]",
")",
"+",
"')'",
",",
"end",
"=",
"' '",
")",
"if",
"type",
"(",
"co",
".",
"co_consts",
"[",
"oparg",
"]",
")",
"==",
"types",
".",
"CodeType",
":",
"have_inner",
"=",
"co",
".",
"co_consts",
"[",
"oparg",
"]",
"elif",
"op",
"in",
"opcode",
".",
"hasname",
":",
"print",
"(",
"'('",
"+",
"co",
".",
"co_names",
"[",
"oparg",
"]",
"+",
"')'",
",",
"end",
"=",
"' '",
")",
"elif",
"op",
"in",
"opcode",
".",
"hasjrel",
":",
"print",
"(",
"'(to '",
"+",
"repr",
"(",
"i",
"+",
"oparg",
")",
"+",
"')'",
",",
"end",
"=",
"' '",
")",
"elif",
"op",
"in",
"opcode",
".",
"haslocal",
":",
"print",
"(",
"'('",
"+",
"co",
".",
"co_varnames",
"[",
"oparg",
"]",
"+",
"')'",
",",
"end",
"=",
"' '",
")",
"elif",
"op",
"in",
"opcode",
".",
"hascompare",
":",
"print",
"(",
"'('",
"+",
"opcode",
".",
"cmp_op",
"[",
"oparg",
"]",
"+",
"')'",
",",
"end",
"=",
"' '",
")",
"elif",
"op",
"in",
"opcode",
".",
"hasfree",
":",
"if",
"free",
"is",
"None",
":",
"free",
"=",
"co",
".",
"co_cellvars",
"+",
"co",
".",
"co_freevars",
"print",
"(",
"'('",
"+",
"free",
"[",
"oparg",
"]",
"+",
"')'",
",",
"end",
"=",
"' '",
")",
"print",
"(",
")",
"if",
"have_inner",
"is",
"not",
"False",
":",
"print_code",
"(",
"have_inner",
",",
"level",
"=",
"level",
"+",
"1",
")"
] | Disassemble a code object. | [
"Disassemble",
"a",
"code",
"object",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/disassemble.py#L29-L92 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_decision_tree_regressor.py | convert | def convert(model, feature_names, target):
"""Convert a decision tree model to protobuf format.
Parameters
----------
decision_tree : DecisionTreeRegressor
A trained scikit-learn tree model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _tree.DecisionTreeRegressor)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'tree_') and model.tree_ is not None)
return _MLModel(_convert_tree_ensemble(model, feature_names, target)) | python | def convert(model, feature_names, target):
"""Convert a decision tree model to protobuf format.
Parameters
----------
decision_tree : DecisionTreeRegressor
A trained scikit-learn tree model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _tree.DecisionTreeRegressor)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'tree_') and model.tree_ is not None)
return _MLModel(_convert_tree_ensemble(model, feature_names, target)) | [
"def",
"convert",
"(",
"model",
",",
"feature_names",
",",
"target",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"_tree",
".",
"DecisionTreeRegressor",
")",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"lambda",
"m",
":",
"hasattr",
"(",
"m",
",",
"'tree_'",
")",
"and",
"model",
".",
"tree_",
"is",
"not",
"None",
")",
"return",
"_MLModel",
"(",
"_convert_tree_ensemble",
"(",
"model",
",",
"feature_names",
",",
"target",
")",
")"
] | Convert a decision tree model to protobuf format.
Parameters
----------
decision_tree : DecisionTreeRegressor
A trained scikit-learn tree model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"decision",
"tree",
"model",
"to",
"protobuf",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_decision_tree_regressor.py#L18-L42 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | _check_prob_and_prob_vector | def _check_prob_and_prob_vector(predictions):
"""
Check that the predictionsa are either probabilities of prob-vectors.
"""
from .._deps import numpy
ptype = predictions.dtype
import array
if ptype not in [float, numpy.ndarray, array.array, int]:
err_msg = "Input `predictions` must be of numeric type (for binary "
err_msg += "classification) or array (of probability vectors) for "
err_msg += "multiclass classification."
raise TypeError(err_msg) | python | def _check_prob_and_prob_vector(predictions):
"""
Check that the predictionsa are either probabilities of prob-vectors.
"""
from .._deps import numpy
ptype = predictions.dtype
import array
if ptype not in [float, numpy.ndarray, array.array, int]:
err_msg = "Input `predictions` must be of numeric type (for binary "
err_msg += "classification) or array (of probability vectors) for "
err_msg += "multiclass classification."
raise TypeError(err_msg) | [
"def",
"_check_prob_and_prob_vector",
"(",
"predictions",
")",
":",
"from",
".",
".",
"_deps",
"import",
"numpy",
"ptype",
"=",
"predictions",
".",
"dtype",
"import",
"array",
"if",
"ptype",
"not",
"in",
"[",
"float",
",",
"numpy",
".",
"ndarray",
",",
"array",
".",
"array",
",",
"int",
"]",
":",
"err_msg",
"=",
"\"Input `predictions` must be of numeric type (for binary \"",
"err_msg",
"+=",
"\"classification) or array (of probability vectors) for \"",
"err_msg",
"+=",
"\"multiclass classification.\"",
"raise",
"TypeError",
"(",
"err_msg",
")"
] | Check that the predictionsa are either probabilities of prob-vectors. | [
"Check",
"that",
"the",
"predictionsa",
"are",
"either",
"probabilities",
"of",
"prob",
"-",
"vectors",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L36-L48 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | _supervised_evaluation_error_checking | def _supervised_evaluation_error_checking(targets, predictions):
"""
Perform basic error checking for the evaluation metrics. Check
types and sizes of the inputs.
"""
_raise_error_if_not_sarray(targets, "targets")
_raise_error_if_not_sarray(predictions, "predictions")
if (len(targets) != len(predictions)):
raise _ToolkitError(
"Input SArrays 'targets' and 'predictions' must be of the same length.") | python | def _supervised_evaluation_error_checking(targets, predictions):
"""
Perform basic error checking for the evaluation metrics. Check
types and sizes of the inputs.
"""
_raise_error_if_not_sarray(targets, "targets")
_raise_error_if_not_sarray(predictions, "predictions")
if (len(targets) != len(predictions)):
raise _ToolkitError(
"Input SArrays 'targets' and 'predictions' must be of the same length.") | [
"def",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
":",
"_raise_error_if_not_sarray",
"(",
"targets",
",",
"\"targets\"",
")",
"_raise_error_if_not_sarray",
"(",
"predictions",
",",
"\"predictions\"",
")",
"if",
"(",
"len",
"(",
"targets",
")",
"!=",
"len",
"(",
"predictions",
")",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"Input SArrays 'targets' and 'predictions' must be of the same length.\"",
")"
] | Perform basic error checking for the evaluation metrics. Check
types and sizes of the inputs. | [
"Perform",
"basic",
"error",
"checking",
"for",
"the",
"evaluation",
"metrics",
".",
"Check",
"types",
"and",
"sizes",
"of",
"the",
"inputs",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L50-L59 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | log_loss | def log_loss(targets, predictions, index_map=None):
r"""
Compute the logloss for the given targets and the given predicted
probabilities. This quantity is defined to be the negative of the sum
of the log probability of each observation, normalized by the number of
observations:
.. math::
\textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N}
(y_i \log(p_i) + (1-y_i)\log(1-p_i)) ,
where y_i is the i'th target value and p_i is the i'th predicted
probability.
For multiclass situations, the definition is a slight generalization of the
above:
.. math::
\textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N}
\sum_{j \in 1, \ldots, L}
(y_{ij} \log(p_{ij})) ,
where :math:`L` is the number of classes and :math:`y_{ij}` indicates that
observation `i` has class label `j`.
Parameters
----------
targets : SArray
Ground truth class labels. This can either contain integers or strings.
predictions : SArray
The predicted probability that corresponds to each target value. For
binary classification, the probability corresponds to the probability
of the "positive" label being predicted. For multi-class
classification, the predictions are expected to be an array of
predictions for each class.
index_map : dict[int], [None (default)]
For binary classification, a dictionary mapping the two target labels to
either 0 (negative) or 1 (positive). For multi-class classification, a
dictionary mapping potential target labels to the associated index into
the vectors in ``predictions``.
Returns
-------
out : float
The log_loss.
See Also
--------
accuracy
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case. This behavior can be overridden by providing
an explicit ``index_map``.
- For multi-class classification, when the target label is of type
"string", then the probability vector is assumed to be a vector of
probabilities of classes as sorted alphanumerically. Hence, for the
probability vector [0.1, 0.2, 0.7] for a dataset with classes "cat",
"dog", and "rat"; the 0.1 corresponds to "cat", the 0.2 to "dog" and the
0.7 to "rat". This behavior can be overridden by providing an explicit
``index_map``.
- Logloss is undefined when a probability value p = 0, or p = 1. Hence,
probabilities are clipped to max(EPSILON, min(1 - EPSILON, p)) where
EPSILON = 1e-15.
References
----------
https://www.kaggle.com/wiki/LogLoss
Examples
--------
.. sourcecode:: python
import turicreate as tc
targets = tc.SArray([0, 1, 1, 0])
predictions = tc.SArray([0.1, 0.35, 0.7, 0.99])
log_loss = tc.evaluation.log_loss(targets, predictions)
For binary classification, when the target label is of type "string", then
the labels are sorted alphanumerically and the largest label is chosen as
the "positive" label.
.. sourcecode:: python
import turicreate as tc
targets = tc.SArray(["cat", "dog", "dog", "cat"])
predictions = tc.SArray([0.1, 0.35, 0.7, 0.99])
log_loss = tc.evaluation.log_loss(targets, predictions)
In the multi-class setting, log-loss requires a vector of probabilities
(that sum to 1) for each class label in the input dataset. In this example,
there are three classes [0, 1, 2], and the vector of probabilities
correspond to the probability of prediction for each of the three classes.
.. sourcecode:: python
target = tc.SArray([ 1, 0, 2, 1])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
log_loss = tc.evaluation.log_loss(targets, predictions)
For multi-class classification, when the target label is of type "string",
then the probability vector is assumed to be a vector of probabilities of
class as sorted alphanumerically. Hence, for the probability vector [0.1,
0.2, 0.7] for a dataset with classes "cat", "dog", and "rat"; the 0.1
corresponds to "cat", the 0.2 to "dog" and the 0.7 to "rat".
.. sourcecode:: python
target = tc.SArray([ "dog", "cat", "foosa", "dog"])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
log_loss = tc.evaluation.log_loss(targets, predictions)
If the probability vectors contain predictions for labels not present among
the targets, an explicit index map must be provided.
.. sourcecode:: python
target = tc.SArray([ "dog", "cat", "cat", "dog"])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
index_map = {"cat": 0, "dog": 1, "foosa": 2}
log_loss = tc.evaluation.log_loss(targets, predictions, index_map=index_map)
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_prob_and_prob_vector(predictions)
_check_target_not_float(targets)
_check_index_map(index_map)
multiclass = predictions.dtype not in [float, int]
opts = {}
if index_map is not None:
opts['index_map'] = index_map
if multiclass:
result = _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "multiclass_logloss", opts)
else:
result = _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "binary_logloss", opts)
return result | python | def log_loss(targets, predictions, index_map=None):
r"""
Compute the logloss for the given targets and the given predicted
probabilities. This quantity is defined to be the negative of the sum
of the log probability of each observation, normalized by the number of
observations:
.. math::
\textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N}
(y_i \log(p_i) + (1-y_i)\log(1-p_i)) ,
where y_i is the i'th target value and p_i is the i'th predicted
probability.
For multiclass situations, the definition is a slight generalization of the
above:
.. math::
\textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N}
\sum_{j \in 1, \ldots, L}
(y_{ij} \log(p_{ij})) ,
where :math:`L` is the number of classes and :math:`y_{ij}` indicates that
observation `i` has class label `j`.
Parameters
----------
targets : SArray
Ground truth class labels. This can either contain integers or strings.
predictions : SArray
The predicted probability that corresponds to each target value. For
binary classification, the probability corresponds to the probability
of the "positive" label being predicted. For multi-class
classification, the predictions are expected to be an array of
predictions for each class.
index_map : dict[int], [None (default)]
For binary classification, a dictionary mapping the two target labels to
either 0 (negative) or 1 (positive). For multi-class classification, a
dictionary mapping potential target labels to the associated index into
the vectors in ``predictions``.
Returns
-------
out : float
The log_loss.
See Also
--------
accuracy
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case. This behavior can be overridden by providing
an explicit ``index_map``.
- For multi-class classification, when the target label is of type
"string", then the probability vector is assumed to be a vector of
probabilities of classes as sorted alphanumerically. Hence, for the
probability vector [0.1, 0.2, 0.7] for a dataset with classes "cat",
"dog", and "rat"; the 0.1 corresponds to "cat", the 0.2 to "dog" and the
0.7 to "rat". This behavior can be overridden by providing an explicit
``index_map``.
- Logloss is undefined when a probability value p = 0, or p = 1. Hence,
probabilities are clipped to max(EPSILON, min(1 - EPSILON, p)) where
EPSILON = 1e-15.
References
----------
https://www.kaggle.com/wiki/LogLoss
Examples
--------
.. sourcecode:: python
import turicreate as tc
targets = tc.SArray([0, 1, 1, 0])
predictions = tc.SArray([0.1, 0.35, 0.7, 0.99])
log_loss = tc.evaluation.log_loss(targets, predictions)
For binary classification, when the target label is of type "string", then
the labels are sorted alphanumerically and the largest label is chosen as
the "positive" label.
.. sourcecode:: python
import turicreate as tc
targets = tc.SArray(["cat", "dog", "dog", "cat"])
predictions = tc.SArray([0.1, 0.35, 0.7, 0.99])
log_loss = tc.evaluation.log_loss(targets, predictions)
In the multi-class setting, log-loss requires a vector of probabilities
(that sum to 1) for each class label in the input dataset. In this example,
there are three classes [0, 1, 2], and the vector of probabilities
correspond to the probability of prediction for each of the three classes.
.. sourcecode:: python
target = tc.SArray([ 1, 0, 2, 1])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
log_loss = tc.evaluation.log_loss(targets, predictions)
For multi-class classification, when the target label is of type "string",
then the probability vector is assumed to be a vector of probabilities of
class as sorted alphanumerically. Hence, for the probability vector [0.1,
0.2, 0.7] for a dataset with classes "cat", "dog", and "rat"; the 0.1
corresponds to "cat", the 0.2 to "dog" and the 0.7 to "rat".
.. sourcecode:: python
target = tc.SArray([ "dog", "cat", "foosa", "dog"])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
log_loss = tc.evaluation.log_loss(targets, predictions)
If the probability vectors contain predictions for labels not present among
the targets, an explicit index map must be provided.
.. sourcecode:: python
target = tc.SArray([ "dog", "cat", "cat", "dog"])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
index_map = {"cat": 0, "dog": 1, "foosa": 2}
log_loss = tc.evaluation.log_loss(targets, predictions, index_map=index_map)
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_prob_and_prob_vector(predictions)
_check_target_not_float(targets)
_check_index_map(index_map)
multiclass = predictions.dtype not in [float, int]
opts = {}
if index_map is not None:
opts['index_map'] = index_map
if multiclass:
result = _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "multiclass_logloss", opts)
else:
result = _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "binary_logloss", opts)
return result | [
"def",
"log_loss",
"(",
"targets",
",",
"predictions",
",",
"index_map",
"=",
"None",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"_check_prob_and_prob_vector",
"(",
"predictions",
")",
"_check_target_not_float",
"(",
"targets",
")",
"_check_index_map",
"(",
"index_map",
")",
"multiclass",
"=",
"predictions",
".",
"dtype",
"not",
"in",
"[",
"float",
",",
"int",
"]",
"opts",
"=",
"{",
"}",
"if",
"index_map",
"is",
"not",
"None",
":",
"opts",
"[",
"'index_map'",
"]",
"=",
"index_map",
"if",
"multiclass",
":",
"result",
"=",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"multiclass_logloss\"",
",",
"opts",
")",
"else",
":",
"result",
"=",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"binary_logloss\"",
",",
"opts",
")",
"return",
"result"
] | r"""
Compute the logloss for the given targets and the given predicted
probabilities. This quantity is defined to be the negative of the sum
of the log probability of each observation, normalized by the number of
observations:
.. math::
\textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N}
(y_i \log(p_i) + (1-y_i)\log(1-p_i)) ,
where y_i is the i'th target value and p_i is the i'th predicted
probability.
For multiclass situations, the definition is a slight generalization of the
above:
.. math::
\textrm{logloss} = - \frac{1}{N} \sum_{i \in 1,\ldots,N}
\sum_{j \in 1, \ldots, L}
(y_{ij} \log(p_{ij})) ,
where :math:`L` is the number of classes and :math:`y_{ij}` indicates that
observation `i` has class label `j`.
Parameters
----------
targets : SArray
Ground truth class labels. This can either contain integers or strings.
predictions : SArray
The predicted probability that corresponds to each target value. For
binary classification, the probability corresponds to the probability
of the "positive" label being predicted. For multi-class
classification, the predictions are expected to be an array of
predictions for each class.
index_map : dict[int], [None (default)]
For binary classification, a dictionary mapping the two target labels to
either 0 (negative) or 1 (positive). For multi-class classification, a
dictionary mapping potential target labels to the associated index into
the vectors in ``predictions``.
Returns
-------
out : float
The log_loss.
See Also
--------
accuracy
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case. This behavior can be overridden by providing
an explicit ``index_map``.
- For multi-class classification, when the target label is of type
"string", then the probability vector is assumed to be a vector of
probabilities of classes as sorted alphanumerically. Hence, for the
probability vector [0.1, 0.2, 0.7] for a dataset with classes "cat",
"dog", and "rat"; the 0.1 corresponds to "cat", the 0.2 to "dog" and the
0.7 to "rat". This behavior can be overridden by providing an explicit
``index_map``.
- Logloss is undefined when a probability value p = 0, or p = 1. Hence,
probabilities are clipped to max(EPSILON, min(1 - EPSILON, p)) where
EPSILON = 1e-15.
References
----------
https://www.kaggle.com/wiki/LogLoss
Examples
--------
.. sourcecode:: python
import turicreate as tc
targets = tc.SArray([0, 1, 1, 0])
predictions = tc.SArray([0.1, 0.35, 0.7, 0.99])
log_loss = tc.evaluation.log_loss(targets, predictions)
For binary classification, when the target label is of type "string", then
the labels are sorted alphanumerically and the largest label is chosen as
the "positive" label.
.. sourcecode:: python
import turicreate as tc
targets = tc.SArray(["cat", "dog", "dog", "cat"])
predictions = tc.SArray([0.1, 0.35, 0.7, 0.99])
log_loss = tc.evaluation.log_loss(targets, predictions)
In the multi-class setting, log-loss requires a vector of probabilities
(that sum to 1) for each class label in the input dataset. In this example,
there are three classes [0, 1, 2], and the vector of probabilities
correspond to the probability of prediction for each of the three classes.
.. sourcecode:: python
target = tc.SArray([ 1, 0, 2, 1])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
log_loss = tc.evaluation.log_loss(targets, predictions)
For multi-class classification, when the target label is of type "string",
then the probability vector is assumed to be a vector of probabilities of
class as sorted alphanumerically. Hence, for the probability vector [0.1,
0.2, 0.7] for a dataset with classes "cat", "dog", and "rat"; the 0.1
corresponds to "cat", the 0.2 to "dog" and the 0.7 to "rat".
.. sourcecode:: python
target = tc.SArray([ "dog", "cat", "foosa", "dog"])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
log_loss = tc.evaluation.log_loss(targets, predictions)
If the probability vectors contain predictions for labels not present among
the targets, an explicit index map must be provided.
.. sourcecode:: python
target = tc.SArray([ "dog", "cat", "cat", "dog"])
predictions = tc.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
index_map = {"cat": 0, "dog": 1, "foosa": 2}
log_loss = tc.evaluation.log_loss(targets, predictions, index_map=index_map) | [
"r",
"Compute",
"the",
"logloss",
"for",
"the",
"given",
"targets",
"and",
"the",
"given",
"predicted",
"probabilities",
".",
"This",
"quantity",
"is",
"defined",
"to",
"be",
"the",
"negative",
"of",
"the",
"sum",
"of",
"the",
"log",
"probability",
"of",
"each",
"observation",
"normalized",
"by",
"the",
"number",
"of",
"observations",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L87-L245 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | max_error | def max_error(targets, predictions):
r"""
Compute the maximum absolute deviation between two SArrays.
Parameters
----------
targets : SArray[float or int]
An Sarray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The maximum absolute deviation error between the two SArrays.
See Also
--------
rmse
Notes
-----
The maximum absolute deviation between two vectors, x and y, is defined as:
.. math::
\textrm{max error} = \max_{i \in 1,\ldots,N} \|x_i - y_i\|
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.max_error(targets, predictions)
2.5
"""
_supervised_evaluation_error_checking(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "max_error", {}) | python | def max_error(targets, predictions):
r"""
Compute the maximum absolute deviation between two SArrays.
Parameters
----------
targets : SArray[float or int]
An Sarray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The maximum absolute deviation error between the two SArrays.
See Also
--------
rmse
Notes
-----
The maximum absolute deviation between two vectors, x and y, is defined as:
.. math::
\textrm{max error} = \max_{i \in 1,\ldots,N} \|x_i - y_i\|
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.max_error(targets, predictions)
2.5
"""
_supervised_evaluation_error_checking(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "max_error", {}) | [
"def",
"max_error",
"(",
"targets",
",",
"predictions",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"return",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"max_error\"",
",",
"{",
"}",
")"
] | r"""
Compute the maximum absolute deviation between two SArrays.
Parameters
----------
targets : SArray[float or int]
An Sarray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The maximum absolute deviation error between the two SArrays.
See Also
--------
rmse
Notes
-----
The maximum absolute deviation between two vectors, x and y, is defined as:
.. math::
\textrm{max error} = \max_{i \in 1,\ldots,N} \|x_i - y_i\|
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.max_error(targets, predictions)
2.5 | [
"r",
"Compute",
"the",
"maximum",
"absolute",
"deviation",
"between",
"two",
"SArrays",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L248-L288 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | rmse | def rmse(targets, predictions):
r"""
Compute the root mean squared error between two SArrays.
Parameters
----------
targets : SArray[float or int]
An Sarray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The RMSE between the two SArrays.
See Also
--------
max_error
Notes
-----
The root mean squared error between two vectors, x and y, is defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - y_i)^2}
References
----------
- `Wikipedia - root-mean-square deviation
<http://en.wikipedia.org/wiki/Root-mean-square_deviation>`_
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.rmse(targets, predictions)
1.2749117616525465
"""
_supervised_evaluation_error_checking(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "rmse", {}) | python | def rmse(targets, predictions):
r"""
Compute the root mean squared error between two SArrays.
Parameters
----------
targets : SArray[float or int]
An Sarray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The RMSE between the two SArrays.
See Also
--------
max_error
Notes
-----
The root mean squared error between two vectors, x and y, is defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - y_i)^2}
References
----------
- `Wikipedia - root-mean-square deviation
<http://en.wikipedia.org/wiki/Root-mean-square_deviation>`_
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.rmse(targets, predictions)
1.2749117616525465
"""
_supervised_evaluation_error_checking(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "rmse", {}) | [
"def",
"rmse",
"(",
"targets",
",",
"predictions",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"return",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"rmse\"",
",",
"{",
"}",
")"
] | r"""
Compute the root mean squared error between two SArrays.
Parameters
----------
targets : SArray[float or int]
An Sarray of ground truth target values.
predictions : SArray[float or int]
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``.
Returns
-------
out : float
The RMSE between the two SArrays.
See Also
--------
max_error
Notes
-----
The root mean squared error between two vectors, x and y, is defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - y_i)^2}
References
----------
- `Wikipedia - root-mean-square deviation
<http://en.wikipedia.org/wiki/Root-mean-square_deviation>`_
Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.rmse(targets, predictions)
1.2749117616525465 | [
"r",
"Compute",
"the",
"root",
"mean",
"squared",
"error",
"between",
"two",
"SArrays",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L290-L336 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | confusion_matrix | def confusion_matrix(targets, predictions):
r"""
Compute the confusion matrix for classifier predictions.
Parameters
----------
targets : SArray
Ground truth class labels (cannot be of type float).
predictions : SArray
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``. The predictions
SArray cannot be of type float.
Returns
-------
out : SFrame
An SFrame containing counts for 'target_label', 'predicted_label' and
'count' corresponding to each pair of true and predicted labels.
See Also
--------
accuracy
Examples
--------
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([1, 0, 1, 0])
>>> turicreate.evaluation.confusion_matrix(targets, predictions)
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_same_type_not_float(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "confusion_matrix_no_map", {}) | python | def confusion_matrix(targets, predictions):
r"""
Compute the confusion matrix for classifier predictions.
Parameters
----------
targets : SArray
Ground truth class labels (cannot be of type float).
predictions : SArray
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``. The predictions
SArray cannot be of type float.
Returns
-------
out : SFrame
An SFrame containing counts for 'target_label', 'predicted_label' and
'count' corresponding to each pair of true and predicted labels.
See Also
--------
accuracy
Examples
--------
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([1, 0, 1, 0])
>>> turicreate.evaluation.confusion_matrix(targets, predictions)
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_same_type_not_float(targets, predictions)
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "confusion_matrix_no_map", {}) | [
"def",
"confusion_matrix",
"(",
"targets",
",",
"predictions",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"_check_same_type_not_float",
"(",
"targets",
",",
"predictions",
")",
"return",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"confusion_matrix_no_map\"",
",",
"{",
"}",
")"
] | r"""
Compute the confusion matrix for classifier predictions.
Parameters
----------
targets : SArray
Ground truth class labels (cannot be of type float).
predictions : SArray
The prediction that corresponds to each target value.
This vector must have the same length as ``targets``. The predictions
SArray cannot be of type float.
Returns
-------
out : SFrame
An SFrame containing counts for 'target_label', 'predicted_label' and
'count' corresponding to each pair of true and predicted labels.
See Also
--------
accuracy
Examples
--------
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([1, 0, 1, 0])
>>> turicreate.evaluation.confusion_matrix(targets, predictions) | [
"r",
"Compute",
"the",
"confusion",
"matrix",
"for",
"classifier",
"predictions",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L337-L372 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | accuracy | def accuracy(targets, predictions, average='micro'):
r"""
Compute the accuracy score; which measures the fraction of predictions made
by the classifier that are exactly correct. The score lies in the range [0,1]
with 0 being the worst and 1 being the best.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type as the
``targets`` SArray.
average : string, [None, 'micro' (default), 'macro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (for multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
confusion_matrix, precision, recall, f1_score, auc, log_loss, roc_curve
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'micro')
0.25
# Macro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'macro')
0.24305555555555558
# Accuracy score for each class.
>>> turicreate.evaluation.accuracy(targets, predictions, average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works when the targets are of type `str`
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(["cat", "dog", "foosa", "cat", "dog"])
>>> predictions = turicreate.SArray(["cat", "foosa", "dog", "cat", "foosa"])
# Micro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'micro')
0.4
# Macro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'macro')
0.6
# Accuracy score for each class.
>>> turicreate.evaluation.accuracy(targets, predictions, average = None)
{'cat': 1.0, 'dog': 0.4, 'foosa': 0.4}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437.
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_same_type_not_float(targets, predictions)
opts = {"average": average}
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "flexible_accuracy", opts) | python | def accuracy(targets, predictions, average='micro'):
r"""
Compute the accuracy score; which measures the fraction of predictions made
by the classifier that are exactly correct. The score lies in the range [0,1]
with 0 being the worst and 1 being the best.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type as the
``targets`` SArray.
average : string, [None, 'micro' (default), 'macro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (for multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
confusion_matrix, precision, recall, f1_score, auc, log_loss, roc_curve
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'micro')
0.25
# Macro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'macro')
0.24305555555555558
# Accuracy score for each class.
>>> turicreate.evaluation.accuracy(targets, predictions, average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works when the targets are of type `str`
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(["cat", "dog", "foosa", "cat", "dog"])
>>> predictions = turicreate.SArray(["cat", "foosa", "dog", "cat", "foosa"])
# Micro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'micro')
0.4
# Macro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'macro')
0.6
# Accuracy score for each class.
>>> turicreate.evaluation.accuracy(targets, predictions, average = None)
{'cat': 1.0, 'dog': 0.4, 'foosa': 0.4}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437.
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_same_type_not_float(targets, predictions)
opts = {"average": average}
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "flexible_accuracy", opts) | [
"def",
"accuracy",
"(",
"targets",
",",
"predictions",
",",
"average",
"=",
"'micro'",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"_check_same_type_not_float",
"(",
"targets",
",",
"predictions",
")",
"opts",
"=",
"{",
"\"average\"",
":",
"average",
"}",
"return",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"flexible_accuracy\"",
",",
"opts",
")"
] | r"""
Compute the accuracy score; which measures the fraction of predictions made
by the classifier that are exactly correct. The score lies in the range [0,1]
with 0 being the worst and 1 being the best.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type as the
``targets`` SArray.
average : string, [None, 'micro' (default), 'macro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (for multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
confusion_matrix, precision, recall, f1_score, auc, log_loss, roc_curve
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'micro')
0.25
# Macro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'macro')
0.24305555555555558
# Accuracy score for each class.
>>> turicreate.evaluation.accuracy(targets, predictions, average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works when the targets are of type `str`
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(["cat", "dog", "foosa", "cat", "dog"])
>>> predictions = turicreate.SArray(["cat", "foosa", "dog", "cat", "foosa"])
# Micro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'micro')
0.4
# Macro average of the accuracy score.
>>> turicreate.evaluation.accuracy(targets, predictions, average = 'macro')
0.6
# Accuracy score for each class.
>>> turicreate.evaluation.accuracy(targets, predictions, average = None)
{'cat': 1.0, 'dog': 0.4, 'foosa': 0.4}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437. | [
"r",
"Compute",
"the",
"accuracy",
"score",
";",
"which",
"measures",
"the",
"fraction",
"of",
"predictions",
"made",
"by",
"the",
"classifier",
"that",
"are",
"exactly",
"correct",
".",
"The",
"score",
"lies",
"in",
"the",
"range",
"[",
"0",
"1",
"]",
"with",
"0",
"being",
"the",
"worst",
"and",
"1",
"being",
"the",
"best",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L374-L471 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | fbeta_score | def fbeta_score(targets, predictions, beta=1.0, average='macro'):
r"""
Compute the F-beta score. The F-beta score is the weighted harmonic mean of
precision and recall. The score lies in the range [0,1] with 1 being ideal
and 0 being the worst.
The `beta` value is the weight given to `precision` vs `recall` in the
combined score. `beta=0` considers only precision, as `beta` increases, more
weight is given to recall with `beta > 1` favoring recall over precision.
The F-beta score is defined as:
.. math::
f_{\beta} = (1 + \beta^2) \times \frac{(p \times r)}{(\beta^2 p + r)}
Where :math:`p` is the precision and :math:`r` is the recall.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
beta: float
Weight of the `precision` term in the harmonic mean.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (for multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
Notes
-----
- For binary classification, if the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
See Also
--------
confusion_matrix, accuracy, precision, recall, f1_score
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'micro')
0.25
# Macro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'macro')
0.24305555555555558
# F-Beta score for each class.
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works when the targets are of type `str`
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'micro')
0.25
# Macro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'macro')
0.24305555555555558
# F-Beta score for each class.
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = None)
{'cat': 0.0, 'dog': 0.4166666666666667, 'foosa': 0.5555555555555556, 'snake': 0.0}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437.
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_categorical_option_type('average', average,
['micro', 'macro', None])
_check_same_type_not_float(targets, predictions)
opts = {"beta" : beta,
"average" : average}
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "fbeta_score", opts) | python | def fbeta_score(targets, predictions, beta=1.0, average='macro'):
r"""
Compute the F-beta score. The F-beta score is the weighted harmonic mean of
precision and recall. The score lies in the range [0,1] with 1 being ideal
and 0 being the worst.
The `beta` value is the weight given to `precision` vs `recall` in the
combined score. `beta=0` considers only precision, as `beta` increases, more
weight is given to recall with `beta > 1` favoring recall over precision.
The F-beta score is defined as:
.. math::
f_{\beta} = (1 + \beta^2) \times \frac{(p \times r)}{(\beta^2 p + r)}
Where :math:`p` is the precision and :math:`r` is the recall.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
beta: float
Weight of the `precision` term in the harmonic mean.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (for multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
Notes
-----
- For binary classification, if the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
See Also
--------
confusion_matrix, accuracy, precision, recall, f1_score
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'micro')
0.25
# Macro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'macro')
0.24305555555555558
# F-Beta score for each class.
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works when the targets are of type `str`
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'micro')
0.25
# Macro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'macro')
0.24305555555555558
# F-Beta score for each class.
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = None)
{'cat': 0.0, 'dog': 0.4166666666666667, 'foosa': 0.5555555555555556, 'snake': 0.0}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437.
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_categorical_option_type('average', average,
['micro', 'macro', None])
_check_same_type_not_float(targets, predictions)
opts = {"beta" : beta,
"average" : average}
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "fbeta_score", opts) | [
"def",
"fbeta_score",
"(",
"targets",
",",
"predictions",
",",
"beta",
"=",
"1.0",
",",
"average",
"=",
"'macro'",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"_check_categorical_option_type",
"(",
"'average'",
",",
"average",
",",
"[",
"'micro'",
",",
"'macro'",
",",
"None",
"]",
")",
"_check_same_type_not_float",
"(",
"targets",
",",
"predictions",
")",
"opts",
"=",
"{",
"\"beta\"",
":",
"beta",
",",
"\"average\"",
":",
"average",
"}",
"return",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"fbeta_score\"",
",",
"opts",
")"
] | r"""
Compute the F-beta score. The F-beta score is the weighted harmonic mean of
precision and recall. The score lies in the range [0,1] with 1 being ideal
and 0 being the worst.
The `beta` value is the weight given to `precision` vs `recall` in the
combined score. `beta=0` considers only precision, as `beta` increases, more
weight is given to recall with `beta > 1` favoring recall over precision.
The F-beta score is defined as:
.. math::
f_{\beta} = (1 + \beta^2) \times \frac{(p \times r)}{(\beta^2 p + r)}
Where :math:`p` is the precision and :math:`r` is the recall.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
beta: float
Weight of the `precision` term in the harmonic mean.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (for multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
Notes
-----
- For binary classification, if the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
See Also
--------
confusion_matrix, accuracy, precision, recall, f1_score
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'micro')
0.25
# Macro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'macro')
0.24305555555555558
# F-Beta score for each class.
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works when the targets are of type `str`
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'micro')
0.25
# Macro average of the F-Beta score
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = 'macro')
0.24305555555555558
# F-Beta score for each class.
>>> turicreate.evaluation.fbeta_score(targets, predictions,
... beta=2.0, average = None)
{'cat': 0.0, 'dog': 0.4166666666666667, 'foosa': 0.5555555555555556, 'snake': 0.0}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437. | [
"r",
"Compute",
"the",
"F",
"-",
"beta",
"score",
".",
"The",
"F",
"-",
"beta",
"score",
"is",
"the",
"weighted",
"harmonic",
"mean",
"of",
"precision",
"and",
"recall",
".",
"The",
"score",
"lies",
"in",
"the",
"range",
"[",
"0",
"1",
"]",
"with",
"1",
"being",
"ideal",
"and",
"0",
"being",
"the",
"worst",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L474-L605 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | f1_score | def f1_score(targets, predictions, average='macro'):
r"""
Compute the F1 score (sometimes known as the balanced F-score or
F-measure). The F1 score is commonly interpreted as the average of
precision and recall. The score lies in the range [0,1] with 1 being ideal
and 0 being the worst.
The F1 score is defined as:
.. math::
f_{1} = \frac{2 \times p \times r}{p + r}
Where :math:`p` is the precision and :math:`r` is the recall.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The class prediction that corresponds to each target value. This SArray
must have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
confusion_matrix, accuracy, precision, recall, fbeta_score
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'micro')
0.25
# Macro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'macro')
0.25
# F-1 score for each class.
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works for string classes.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'micro')
0.25
# Macro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'macro')
0.25
# F-1 score for each class.
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = None)
{'cat': 0.0, 'dog': 0.4166666666666667, 'foosa': 0.5555555555555556, 'snake': 0.0}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437.
"""
return fbeta_score(targets, predictions, beta = 1.0, average = average) | python | def f1_score(targets, predictions, average='macro'):
r"""
Compute the F1 score (sometimes known as the balanced F-score or
F-measure). The F1 score is commonly interpreted as the average of
precision and recall. The score lies in the range [0,1] with 1 being ideal
and 0 being the worst.
The F1 score is defined as:
.. math::
f_{1} = \frac{2 \times p \times r}{p + r}
Where :math:`p` is the precision and :math:`r` is the recall.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The class prediction that corresponds to each target value. This SArray
must have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
confusion_matrix, accuracy, precision, recall, fbeta_score
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'micro')
0.25
# Macro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'macro')
0.25
# F-1 score for each class.
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works for string classes.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'micro')
0.25
# Macro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'macro')
0.25
# F-1 score for each class.
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = None)
{'cat': 0.0, 'dog': 0.4166666666666667, 'foosa': 0.5555555555555556, 'snake': 0.0}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437.
"""
return fbeta_score(targets, predictions, beta = 1.0, average = average) | [
"def",
"f1_score",
"(",
"targets",
",",
"predictions",
",",
"average",
"=",
"'macro'",
")",
":",
"return",
"fbeta_score",
"(",
"targets",
",",
"predictions",
",",
"beta",
"=",
"1.0",
",",
"average",
"=",
"average",
")"
] | r"""
Compute the F1 score (sometimes known as the balanced F-score or
F-measure). The F1 score is commonly interpreted as the average of
precision and recall. The score lies in the range [0,1] with 1 being ideal
and 0 being the worst.
The F1 score is defined as:
.. math::
f_{1} = \frac{2 \times p \times r}{p + r}
Where :math:`p` is the precision and :math:`r` is the recall.
Parameters
----------
targets : SArray
An SArray of ground truth class labels. Can be of any type except
float.
predictions : SArray
The class prediction that corresponds to each target value. This SArray
must have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
For a more precise definition of `micro` and `macro` averaging refer
to [1] below.
Returns
-------
out : float (for binary classification) or dict[float] (multi-class, average=None)
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
confusion_matrix, accuracy, precision, recall, fbeta_score
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'micro')
0.25
# Macro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'macro')
0.25
# F-1 score for each class.
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = None)
{0: 0.0, 1: 0.4166666666666667, 2: 0.5555555555555556, 3: 0.0}
This metric also works for string classes.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'micro')
0.25
# Macro average of the F-1 score
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = 'macro')
0.25
# F-1 score for each class.
>>> turicreate.evaluation.f1_score(targets, predictions,
... average = None)
{'cat': 0.0, 'dog': 0.4166666666666667, 'foosa': 0.5555555555555556, 'snake': 0.0}
References
----------
- [1] Sokolova, Marina, and Guy Lapalme. "A systematic analysis of
performance measures for classification tasks." Information Processing &
Management 45.4 (2009): 427-437. | [
"r",
"Compute",
"the",
"F1",
"score",
"(",
"sometimes",
"known",
"as",
"the",
"balanced",
"F",
"-",
"score",
"or",
"F",
"-",
"measure",
")",
".",
"The",
"F1",
"score",
"is",
"commonly",
"interpreted",
"as",
"the",
"average",
"of",
"precision",
"and",
"recall",
".",
"The",
"score",
"lies",
"in",
"the",
"range",
"[",
"0",
"1",
"]",
"with",
"1",
"being",
"ideal",
"and",
"0",
"being",
"the",
"worst",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L607-L723 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | precision | def precision(targets, predictions, average='macro'):
r"""
Compute the precision score for classification tasks. The precision score
quantifies the ability of a classifier to not label a `negative` example as
`positive`. The precision score can be interpreted as the probability that
a `positive` prediction made by the classifier is `positive`. The score is
in the range [0,1] with 0 being the worst, and 1 being perfect.
The precision score is defined as the ratio:
.. math::
\frac{tp}{tp + fp}
where `tp` is the number of true positives and `fp` the number of false
positives.
Parameters
----------
targets : SArray
Ground truth class labels.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
Returns
-------
out : float (for binary classification) or dict[float]
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
See Also
--------
confusion_matrix, accuracy, recall, f1_score
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'micro')
0.25
# Macro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'macro')
0.3125
# Precision score for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = None)
{0: 0.0, 1: 0.25, 2: 1.0, 3: 0.0}
This metric also works for string classes.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'micro')
0.25
# Macro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'macro')
0.3125
# Precision score for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = None)
{0: 0.0, 1: 0.25, 2: 1.0, 3: 0.0}
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_categorical_option_type('average', average,
['micro', 'macro', None])
_check_same_type_not_float(targets, predictions)
opts = {"average": average}
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "precision", opts) | python | def precision(targets, predictions, average='macro'):
r"""
Compute the precision score for classification tasks. The precision score
quantifies the ability of a classifier to not label a `negative` example as
`positive`. The precision score can be interpreted as the probability that
a `positive` prediction made by the classifier is `positive`. The score is
in the range [0,1] with 0 being the worst, and 1 being perfect.
The precision score is defined as the ratio:
.. math::
\frac{tp}{tp + fp}
where `tp` is the number of true positives and `fp` the number of false
positives.
Parameters
----------
targets : SArray
Ground truth class labels.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
Returns
-------
out : float (for binary classification) or dict[float]
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
See Also
--------
confusion_matrix, accuracy, recall, f1_score
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'micro')
0.25
# Macro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'macro')
0.3125
# Precision score for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = None)
{0: 0.0, 1: 0.25, 2: 1.0, 3: 0.0}
This metric also works for string classes.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'micro')
0.25
# Macro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'macro')
0.3125
# Precision score for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = None)
{0: 0.0, 1: 0.25, 2: 1.0, 3: 0.0}
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_categorical_option_type('average', average,
['micro', 'macro', None])
_check_same_type_not_float(targets, predictions)
opts = {"average": average}
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "precision", opts) | [
"def",
"precision",
"(",
"targets",
",",
"predictions",
",",
"average",
"=",
"'macro'",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"_check_categorical_option_type",
"(",
"'average'",
",",
"average",
",",
"[",
"'micro'",
",",
"'macro'",
",",
"None",
"]",
")",
"_check_same_type_not_float",
"(",
"targets",
",",
"predictions",
")",
"opts",
"=",
"{",
"\"average\"",
":",
"average",
"}",
"return",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"precision\"",
",",
"opts",
")"
] | r"""
Compute the precision score for classification tasks. The precision score
quantifies the ability of a classifier to not label a `negative` example as
`positive`. The precision score can be interpreted as the probability that
a `positive` prediction made by the classifier is `positive`. The score is
in the range [0,1] with 0 being the worst, and 1 being perfect.
The precision score is defined as the ratio:
.. math::
\frac{tp}{tp + fp}
where `tp` is the number of true positives and `fp` the number of false
positives.
Parameters
----------
targets : SArray
Ground truth class labels.
predictions : SArray
The prediction that corresponds to each target value. This SArray must
have the same length as ``targets`` and must be of the same type
as the ``targets`` SArray.
average : string, [None, 'macro' (default), 'micro']
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'micro': Calculate metrics globally by counting the total true
positives, and false positives.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
Returns
-------
out : float (for binary classification) or dict[float]
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
Notes
-----
- For binary classification, when the target label is of type "string",
then the labels are sorted alphanumerically and the largest label is
chosen as the "positive" label. For example, if the classifier labels
are {"cat", "dog"}, then "dog" is chosen as the positive label for the
binary classification case.
See Also
--------
confusion_matrix, accuracy, recall, f1_score
Examples
--------
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([0, 1, 2, 3, 0, 1, 2, 3])
>>> predictions = turicreate.SArray([1, 0, 2, 1, 3, 1, 0, 1])
# Micro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'micro')
0.25
# Macro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'macro')
0.3125
# Precision score for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = None)
{0: 0.0, 1: 0.25, 2: 1.0, 3: 0.0}
This metric also works for string classes.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray(
... ["cat", "dog", "foosa", "snake", "cat", "dog", "foosa", "snake"])
>>> predictions = turicreate.SArray(
... ["dog", "cat", "foosa", "dog", "snake", "dog", "cat", "dog"])
# Micro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'micro')
0.25
# Macro average of the precision scores for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = 'macro')
0.3125
# Precision score for each class.
>>> turicreate.evaluation.precision(targets, predictions,
... average = None)
{0: 0.0, 1: 0.25, 2: 1.0, 3: 0.0} | [
"r"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L725-L839 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/evaluation.py | auc | def auc(targets, predictions, average='macro', index_map=None):
r"""
Compute the area under the ROC curve for the given targets and predictions.
Parameters
----------
targets : SArray
An SArray containing the observed values. For binary classification,
the alpha-numerically first category is considered the reference
category.
predictions : SArray
Prediction probability that corresponds to each target value. This must
be of same length as ``targets``.
average : string, [None, 'macro' (default)]
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
index_map : dict[int], [None (default)]
For binary classification, a dictionary mapping the two target labels to
either 0 (negative) or 1 (positive). For multi-class classification, a
dictionary mapping potential target labels to the associated index into
the vectors in ``predictions``.
Returns
-------
out : float (for binary classification) or dict[float]
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
roc_curve, confusion_matrix
Examples
--------
.. sourcecode:: python
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
This metric also works when the targets are strings (Here "cat" is chosen
as the reference class).
.. sourcecode:: python
>>> targets = turicreate.SArray(["cat", "dog", "dog", "cat"])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
For the multi-class setting, the auc-score can be averaged.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ 1, 0, 2, 1])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
... [.9, .1, 0.0],
... [.8, .1, 0.1],
... [.3, .6, 0.1]])
# Macro average of the scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = 'macro')
0.8888888888888888
# Scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = None)
{0: 1.0, 1: 1.0, 2: 0.6666666666666666}
This metric also works for "string" targets in the multi-class setting
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ "dog", "cat", "foosa", "dog"])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
# Macro average.
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.8888888888888888
# Score for each class.
>>> auc = turicreate.evaluation.auc(targets, predictions, average=None)
{'cat': 1.0, 'dog': 1.0, 'foosa': 0.6666666666666666}
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_categorical_option_type('average', average,
['macro', None])
_check_prob_and_prob_vector(predictions)
_check_target_not_float(targets)
_check_index_map(index_map)
opts = {"average": average,
"binary": predictions.dtype in [int, float]}
if index_map is not None:
opts['index_map'] = index_map
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "auc", opts) | python | def auc(targets, predictions, average='macro', index_map=None):
r"""
Compute the area under the ROC curve for the given targets and predictions.
Parameters
----------
targets : SArray
An SArray containing the observed values. For binary classification,
the alpha-numerically first category is considered the reference
category.
predictions : SArray
Prediction probability that corresponds to each target value. This must
be of same length as ``targets``.
average : string, [None, 'macro' (default)]
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
index_map : dict[int], [None (default)]
For binary classification, a dictionary mapping the two target labels to
either 0 (negative) or 1 (positive). For multi-class classification, a
dictionary mapping potential target labels to the associated index into
the vectors in ``predictions``.
Returns
-------
out : float (for binary classification) or dict[float]
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
roc_curve, confusion_matrix
Examples
--------
.. sourcecode:: python
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
This metric also works when the targets are strings (Here "cat" is chosen
as the reference class).
.. sourcecode:: python
>>> targets = turicreate.SArray(["cat", "dog", "dog", "cat"])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
For the multi-class setting, the auc-score can be averaged.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ 1, 0, 2, 1])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
... [.9, .1, 0.0],
... [.8, .1, 0.1],
... [.3, .6, 0.1]])
# Macro average of the scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = 'macro')
0.8888888888888888
# Scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = None)
{0: 1.0, 1: 1.0, 2: 0.6666666666666666}
This metric also works for "string" targets in the multi-class setting
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ "dog", "cat", "foosa", "dog"])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
# Macro average.
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.8888888888888888
# Score for each class.
>>> auc = turicreate.evaluation.auc(targets, predictions, average=None)
{'cat': 1.0, 'dog': 1.0, 'foosa': 0.6666666666666666}
"""
_supervised_evaluation_error_checking(targets, predictions)
_check_categorical_option_type('average', average,
['macro', None])
_check_prob_and_prob_vector(predictions)
_check_target_not_float(targets)
_check_index_map(index_map)
opts = {"average": average,
"binary": predictions.dtype in [int, float]}
if index_map is not None:
opts['index_map'] = index_map
return _turicreate.extensions._supervised_streaming_evaluator(targets,
predictions, "auc", opts) | [
"def",
"auc",
"(",
"targets",
",",
"predictions",
",",
"average",
"=",
"'macro'",
",",
"index_map",
"=",
"None",
")",
":",
"_supervised_evaluation_error_checking",
"(",
"targets",
",",
"predictions",
")",
"_check_categorical_option_type",
"(",
"'average'",
",",
"average",
",",
"[",
"'macro'",
",",
"None",
"]",
")",
"_check_prob_and_prob_vector",
"(",
"predictions",
")",
"_check_target_not_float",
"(",
"targets",
")",
"_check_index_map",
"(",
"index_map",
")",
"opts",
"=",
"{",
"\"average\"",
":",
"average",
",",
"\"binary\"",
":",
"predictions",
".",
"dtype",
"in",
"[",
"int",
",",
"float",
"]",
"}",
"if",
"index_map",
"is",
"not",
"None",
":",
"opts",
"[",
"'index_map'",
"]",
"=",
"index_map",
"return",
"_turicreate",
".",
"extensions",
".",
"_supervised_streaming_evaluator",
"(",
"targets",
",",
"predictions",
",",
"\"auc\"",
",",
"opts",
")"
] | r"""
Compute the area under the ROC curve for the given targets and predictions.
Parameters
----------
targets : SArray
An SArray containing the observed values. For binary classification,
the alpha-numerically first category is considered the reference
category.
predictions : SArray
Prediction probability that corresponds to each target value. This must
be of same length as ``targets``.
average : string, [None, 'macro' (default)]
Metric averaging strategies for multiclass classification. Averaging
strategies can be one of the following:
- None: No averaging is performed and a single metric is returned
for each class.
- 'macro': Calculate metrics for each label, and find their
unweighted mean. This does not take label imbalance into account.
index_map : dict[int], [None (default)]
For binary classification, a dictionary mapping the two target labels to
either 0 (negative) or 1 (positive). For multi-class classification, a
dictionary mapping potential target labels to the associated index into
the vectors in ``predictions``.
Returns
-------
out : float (for binary classification) or dict[float]
Score for the positive class (for binary classification) or an average
score for each class for multi-class classification. If
`average=None`, then a dictionary is returned where the key is the
class label and the value is the score for the corresponding class
label.
See Also
--------
roc_curve, confusion_matrix
Examples
--------
.. sourcecode:: python
>>> targets = turicreate.SArray([0, 1, 1, 0])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
This metric also works when the targets are strings (Here "cat" is chosen
as the reference class).
.. sourcecode:: python
>>> targets = turicreate.SArray(["cat", "dog", "dog", "cat"])
>>> predictions = turicreate.SArray([0.1, 0.35, 0.7, 0.99])
# Calculate the auc-score
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.5
For the multi-class setting, the auc-score can be averaged.
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ 1, 0, 2, 1])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
... [.9, .1, 0.0],
... [.8, .1, 0.1],
... [.3, .6, 0.1]])
# Macro average of the scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = 'macro')
0.8888888888888888
# Scores for each class.
>>> turicreate.evaluation.auc(targets, predictions, average = None)
{0: 1.0, 1: 1.0, 2: 0.6666666666666666}
This metric also works for "string" targets in the multi-class setting
.. sourcecode:: python
# Targets and Predictions
>>> targets = turicreate.SArray([ "dog", "cat", "foosa", "dog"])
>>> predictions = turicreate.SArray([[.1, .8, 0.1],
[.9, .1, 0.0],
[.8, .1, 0.1],
[.3, .6, 0.1]])
# Macro average.
>>> auc = turicreate.evaluation.auc(targets, predictions)
0.8888888888888888
# Score for each class.
>>> auc = turicreate.evaluation.auc(targets, predictions, average=None)
{'cat': 1.0, 'dog': 1.0, 'foosa': 0.6666666666666666} | [
"r",
"Compute",
"the",
"area",
"under",
"the",
"ROC",
"curve",
"for",
"the",
"given",
"targets",
"and",
"predictions",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/evaluation.py#L1150-L1269 | train |
apple/turicreate | deps/src/boost_1_68_0/status/boost_check_library.py | check_library.get_library_meta | def get_library_meta(self):
'''
Fetches the meta data for the current library. The data could be in
the superlib meta data file. If we can't find the data None is returned.
'''
parent_dir = os.path.dirname(self.library_dir)
if self.test_file_exists(os.path.join(self.library_dir,'meta'),['libraries.json']):
with open(os.path.join(self.library_dir,'meta','libraries.json'),'r') as f:
meta_data = json.load(f)
if isinstance(meta_data,list):
for lib in meta_data:
if lib['key'] == self.library_key:
return lib
elif 'key' in meta_data and meta_data['key'] == self.library_key:
return meta_data
if not self.test_dir_exists(os.path.join(self.library_dir,'meta')) \
and self.test_file_exists(os.path.join(parent_dir,'meta'),['libraries.json']):
with open(os.path.join(parent_dir,'meta','libraries.json'),'r') as f:
libraries_json = json.load(f)
if isinstance(libraries_json,list):
for lib in libraries_json:
if lib['key'] == self.library_key:
return lib
return None | python | def get_library_meta(self):
'''
Fetches the meta data for the current library. The data could be in
the superlib meta data file. If we can't find the data None is returned.
'''
parent_dir = os.path.dirname(self.library_dir)
if self.test_file_exists(os.path.join(self.library_dir,'meta'),['libraries.json']):
with open(os.path.join(self.library_dir,'meta','libraries.json'),'r') as f:
meta_data = json.load(f)
if isinstance(meta_data,list):
for lib in meta_data:
if lib['key'] == self.library_key:
return lib
elif 'key' in meta_data and meta_data['key'] == self.library_key:
return meta_data
if not self.test_dir_exists(os.path.join(self.library_dir,'meta')) \
and self.test_file_exists(os.path.join(parent_dir,'meta'),['libraries.json']):
with open(os.path.join(parent_dir,'meta','libraries.json'),'r') as f:
libraries_json = json.load(f)
if isinstance(libraries_json,list):
for lib in libraries_json:
if lib['key'] == self.library_key:
return lib
return None | [
"def",
"get_library_meta",
"(",
"self",
")",
":",
"parent_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"library_dir",
")",
"if",
"self",
".",
"test_file_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"library_dir",
",",
"'meta'",
")",
",",
"[",
"'libraries.json'",
"]",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"library_dir",
",",
"'meta'",
",",
"'libraries.json'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"meta_data",
"=",
"json",
".",
"load",
"(",
"f",
")",
"if",
"isinstance",
"(",
"meta_data",
",",
"list",
")",
":",
"for",
"lib",
"in",
"meta_data",
":",
"if",
"lib",
"[",
"'key'",
"]",
"==",
"self",
".",
"library_key",
":",
"return",
"lib",
"elif",
"'key'",
"in",
"meta_data",
"and",
"meta_data",
"[",
"'key'",
"]",
"==",
"self",
".",
"library_key",
":",
"return",
"meta_data",
"if",
"not",
"self",
".",
"test_dir_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"library_dir",
",",
"'meta'",
")",
")",
"and",
"self",
".",
"test_file_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"'meta'",
")",
",",
"[",
"'libraries.json'",
"]",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"'meta'",
",",
"'libraries.json'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"libraries_json",
"=",
"json",
".",
"load",
"(",
"f",
")",
"if",
"isinstance",
"(",
"libraries_json",
",",
"list",
")",
":",
"for",
"lib",
"in",
"libraries_json",
":",
"if",
"lib",
"[",
"'key'",
"]",
"==",
"self",
".",
"library_key",
":",
"return",
"lib",
"return",
"None"
] | Fetches the meta data for the current library. The data could be in
the superlib meta data file. If we can't find the data None is returned. | [
"Fetches",
"the",
"meta",
"data",
"for",
"the",
"current",
"library",
".",
"The",
"data",
"could",
"be",
"in",
"the",
"superlib",
"meta",
"data",
"file",
".",
"If",
"we",
"can",
"t",
"find",
"the",
"data",
"None",
"is",
"returned",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/status/boost_check_library.py#L182-L205 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/xgboost/_tree.py | convert | def convert(model, feature_names = None, target = 'target', force_32bit_float = True):
"""
Convert a trained XGBoost model to Core ML format.
Parameters
----------
decision_tree : Booster
A trained XGboost tree model.
feature_names: [str] | str
Names of input features that will be exposed in the Core ML model
interface.
Can be set to one of the following:
- None for using the feature names from the model.
- List of names of the input features that should be exposed in the
interface to the Core ML model. These input features are in the same
order as the XGboost model.
target: str
Name of the output feature name exposed to the Core ML model.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.xgboost.convert(model)
# Saving the Core ML model to a file.
>>> coremltools.save('my_model.mlmodel')
"""
return _MLModel(_convert_tree_ensemble(model, feature_names, target, force_32bit_float = force_32bit_float)) | python | def convert(model, feature_names = None, target = 'target', force_32bit_float = True):
"""
Convert a trained XGBoost model to Core ML format.
Parameters
----------
decision_tree : Booster
A trained XGboost tree model.
feature_names: [str] | str
Names of input features that will be exposed in the Core ML model
interface.
Can be set to one of the following:
- None for using the feature names from the model.
- List of names of the input features that should be exposed in the
interface to the Core ML model. These input features are in the same
order as the XGboost model.
target: str
Name of the output feature name exposed to the Core ML model.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.xgboost.convert(model)
# Saving the Core ML model to a file.
>>> coremltools.save('my_model.mlmodel')
"""
return _MLModel(_convert_tree_ensemble(model, feature_names, target, force_32bit_float = force_32bit_float)) | [
"def",
"convert",
"(",
"model",
",",
"feature_names",
"=",
"None",
",",
"target",
"=",
"'target'",
",",
"force_32bit_float",
"=",
"True",
")",
":",
"return",
"_MLModel",
"(",
"_convert_tree_ensemble",
"(",
"model",
",",
"feature_names",
",",
"target",
",",
"force_32bit_float",
"=",
"force_32bit_float",
")",
")"
] | Convert a trained XGBoost model to Core ML format.
Parameters
----------
decision_tree : Booster
A trained XGboost tree model.
feature_names: [str] | str
Names of input features that will be exposed in the Core ML model
interface.
Can be set to one of the following:
- None for using the feature names from the model.
- List of names of the input features that should be exposed in the
interface to the Core ML model. These input features are in the same
order as the XGboost model.
target: str
Name of the output feature name exposed to the Core ML model.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.xgboost.convert(model)
# Saving the Core ML model to a file.
>>> coremltools.save('my_model.mlmodel') | [
"Convert",
"a",
"trained",
"XGBoost",
"model",
"to",
"Core",
"ML",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/xgboost/_tree.py#L9-L51 | train |
apple/turicreate | src/unity/python/turicreate/_json.py | dumps | def dumps(obj):
"""
Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict.
"""
(data, schema) = to_serializable(obj)
return _json.dumps({'data': data, 'schema': schema}) | python | def dumps(obj):
"""
Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict.
"""
(data, schema) = to_serializable(obj)
return _json.dumps({'data': data, 'schema': schema}) | [
"def",
"dumps",
"(",
"obj",
")",
":",
"(",
"data",
",",
"schema",
")",
"=",
"to_serializable",
"(",
"obj",
")",
"return",
"_json",
".",
"dumps",
"(",
"{",
"'data'",
":",
"data",
",",
"'schema'",
":",
"schema",
"}",
")"
] | Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict. | [
"Dumps",
"a",
"serializable",
"object",
"to",
"JSON",
".",
"This",
"API",
"maps",
"to",
"the",
"Python",
"built",
"-",
"in",
"json",
"dumps",
"method",
"with",
"a",
"few",
"differences",
":"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_json.py#L20-L38 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/drawing_classifier/util/_visualization.py | draw_strokes | def draw_strokes(stroke_based_drawings):
"""
Visualizes drawings (ground truth or predictions) by
returning images to represent the stroke-based data from
the user.
Parameters
----------
stroke_based_drawings: SArray or list
An `SArray` of type `list`. Each element in the SArray
should be a list of strokes, where each stroke is a list
of points, and each point is represented as a dictionary
with two keys, "x" and "y". A single stroke-based drawing
is also supported, in which case, the type of the input
would be list.
Returns
-------
drawings: SArray or _tc.Image
Each stroke-based drawing is converted into a 28x28
grayscale drawing for the user to visualize what their
strokes traced.
"""
single_input = False
if (not isinstance(stroke_based_drawings, _tc.SArray)
and not isinstance(stroke_based_drawings, list)):
raise _ToolkitError("Input to draw_strokes must be of type "
+ "turicreate.SArray or list (for a single stroke-based drawing)")
if (isinstance(stroke_based_drawings, _tc.SArray)
and stroke_based_drawings.dtype != list):
raise _ToolkitError("SArray input to draw_strokes must have dtype "
+ "list. Each element in the SArray should be a list of strokes, "
+ "where each stroke is a list of points, "
+ "and each point is represented as a dictionary "
+ "with two keys, \"x\" and \"y\".")
if isinstance(stroke_based_drawings, list):
single_input = True
stroke_based_drawings = _tc.SArray([stroke_based_drawings])
sf = _tc.SFrame({"drawings": stroke_based_drawings})
sf_with_drawings = _extensions._drawing_classifier_prepare_data(
sf, "drawings")
if single_input:
return sf_with_drawings["drawings"][0]
return sf_with_drawings["drawings"] | python | def draw_strokes(stroke_based_drawings):
"""
Visualizes drawings (ground truth or predictions) by
returning images to represent the stroke-based data from
the user.
Parameters
----------
stroke_based_drawings: SArray or list
An `SArray` of type `list`. Each element in the SArray
should be a list of strokes, where each stroke is a list
of points, and each point is represented as a dictionary
with two keys, "x" and "y". A single stroke-based drawing
is also supported, in which case, the type of the input
would be list.
Returns
-------
drawings: SArray or _tc.Image
Each stroke-based drawing is converted into a 28x28
grayscale drawing for the user to visualize what their
strokes traced.
"""
single_input = False
if (not isinstance(stroke_based_drawings, _tc.SArray)
and not isinstance(stroke_based_drawings, list)):
raise _ToolkitError("Input to draw_strokes must be of type "
+ "turicreate.SArray or list (for a single stroke-based drawing)")
if (isinstance(stroke_based_drawings, _tc.SArray)
and stroke_based_drawings.dtype != list):
raise _ToolkitError("SArray input to draw_strokes must have dtype "
+ "list. Each element in the SArray should be a list of strokes, "
+ "where each stroke is a list of points, "
+ "and each point is represented as a dictionary "
+ "with two keys, \"x\" and \"y\".")
if isinstance(stroke_based_drawings, list):
single_input = True
stroke_based_drawings = _tc.SArray([stroke_based_drawings])
sf = _tc.SFrame({"drawings": stroke_based_drawings})
sf_with_drawings = _extensions._drawing_classifier_prepare_data(
sf, "drawings")
if single_input:
return sf_with_drawings["drawings"][0]
return sf_with_drawings["drawings"] | [
"def",
"draw_strokes",
"(",
"stroke_based_drawings",
")",
":",
"single_input",
"=",
"False",
"if",
"(",
"not",
"isinstance",
"(",
"stroke_based_drawings",
",",
"_tc",
".",
"SArray",
")",
"and",
"not",
"isinstance",
"(",
"stroke_based_drawings",
",",
"list",
")",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"Input to draw_strokes must be of type \"",
"+",
"\"turicreate.SArray or list (for a single stroke-based drawing)\"",
")",
"if",
"(",
"isinstance",
"(",
"stroke_based_drawings",
",",
"_tc",
".",
"SArray",
")",
"and",
"stroke_based_drawings",
".",
"dtype",
"!=",
"list",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"SArray input to draw_strokes must have dtype \"",
"+",
"\"list. Each element in the SArray should be a list of strokes, \"",
"+",
"\"where each stroke is a list of points, \"",
"+",
"\"and each point is represented as a dictionary \"",
"+",
"\"with two keys, \\\"x\\\" and \\\"y\\\".\"",
")",
"if",
"isinstance",
"(",
"stroke_based_drawings",
",",
"list",
")",
":",
"single_input",
"=",
"True",
"stroke_based_drawings",
"=",
"_tc",
".",
"SArray",
"(",
"[",
"stroke_based_drawings",
"]",
")",
"sf",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"\"drawings\"",
":",
"stroke_based_drawings",
"}",
")",
"sf_with_drawings",
"=",
"_extensions",
".",
"_drawing_classifier_prepare_data",
"(",
"sf",
",",
"\"drawings\"",
")",
"if",
"single_input",
":",
"return",
"sf_with_drawings",
"[",
"\"drawings\"",
"]",
"[",
"0",
"]",
"return",
"sf_with_drawings",
"[",
"\"drawings\"",
"]"
] | Visualizes drawings (ground truth or predictions) by
returning images to represent the stroke-based data from
the user.
Parameters
----------
stroke_based_drawings: SArray or list
An `SArray` of type `list`. Each element in the SArray
should be a list of strokes, where each stroke is a list
of points, and each point is represented as a dictionary
with two keys, "x" and "y". A single stroke-based drawing
is also supported, in which case, the type of the input
would be list.
Returns
-------
drawings: SArray or _tc.Image
Each stroke-based drawing is converted into a 28x28
grayscale drawing for the user to visualize what their
strokes traced. | [
"Visualizes",
"drawings",
"(",
"ground",
"truth",
"or",
"predictions",
")",
"by",
"returning",
"images",
"to",
"represent",
"the",
"stroke",
"-",
"based",
"data",
"from",
"the",
"user",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/util/_visualization.py#L10-L54 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_feature_engineering.py | Transformer.fit | def fit(self, data):
"""
Fit a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted version of the object)
See Also
--------
transform, fit_transform
Examples
--------
.. sourcecode:: python
{examples}
"""
_raise_error_if_not_sframe(data, "data")
self.__proxy__.fit(data)
return self | python | def fit(self, data):
"""
Fit a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted version of the object)
See Also
--------
transform, fit_transform
Examples
--------
.. sourcecode:: python
{examples}
"""
_raise_error_if_not_sframe(data, "data")
self.__proxy__.fit(data)
return self | [
"def",
"fit",
"(",
"self",
",",
"data",
")",
":",
"_raise_error_if_not_sframe",
"(",
"data",
",",
"\"data\"",
")",
"self",
".",
"__proxy__",
".",
"fit",
"(",
"data",
")",
"return",
"self"
] | Fit a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted version of the object)
See Also
--------
transform, fit_transform
Examples
--------
.. sourcecode:: python
{examples} | [
"Fit",
"a",
"transformer",
"using",
"the",
"SFrame",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_feature_engineering.py#L236-L262 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_feature_engineering.py | _SampleTransformer._get_summary_struct | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
section = []
section_titles = ['Attributes']
for f in self._list_fields():
section.append( ("%s" % f,"%s"% f) )
return ([section], section_titles) | python | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
section = []
section_titles = ['Attributes']
for f in self._list_fields():
section.append( ("%s" % f,"%s"% f) )
return ([section], section_titles) | [
"def",
"_get_summary_struct",
"(",
"self",
")",
":",
"section",
"=",
"[",
"]",
"section_titles",
"=",
"[",
"'Attributes'",
"]",
"for",
"f",
"in",
"self",
".",
"_list_fields",
"(",
")",
":",
"section",
".",
"append",
"(",
"(",
"\"%s\"",
"%",
"f",
",",
"\"%s\"",
"%",
"f",
")",
")",
"return",
"(",
"[",
"section",
"]",
",",
"section_titles",
")"
] | Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object. | [
"Returns",
"a",
"structured",
"description",
"of",
"the",
"model",
"including",
"(",
"where",
"relevant",
")",
"the",
"schema",
"of",
"the",
"training",
"data",
"description",
"of",
"the",
"training",
"data",
"training",
"statistics",
"and",
"model",
"hyperparameters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_feature_engineering.py#L392-L415 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_tree_model_mixin.py | TreeModelMixin.extract_features | def extract_features(self, dataset, missing_value_action='auto'):
"""
For each example in the dataset, extract the leaf indices of
each tree as features.
For multiclass classification, each leaf index contains #num_class
numbers.
The returned feature vectors can be used as input to train another
supervised learning model such as a
:py:class:`~turicreate.logistic_classifier.LogisticClassifier`,
an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SArray
An SArray of dtype array.array containing extracted features.
Examples
--------
>>> data = turicreate.SFrame(
'https://static.turi.com/datasets/regression/houses.csv')
>>> # Regression Tree Models
>>> data['regression_tree_features'] = model.extract_features(data)
>>> # Classification Tree Models
>>> data['classification_tree_features'] = model.extract_features(data)
"""
_raise_error_if_not_sframe(dataset, "dataset")
if missing_value_action == 'auto':
missing_value_action = select_default_missing_value_policy(self,
'extract_features')
return self.__proxy__.extract_features(dataset, missing_value_action) | python | def extract_features(self, dataset, missing_value_action='auto'):
"""
For each example in the dataset, extract the leaf indices of
each tree as features.
For multiclass classification, each leaf index contains #num_class
numbers.
The returned feature vectors can be used as input to train another
supervised learning model such as a
:py:class:`~turicreate.logistic_classifier.LogisticClassifier`,
an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SArray
An SArray of dtype array.array containing extracted features.
Examples
--------
>>> data = turicreate.SFrame(
'https://static.turi.com/datasets/regression/houses.csv')
>>> # Regression Tree Models
>>> data['regression_tree_features'] = model.extract_features(data)
>>> # Classification Tree Models
>>> data['classification_tree_features'] = model.extract_features(data)
"""
_raise_error_if_not_sframe(dataset, "dataset")
if missing_value_action == 'auto':
missing_value_action = select_default_missing_value_policy(self,
'extract_features')
return self.__proxy__.extract_features(dataset, missing_value_action) | [
"def",
"extract_features",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"if",
"missing_value_action",
"==",
"'auto'",
":",
"missing_value_action",
"=",
"select_default_missing_value_policy",
"(",
"self",
",",
"'extract_features'",
")",
"return",
"self",
".",
"__proxy__",
".",
"extract_features",
"(",
"dataset",
",",
"missing_value_action",
")"
] | For each example in the dataset, extract the leaf indices of
each tree as features.
For multiclass classification, each leaf index contains #num_class
numbers.
The returned feature vectors can be used as input to train another
supervised learning model such as a
:py:class:`~turicreate.logistic_classifier.LogisticClassifier`,
an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SArray
An SArray of dtype array.array containing extracted features.
Examples
--------
>>> data = turicreate.SFrame(
'https://static.turi.com/datasets/regression/houses.csv')
>>> # Regression Tree Models
>>> data['regression_tree_features'] = model.extract_features(data)
>>> # Classification Tree Models
>>> data['classification_tree_features'] = model.extract_features(data) | [
"For",
"each",
"example",
"in",
"the",
"dataset",
"extract",
"the",
"leaf",
"indices",
"of",
"each",
"tree",
"as",
"features",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_tree_model_mixin.py#L65-L120 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_tree_model_mixin.py | TreeModelMixin._extract_features_with_missing | def _extract_features_with_missing(self, dataset, tree_id = 0,
missing_value_action = 'auto'):
"""
Extract features along with all the missing features associated with
a dataset.
Parameters
----------
dataset: bool
Dataset on which to make predictions.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
A table with two columns:
- leaf_id : Leaf id of the corresponding tree.
- missing_features : A list of missing feature, index pairs
"""
# Extract the features from only one tree.
sf = dataset
sf['leaf_id'] = self.extract_features(dataset, missing_value_action)\
.vector_slice(tree_id)\
.astype(int)
tree = self._get_tree(tree_id)
type_map = dict(zip(dataset.column_names(), dataset.column_types()))
def get_missing_features(row):
x = row['leaf_id']
path = tree.get_prediction_path(x)
missing_id = [] # List of "missing_id" children.
# For each node in the prediction path.
for p in path:
fname = p['feature']
idx = p['index']
f = row[fname]
if type_map[fname] in [int, float]:
if f is None:
missing_id.append(p['child_id'])
elif type_map[fname] in [dict]:
if f is None:
missing_id.append(p['child_id'])
if idx not in f:
missing_id.append(p['child_id'])
else:
pass
return missing_id
sf['missing_id'] = sf.apply(get_missing_features, list)
return sf[['leaf_id', 'missing_id']] | python | def _extract_features_with_missing(self, dataset, tree_id = 0,
missing_value_action = 'auto'):
"""
Extract features along with all the missing features associated with
a dataset.
Parameters
----------
dataset: bool
Dataset on which to make predictions.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
A table with two columns:
- leaf_id : Leaf id of the corresponding tree.
- missing_features : A list of missing feature, index pairs
"""
# Extract the features from only one tree.
sf = dataset
sf['leaf_id'] = self.extract_features(dataset, missing_value_action)\
.vector_slice(tree_id)\
.astype(int)
tree = self._get_tree(tree_id)
type_map = dict(zip(dataset.column_names(), dataset.column_types()))
def get_missing_features(row):
x = row['leaf_id']
path = tree.get_prediction_path(x)
missing_id = [] # List of "missing_id" children.
# For each node in the prediction path.
for p in path:
fname = p['feature']
idx = p['index']
f = row[fname]
if type_map[fname] in [int, float]:
if f is None:
missing_id.append(p['child_id'])
elif type_map[fname] in [dict]:
if f is None:
missing_id.append(p['child_id'])
if idx not in f:
missing_id.append(p['child_id'])
else:
pass
return missing_id
sf['missing_id'] = sf.apply(get_missing_features, list)
return sf[['leaf_id', 'missing_id']] | [
"def",
"_extract_features_with_missing",
"(",
"self",
",",
"dataset",
",",
"tree_id",
"=",
"0",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"# Extract the features from only one tree.",
"sf",
"=",
"dataset",
"sf",
"[",
"'leaf_id'",
"]",
"=",
"self",
".",
"extract_features",
"(",
"dataset",
",",
"missing_value_action",
")",
".",
"vector_slice",
"(",
"tree_id",
")",
".",
"astype",
"(",
"int",
")",
"tree",
"=",
"self",
".",
"_get_tree",
"(",
"tree_id",
")",
"type_map",
"=",
"dict",
"(",
"zip",
"(",
"dataset",
".",
"column_names",
"(",
")",
",",
"dataset",
".",
"column_types",
"(",
")",
")",
")",
"def",
"get_missing_features",
"(",
"row",
")",
":",
"x",
"=",
"row",
"[",
"'leaf_id'",
"]",
"path",
"=",
"tree",
".",
"get_prediction_path",
"(",
"x",
")",
"missing_id",
"=",
"[",
"]",
"# List of \"missing_id\" children.",
"# For each node in the prediction path.",
"for",
"p",
"in",
"path",
":",
"fname",
"=",
"p",
"[",
"'feature'",
"]",
"idx",
"=",
"p",
"[",
"'index'",
"]",
"f",
"=",
"row",
"[",
"fname",
"]",
"if",
"type_map",
"[",
"fname",
"]",
"in",
"[",
"int",
",",
"float",
"]",
":",
"if",
"f",
"is",
"None",
":",
"missing_id",
".",
"append",
"(",
"p",
"[",
"'child_id'",
"]",
")",
"elif",
"type_map",
"[",
"fname",
"]",
"in",
"[",
"dict",
"]",
":",
"if",
"f",
"is",
"None",
":",
"missing_id",
".",
"append",
"(",
"p",
"[",
"'child_id'",
"]",
")",
"if",
"idx",
"not",
"in",
"f",
":",
"missing_id",
".",
"append",
"(",
"p",
"[",
"'child_id'",
"]",
")",
"else",
":",
"pass",
"return",
"missing_id",
"sf",
"[",
"'missing_id'",
"]",
"=",
"sf",
".",
"apply",
"(",
"get_missing_features",
",",
"list",
")",
"return",
"sf",
"[",
"[",
"'leaf_id'",
",",
"'missing_id'",
"]",
"]"
] | Extract features along with all the missing features associated with
a dataset.
Parameters
----------
dataset: bool
Dataset on which to make predictions.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
A table with two columns:
- leaf_id : Leaf id of the corresponding tree.
- missing_features : A list of missing feature, index pairs | [
"Extract",
"features",
"along",
"with",
"all",
"the",
"missing",
"features",
"associated",
"with",
"a",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_tree_model_mixin.py#L122-L189 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_tree_model_mixin.py | TreeModelMixin._dump_to_text | def _dump_to_text(self, with_stats):
"""
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order.
"""
return tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='text') | python | def _dump_to_text(self, with_stats):
"""
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order.
"""
return tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='text') | [
"def",
"_dump_to_text",
"(",
"self",
",",
"with_stats",
")",
":",
"return",
"tc",
".",
"extensions",
".",
"_xgboost_dump_model",
"(",
"self",
".",
"__proxy__",
",",
"with_stats",
"=",
"with_stats",
",",
"format",
"=",
"'text'",
")"
] | Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order. | [
"Dump",
"the",
"models",
"into",
"a",
"list",
"of",
"strings",
".",
"Each",
"string",
"is",
"a",
"text",
"representation",
"of",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_tree_model_mixin.py#L192-L208 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_tree_model_mixin.py | TreeModelMixin._dump_to_json | def _dump_to_json(self, with_stats):
"""
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order.
"""
import json
trees_json_str = tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='json')
trees_json = [json.loads(x) for x in trees_json_str]
# To avoid lose precision when using libjson, _dump_model with json format encode
# numerical values in hexadecimal (little endian).
# Now we need to convert them back to floats, using unpack. '<f' means single precision float
# in little endian
import struct
import sys
def hexadecimal_to_float(s):
if sys.version_info[0] >= 3:
return struct.unpack('<f', bytes.fromhex(s))[0] # unpack always return a tuple
else:
return struct.unpack('<f', s.decode('hex'))[0] # unpack always return a tuple
for d in trees_json:
nodes = d['vertices']
for n in nodes:
if 'value_hexadecimal' in n:
n['value'] = hexadecimal_to_float(n['value_hexadecimal'])
return trees_json | python | def _dump_to_json(self, with_stats):
"""
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order.
"""
import json
trees_json_str = tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='json')
trees_json = [json.loads(x) for x in trees_json_str]
# To avoid lose precision when using libjson, _dump_model with json format encode
# numerical values in hexadecimal (little endian).
# Now we need to convert them back to floats, using unpack. '<f' means single precision float
# in little endian
import struct
import sys
def hexadecimal_to_float(s):
if sys.version_info[0] >= 3:
return struct.unpack('<f', bytes.fromhex(s))[0] # unpack always return a tuple
else:
return struct.unpack('<f', s.decode('hex'))[0] # unpack always return a tuple
for d in trees_json:
nodes = d['vertices']
for n in nodes:
if 'value_hexadecimal' in n:
n['value'] = hexadecimal_to_float(n['value_hexadecimal'])
return trees_json | [
"def",
"_dump_to_json",
"(",
"self",
",",
"with_stats",
")",
":",
"import",
"json",
"trees_json_str",
"=",
"tc",
".",
"extensions",
".",
"_xgboost_dump_model",
"(",
"self",
".",
"__proxy__",
",",
"with_stats",
"=",
"with_stats",
",",
"format",
"=",
"'json'",
")",
"trees_json",
"=",
"[",
"json",
".",
"loads",
"(",
"x",
")",
"for",
"x",
"in",
"trees_json_str",
"]",
"# To avoid lose precision when using libjson, _dump_model with json format encode",
"# numerical values in hexadecimal (little endian).",
"# Now we need to convert them back to floats, using unpack. '<f' means single precision float",
"# in little endian",
"import",
"struct",
"import",
"sys",
"def",
"hexadecimal_to_float",
"(",
"s",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"return",
"struct",
".",
"unpack",
"(",
"'<f'",
",",
"bytes",
".",
"fromhex",
"(",
"s",
")",
")",
"[",
"0",
"]",
"# unpack always return a tuple",
"else",
":",
"return",
"struct",
".",
"unpack",
"(",
"'<f'",
",",
"s",
".",
"decode",
"(",
"'hex'",
")",
")",
"[",
"0",
"]",
"# unpack always return a tuple",
"for",
"d",
"in",
"trees_json",
":",
"nodes",
"=",
"d",
"[",
"'vertices'",
"]",
"for",
"n",
"in",
"nodes",
":",
"if",
"'value_hexadecimal'",
"in",
"n",
":",
"n",
"[",
"'value'",
"]",
"=",
"hexadecimal_to_float",
"(",
"n",
"[",
"'value_hexadecimal'",
"]",
")",
"return",
"trees_json"
] | Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order. | [
"Dump",
"the",
"models",
"into",
"a",
"list",
"of",
"strings",
".",
"Each",
"string",
"is",
"a",
"text",
"representation",
"of",
"a",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_tree_model_mixin.py#L210-L247 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_tree_model_mixin.py | TreeModelMixin._get_summary_struct | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
data_fields = [
('Number of examples', 'num_examples'),
('Number of feature columns', 'num_features'),
('Number of unpacked features', 'num_unpacked_features')]
if 'num_classes' in self._list_fields():
data_fields.append(('Number of classes', 'num_classes'))
training_fields = [
("Number of trees", 'num_trees'),
("Max tree depth", 'max_depth'),
("Training time (sec)", 'training_time')]
for m in ['accuracy', 'log_loss', 'auc', 'rmse', 'max_error']:
if 'training_%s' % m in self._list_fields():
training_fields.append(('Training %s' % m, 'training_%s' % m))
if 'validation_%s' % m in self._list_fields():
training_fields.append(('Validation %s' % m,
'validation_%s' % m))
return ([data_fields, training_fields], ["Schema", "Settings"]) | python | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
data_fields = [
('Number of examples', 'num_examples'),
('Number of feature columns', 'num_features'),
('Number of unpacked features', 'num_unpacked_features')]
if 'num_classes' in self._list_fields():
data_fields.append(('Number of classes', 'num_classes'))
training_fields = [
("Number of trees", 'num_trees'),
("Max tree depth", 'max_depth'),
("Training time (sec)", 'training_time')]
for m in ['accuracy', 'log_loss', 'auc', 'rmse', 'max_error']:
if 'training_%s' % m in self._list_fields():
training_fields.append(('Training %s' % m, 'training_%s' % m))
if 'validation_%s' % m in self._list_fields():
training_fields.append(('Validation %s' % m,
'validation_%s' % m))
return ([data_fields, training_fields], ["Schema", "Settings"]) | [
"def",
"_get_summary_struct",
"(",
"self",
")",
":",
"data_fields",
"=",
"[",
"(",
"'Number of examples'",
",",
"'num_examples'",
")",
",",
"(",
"'Number of feature columns'",
",",
"'num_features'",
")",
",",
"(",
"'Number of unpacked features'",
",",
"'num_unpacked_features'",
")",
"]",
"if",
"'num_classes'",
"in",
"self",
".",
"_list_fields",
"(",
")",
":",
"data_fields",
".",
"append",
"(",
"(",
"'Number of classes'",
",",
"'num_classes'",
")",
")",
"training_fields",
"=",
"[",
"(",
"\"Number of trees\"",
",",
"'num_trees'",
")",
",",
"(",
"\"Max tree depth\"",
",",
"'max_depth'",
")",
",",
"(",
"\"Training time (sec)\"",
",",
"'training_time'",
")",
"]",
"for",
"m",
"in",
"[",
"'accuracy'",
",",
"'log_loss'",
",",
"'auc'",
",",
"'rmse'",
",",
"'max_error'",
"]",
":",
"if",
"'training_%s'",
"%",
"m",
"in",
"self",
".",
"_list_fields",
"(",
")",
":",
"training_fields",
".",
"append",
"(",
"(",
"'Training %s'",
"%",
"m",
",",
"'training_%s'",
"%",
"m",
")",
")",
"if",
"'validation_%s'",
"%",
"m",
"in",
"self",
".",
"_list_fields",
"(",
")",
":",
"training_fields",
".",
"append",
"(",
"(",
"'Validation %s'",
"%",
"m",
",",
"'validation_%s'",
"%",
"m",
")",
")",
"return",
"(",
"[",
"data_fields",
",",
"training_fields",
"]",
",",
"[",
"\"Schema\"",
",",
"\"Settings\"",
"]",
")"
] | Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object. | [
"Returns",
"a",
"structured",
"description",
"of",
"the",
"model",
"including",
"(",
"where",
"relevant",
")",
"the",
"schema",
"of",
"the",
"training",
"data",
"description",
"of",
"the",
"training",
"data",
"training",
"statistics",
"and",
"model",
"hyperparameters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_tree_model_mixin.py#L279-L315 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_gradient_boosting_regressor.py | convert | def convert(model, input_features, output_features):
"""Convert a boosted tree model to protobuf format.
Parameters
----------
decision_tree : GradientBoostingRegressor
A trained scikit-learn tree model.
input_feature: [str]
Name of the input columns.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _ensemble.GradientBoostingRegressor)
def is_gbr_model(m):
if len(m.estimators_) == 0:
return False
if hasattr(m, 'estimators_') and m.estimators_ is not None:
for t in m.estimators_.flatten():
if not hasattr(t, 'tree_') or t.tree_ is None:
return False
return True
else:
return False
_sklearn_util.check_fitted(model, is_gbr_model)
base_prediction = model.init_.mean
return _MLModel(_convert_tree_ensemble(model, input_features, output_features,
base_prediction = base_prediction)) | python | def convert(model, input_features, output_features):
"""Convert a boosted tree model to protobuf format.
Parameters
----------
decision_tree : GradientBoostingRegressor
A trained scikit-learn tree model.
input_feature: [str]
Name of the input columns.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
_sklearn_util.check_expected_type(model, _ensemble.GradientBoostingRegressor)
def is_gbr_model(m):
if len(m.estimators_) == 0:
return False
if hasattr(m, 'estimators_') and m.estimators_ is not None:
for t in m.estimators_.flatten():
if not hasattr(t, 'tree_') or t.tree_ is None:
return False
return True
else:
return False
_sklearn_util.check_fitted(model, is_gbr_model)
base_prediction = model.init_.mean
return _MLModel(_convert_tree_ensemble(model, input_features, output_features,
base_prediction = base_prediction)) | [
"def",
"convert",
"(",
"model",
",",
"input_features",
",",
"output_features",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"_ensemble",
".",
"GradientBoostingRegressor",
")",
"def",
"is_gbr_model",
"(",
"m",
")",
":",
"if",
"len",
"(",
"m",
".",
"estimators_",
")",
"==",
"0",
":",
"return",
"False",
"if",
"hasattr",
"(",
"m",
",",
"'estimators_'",
")",
"and",
"m",
".",
"estimators_",
"is",
"not",
"None",
":",
"for",
"t",
"in",
"m",
".",
"estimators_",
".",
"flatten",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"t",
",",
"'tree_'",
")",
"or",
"t",
".",
"tree_",
"is",
"None",
":",
"return",
"False",
"return",
"True",
"else",
":",
"return",
"False",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"is_gbr_model",
")",
"base_prediction",
"=",
"model",
".",
"init_",
".",
"mean",
"return",
"_MLModel",
"(",
"_convert_tree_ensemble",
"(",
"model",
",",
"input_features",
",",
"output_features",
",",
"base_prediction",
"=",
"base_prediction",
")",
")"
] | Convert a boosted tree model to protobuf format.
Parameters
----------
decision_tree : GradientBoostingRegressor
A trained scikit-learn tree model.
input_feature: [str]
Name of the input columns.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"boosted",
"tree",
"model",
"to",
"protobuf",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_gradient_boosting_regressor.py#L19-L58 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | _sort_topk_votes | def _sort_topk_votes(x, k):
"""
Sort a dictionary of classes and corresponding vote totals according to the
votes, then truncate to the highest 'k' classes.
"""
y = sorted(x.items(), key=lambda x: x[1], reverse=True)[:k]
return [{'class': i[0], 'votes': i[1]} for i in y] | python | def _sort_topk_votes(x, k):
"""
Sort a dictionary of classes and corresponding vote totals according to the
votes, then truncate to the highest 'k' classes.
"""
y = sorted(x.items(), key=lambda x: x[1], reverse=True)[:k]
return [{'class': i[0], 'votes': i[1]} for i in y] | [
"def",
"_sort_topk_votes",
"(",
"x",
",",
"k",
")",
":",
"y",
"=",
"sorted",
"(",
"x",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"[",
":",
"k",
"]",
"return",
"[",
"{",
"'class'",
":",
"i",
"[",
"0",
"]",
",",
"'votes'",
":",
"i",
"[",
"1",
"]",
"}",
"for",
"i",
"in",
"y",
"]"
] | Sort a dictionary of classes and corresponding vote totals according to the
votes, then truncate to the highest 'k' classes. | [
"Sort",
"a",
"dictionary",
"of",
"classes",
"and",
"corresponding",
"vote",
"totals",
"according",
"to",
"the",
"votes",
"then",
"truncate",
"to",
"the",
"highest",
"k",
"classes",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L33-L39 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | _construct_auto_distance | def _construct_auto_distance(features, column_types):
"""
Construct a composite distance function for a set of features, based on the
types of those features.
NOTE: This function is very similar to
`:func:_nearest_neighbors.choose_auto_distance`. The function is separate
because the auto-distance logic different than for each nearest
neighbors-based toolkit.
Parameters
----------
features : list[str]
Names of for which to construct a distance function.
column_types : dict(string, type)
Names and types of all columns.
Returns
-------
dist : list[list]
A composite distance function. Each element of the inner list has three
elements: a list of feature names (strings), a distance function name
(string), and a weight (float).
"""
## Put input features into buckets based on type.
numeric_ftrs = []
string_ftrs = []
dict_ftrs = []
for ftr in features:
try:
ftr_type = column_types[ftr]
except:
raise ValueError("The specified feature does not exist in the " +
"input data.")
if ftr_type == str:
string_ftrs.append(ftr)
elif ftr_type == dict:
dict_ftrs.append(ftr)
elif ftr_type in [int, float, _array.array]:
numeric_ftrs.append(ftr)
else:
raise TypeError("Unable to automatically construct a distance " +
"function for feature '{}'. ".format(ftr) +
"For the nearest neighbor classifier, features " +
"must be of type integer, float, string, dictionary, " +
"or array.array.")
## Construct the distance function
dist = []
for ftr in string_ftrs:
dist.append([[ftr], 'levenshtein', 1])
if len(dict_ftrs) > 0:
dist.append([dict_ftrs, 'weighted_jaccard', len(dict_ftrs)])
if len(numeric_ftrs) > 0:
dist.append([numeric_ftrs, 'euclidean', len(numeric_ftrs)])
return dist | python | def _construct_auto_distance(features, column_types):
"""
Construct a composite distance function for a set of features, based on the
types of those features.
NOTE: This function is very similar to
`:func:_nearest_neighbors.choose_auto_distance`. The function is separate
because the auto-distance logic different than for each nearest
neighbors-based toolkit.
Parameters
----------
features : list[str]
Names of for which to construct a distance function.
column_types : dict(string, type)
Names and types of all columns.
Returns
-------
dist : list[list]
A composite distance function. Each element of the inner list has three
elements: a list of feature names (strings), a distance function name
(string), and a weight (float).
"""
## Put input features into buckets based on type.
numeric_ftrs = []
string_ftrs = []
dict_ftrs = []
for ftr in features:
try:
ftr_type = column_types[ftr]
except:
raise ValueError("The specified feature does not exist in the " +
"input data.")
if ftr_type == str:
string_ftrs.append(ftr)
elif ftr_type == dict:
dict_ftrs.append(ftr)
elif ftr_type in [int, float, _array.array]:
numeric_ftrs.append(ftr)
else:
raise TypeError("Unable to automatically construct a distance " +
"function for feature '{}'. ".format(ftr) +
"For the nearest neighbor classifier, features " +
"must be of type integer, float, string, dictionary, " +
"or array.array.")
## Construct the distance function
dist = []
for ftr in string_ftrs:
dist.append([[ftr], 'levenshtein', 1])
if len(dict_ftrs) > 0:
dist.append([dict_ftrs, 'weighted_jaccard', len(dict_ftrs)])
if len(numeric_ftrs) > 0:
dist.append([numeric_ftrs, 'euclidean', len(numeric_ftrs)])
return dist | [
"def",
"_construct_auto_distance",
"(",
"features",
",",
"column_types",
")",
":",
"## Put input features into buckets based on type.",
"numeric_ftrs",
"=",
"[",
"]",
"string_ftrs",
"=",
"[",
"]",
"dict_ftrs",
"=",
"[",
"]",
"for",
"ftr",
"in",
"features",
":",
"try",
":",
"ftr_type",
"=",
"column_types",
"[",
"ftr",
"]",
"except",
":",
"raise",
"ValueError",
"(",
"\"The specified feature does not exist in the \"",
"+",
"\"input data.\"",
")",
"if",
"ftr_type",
"==",
"str",
":",
"string_ftrs",
".",
"append",
"(",
"ftr",
")",
"elif",
"ftr_type",
"==",
"dict",
":",
"dict_ftrs",
".",
"append",
"(",
"ftr",
")",
"elif",
"ftr_type",
"in",
"[",
"int",
",",
"float",
",",
"_array",
".",
"array",
"]",
":",
"numeric_ftrs",
".",
"append",
"(",
"ftr",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unable to automatically construct a distance \"",
"+",
"\"function for feature '{}'. \"",
".",
"format",
"(",
"ftr",
")",
"+",
"\"For the nearest neighbor classifier, features \"",
"+",
"\"must be of type integer, float, string, dictionary, \"",
"+",
"\"or array.array.\"",
")",
"## Construct the distance function",
"dist",
"=",
"[",
"]",
"for",
"ftr",
"in",
"string_ftrs",
":",
"dist",
".",
"append",
"(",
"[",
"[",
"ftr",
"]",
",",
"'levenshtein'",
",",
"1",
"]",
")",
"if",
"len",
"(",
"dict_ftrs",
")",
">",
"0",
":",
"dist",
".",
"append",
"(",
"[",
"dict_ftrs",
",",
"'weighted_jaccard'",
",",
"len",
"(",
"dict_ftrs",
")",
"]",
")",
"if",
"len",
"(",
"numeric_ftrs",
")",
">",
"0",
":",
"dist",
".",
"append",
"(",
"[",
"numeric_ftrs",
",",
"'euclidean'",
",",
"len",
"(",
"numeric_ftrs",
")",
"]",
")",
"return",
"dist"
] | Construct a composite distance function for a set of features, based on the
types of those features.
NOTE: This function is very similar to
`:func:_nearest_neighbors.choose_auto_distance`. The function is separate
because the auto-distance logic different than for each nearest
neighbors-based toolkit.
Parameters
----------
features : list[str]
Names of for which to construct a distance function.
column_types : dict(string, type)
Names and types of all columns.
Returns
-------
dist : list[list]
A composite distance function. Each element of the inner list has three
elements: a list of feature names (strings), a distance function name
(string), and a weight (float). | [
"Construct",
"a",
"composite",
"distance",
"function",
"for",
"a",
"set",
"of",
"features",
"based",
"on",
"the",
"types",
"of",
"those",
"features",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L42-L108 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | create | def create(dataset, target, features=None, distance=None, verbose=True):
"""
Create a
:class:`~turicreate.nearest_neighbor_classifier.NearestNeighborClassifier`
model. This model predicts the class of a query instance by finding the most
common class among the query's nearest neighbors.
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change; it
is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : str
Name of the column containing the target variable. The values in this
column must be of string or integer type.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns except the target variable
should be used. Please note: if `distance` is specified as a composite
distance, then that parameter controls which features are used in the
model. Each column can be one of the following types:
- *Numeric*: values of numeric type integer or float.
- *Array*: array of numeric (integer or float) values. Each array
element is treated as a separate variable in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key indicates a separate variable in the model.
- *String*: string values.
Please note: if `distance` is specified as a composite distance, then
that parameter controls which features are used in the model.
distance : str, function, or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of three types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Function*: a function handle from the
:mod:`~turicreate.toolkits.distances` module.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified or set to 'auto', a composite distance
is constructed automatically based on feature types.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : NearestNeighborClassifier
A trained model of type
:class:`~turicreate.nearest_neighbor_classifier.NearestNeighborClassifier`.
See Also
--------
NearestNeighborClassifier
turicreate.toolkits.nearest_neighbors
turicreate.toolkits.distances
References
----------
- `Wikipedia - nearest neighbors classifier
<http://en.wikipedia.org/wiki/Nearest_neighbour_classifiers>`_
- Hastie, T., Tibshirani, R., Friedman, J. (2009). `The Elements of
Statistical Learning <https://web.stanford.edu/~hastie/ElemStatLearn/>`_.
Vol. 2. New York. Springer. pp. 463-481.
Examples
--------
>>> sf = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> model = turicreate.nearest_neighbor_classifier.create(sf, target='species')
As with the nearest neighbors toolkit, the nearest neighbor classifier
accepts composite distance functions.
>>> my_dist = [[('height', 'weight'), 'euclidean', 2.7],
... [('height', 'weight'), 'manhattan', 1.6]]
...
>>> model = turicreate.nearest_neighbor_classifier.create(sf, target='species',
... distance=my_dist)
"""
## Set up
## ------
start_time = _time.time()
## Validation and preprocessing
## ----------------------------
## 'dataset' must be a non-empty SFrame
_raise_error_if_not_sframe(dataset, "dataset")
_raise_error_if_sframe_empty(dataset, "dataset")
## 'target' must be a string, in 'dataset', and the type of the target must
# be string or integer.
if not isinstance(target, str) or target not in dataset.column_names():
raise _ToolkitError("The 'target' parameter must be the name of a "
"column in the input dataset.")
if not dataset[target].dtype == str and not dataset[target].dtype == int:
raise TypeError("The target column must contain integers or strings.")
## Warn that 'None' values in the target may lead to ambiguous predictions.
if dataset[target].countna() > 0:
_logging.warning("Missing values detected in the target column. This " +
"may lead to ambiguous 'None' predictions, if the " +
"'radius' parameter is set too small in the prediction, " +
"classification, or evaluation methods.")
## convert features and distance arguments into a composite distance
## NOTE: this is done here instead of in the nearest neighbors toolkit
# because the automatic distance construction may be different for the two
# toolkits.
if features is None:
_features = [x for x in dataset.column_names() if x != target]
else:
_features = [x for x in features if x != target]
if isinstance(distance, list):
distance = _copy.deepcopy(distance)
elif (hasattr(distance, '__call__') or
(isinstance(distance, str) and not distance == 'auto')):
distance = [[_features, distance, 1]]
elif distance is None or distance == 'auto':
col_types = {k: v for k, v in zip(dataset.column_names(),
dataset.column_types())}
distance = _construct_auto_distance(_features, col_types)
else:
raise TypeError("Input 'distance' not understood. The 'distance' " +
"parameter must be a string or a composite distance, " +
" or left unspecified.")
## Construct and query the nearest neighbors model
## -----------------------------------------------
knn_model = _tc.nearest_neighbors.create(dataset, label=target,
distance=distance,
verbose=verbose)
## Postprocessing and formatting
## -----------------------------
state = {
'verbose' : verbose,
'distance' : knn_model.distance,
'num_distance_components' : knn_model.num_distance_components,
'num_examples' : dataset.num_rows(),
'features' : knn_model.features,
'target': target,
'num_classes': len(dataset[target].unique()),
'num_features': knn_model.num_features,
'num_unpacked_features': knn_model.num_unpacked_features,
'training_time': _time.time() - start_time,
'_target_type': dataset[target].dtype,
}
model = NearestNeighborClassifier(knn_model, state)
return model | python | def create(dataset, target, features=None, distance=None, verbose=True):
"""
Create a
:class:`~turicreate.nearest_neighbor_classifier.NearestNeighborClassifier`
model. This model predicts the class of a query instance by finding the most
common class among the query's nearest neighbors.
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change; it
is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : str
Name of the column containing the target variable. The values in this
column must be of string or integer type.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns except the target variable
should be used. Please note: if `distance` is specified as a composite
distance, then that parameter controls which features are used in the
model. Each column can be one of the following types:
- *Numeric*: values of numeric type integer or float.
- *Array*: array of numeric (integer or float) values. Each array
element is treated as a separate variable in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key indicates a separate variable in the model.
- *String*: string values.
Please note: if `distance` is specified as a composite distance, then
that parameter controls which features are used in the model.
distance : str, function, or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of three types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Function*: a function handle from the
:mod:`~turicreate.toolkits.distances` module.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified or set to 'auto', a composite distance
is constructed automatically based on feature types.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : NearestNeighborClassifier
A trained model of type
:class:`~turicreate.nearest_neighbor_classifier.NearestNeighborClassifier`.
See Also
--------
NearestNeighborClassifier
turicreate.toolkits.nearest_neighbors
turicreate.toolkits.distances
References
----------
- `Wikipedia - nearest neighbors classifier
<http://en.wikipedia.org/wiki/Nearest_neighbour_classifiers>`_
- Hastie, T., Tibshirani, R., Friedman, J. (2009). `The Elements of
Statistical Learning <https://web.stanford.edu/~hastie/ElemStatLearn/>`_.
Vol. 2. New York. Springer. pp. 463-481.
Examples
--------
>>> sf = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> model = turicreate.nearest_neighbor_classifier.create(sf, target='species')
As with the nearest neighbors toolkit, the nearest neighbor classifier
accepts composite distance functions.
>>> my_dist = [[('height', 'weight'), 'euclidean', 2.7],
... [('height', 'weight'), 'manhattan', 1.6]]
...
>>> model = turicreate.nearest_neighbor_classifier.create(sf, target='species',
... distance=my_dist)
"""
## Set up
## ------
start_time = _time.time()
## Validation and preprocessing
## ----------------------------
## 'dataset' must be a non-empty SFrame
_raise_error_if_not_sframe(dataset, "dataset")
_raise_error_if_sframe_empty(dataset, "dataset")
## 'target' must be a string, in 'dataset', and the type of the target must
# be string or integer.
if not isinstance(target, str) or target not in dataset.column_names():
raise _ToolkitError("The 'target' parameter must be the name of a "
"column in the input dataset.")
if not dataset[target].dtype == str and not dataset[target].dtype == int:
raise TypeError("The target column must contain integers or strings.")
## Warn that 'None' values in the target may lead to ambiguous predictions.
if dataset[target].countna() > 0:
_logging.warning("Missing values detected in the target column. This " +
"may lead to ambiguous 'None' predictions, if the " +
"'radius' parameter is set too small in the prediction, " +
"classification, or evaluation methods.")
## convert features and distance arguments into a composite distance
## NOTE: this is done here instead of in the nearest neighbors toolkit
# because the automatic distance construction may be different for the two
# toolkits.
if features is None:
_features = [x for x in dataset.column_names() if x != target]
else:
_features = [x for x in features if x != target]
if isinstance(distance, list):
distance = _copy.deepcopy(distance)
elif (hasattr(distance, '__call__') or
(isinstance(distance, str) and not distance == 'auto')):
distance = [[_features, distance, 1]]
elif distance is None or distance == 'auto':
col_types = {k: v for k, v in zip(dataset.column_names(),
dataset.column_types())}
distance = _construct_auto_distance(_features, col_types)
else:
raise TypeError("Input 'distance' not understood. The 'distance' " +
"parameter must be a string or a composite distance, " +
" or left unspecified.")
## Construct and query the nearest neighbors model
## -----------------------------------------------
knn_model = _tc.nearest_neighbors.create(dataset, label=target,
distance=distance,
verbose=verbose)
## Postprocessing and formatting
## -----------------------------
state = {
'verbose' : verbose,
'distance' : knn_model.distance,
'num_distance_components' : knn_model.num_distance_components,
'num_examples' : dataset.num_rows(),
'features' : knn_model.features,
'target': target,
'num_classes': len(dataset[target].unique()),
'num_features': knn_model.num_features,
'num_unpacked_features': knn_model.num_unpacked_features,
'training_time': _time.time() - start_time,
'_target_type': dataset[target].dtype,
}
model = NearestNeighborClassifier(knn_model, state)
return model | [
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"distance",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"## Set up",
"## ------",
"start_time",
"=",
"_time",
".",
"time",
"(",
")",
"## Validation and preprocessing",
"## ----------------------------",
"## 'dataset' must be a non-empty SFrame",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"\"dataset\"",
")",
"## 'target' must be a string, in 'dataset', and the type of the target must",
"# be string or integer.",
"if",
"not",
"isinstance",
"(",
"target",
",",
"str",
")",
"or",
"target",
"not",
"in",
"dataset",
".",
"column_names",
"(",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"The 'target' parameter must be the name of a \"",
"\"column in the input dataset.\"",
")",
"if",
"not",
"dataset",
"[",
"target",
"]",
".",
"dtype",
"==",
"str",
"and",
"not",
"dataset",
"[",
"target",
"]",
".",
"dtype",
"==",
"int",
":",
"raise",
"TypeError",
"(",
"\"The target column must contain integers or strings.\"",
")",
"## Warn that 'None' values in the target may lead to ambiguous predictions.",
"if",
"dataset",
"[",
"target",
"]",
".",
"countna",
"(",
")",
">",
"0",
":",
"_logging",
".",
"warning",
"(",
"\"Missing values detected in the target column. This \"",
"+",
"\"may lead to ambiguous 'None' predictions, if the \"",
"+",
"\"'radius' parameter is set too small in the prediction, \"",
"+",
"\"classification, or evaluation methods.\"",
")",
"## convert features and distance arguments into a composite distance",
"## NOTE: this is done here instead of in the nearest neighbors toolkit",
"# because the automatic distance construction may be different for the two",
"# toolkits.",
"if",
"features",
"is",
"None",
":",
"_features",
"=",
"[",
"x",
"for",
"x",
"in",
"dataset",
".",
"column_names",
"(",
")",
"if",
"x",
"!=",
"target",
"]",
"else",
":",
"_features",
"=",
"[",
"x",
"for",
"x",
"in",
"features",
"if",
"x",
"!=",
"target",
"]",
"if",
"isinstance",
"(",
"distance",
",",
"list",
")",
":",
"distance",
"=",
"_copy",
".",
"deepcopy",
"(",
"distance",
")",
"elif",
"(",
"hasattr",
"(",
"distance",
",",
"'__call__'",
")",
"or",
"(",
"isinstance",
"(",
"distance",
",",
"str",
")",
"and",
"not",
"distance",
"==",
"'auto'",
")",
")",
":",
"distance",
"=",
"[",
"[",
"_features",
",",
"distance",
",",
"1",
"]",
"]",
"elif",
"distance",
"is",
"None",
"or",
"distance",
"==",
"'auto'",
":",
"col_types",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"dataset",
".",
"column_names",
"(",
")",
",",
"dataset",
".",
"column_types",
"(",
")",
")",
"}",
"distance",
"=",
"_construct_auto_distance",
"(",
"_features",
",",
"col_types",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Input 'distance' not understood. The 'distance' \"",
"+",
"\"parameter must be a string or a composite distance, \"",
"+",
"\" or left unspecified.\"",
")",
"## Construct and query the nearest neighbors model",
"## -----------------------------------------------",
"knn_model",
"=",
"_tc",
".",
"nearest_neighbors",
".",
"create",
"(",
"dataset",
",",
"label",
"=",
"target",
",",
"distance",
"=",
"distance",
",",
"verbose",
"=",
"verbose",
")",
"## Postprocessing and formatting",
"## -----------------------------",
"state",
"=",
"{",
"'verbose'",
":",
"verbose",
",",
"'distance'",
":",
"knn_model",
".",
"distance",
",",
"'num_distance_components'",
":",
"knn_model",
".",
"num_distance_components",
",",
"'num_examples'",
":",
"dataset",
".",
"num_rows",
"(",
")",
",",
"'features'",
":",
"knn_model",
".",
"features",
",",
"'target'",
":",
"target",
",",
"'num_classes'",
":",
"len",
"(",
"dataset",
"[",
"target",
"]",
".",
"unique",
"(",
")",
")",
",",
"'num_features'",
":",
"knn_model",
".",
"num_features",
",",
"'num_unpacked_features'",
":",
"knn_model",
".",
"num_unpacked_features",
",",
"'training_time'",
":",
"_time",
".",
"time",
"(",
")",
"-",
"start_time",
",",
"'_target_type'",
":",
"dataset",
"[",
"target",
"]",
".",
"dtype",
",",
"}",
"model",
"=",
"NearestNeighborClassifier",
"(",
"knn_model",
",",
"state",
")",
"return",
"model"
] | Create a
:class:`~turicreate.nearest_neighbor_classifier.NearestNeighborClassifier`
model. This model predicts the class of a query instance by finding the most
common class among the query's nearest neighbors.
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change; it
is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : str
Name of the column containing the target variable. The values in this
column must be of string or integer type.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns except the target variable
should be used. Please note: if `distance` is specified as a composite
distance, then that parameter controls which features are used in the
model. Each column can be one of the following types:
- *Numeric*: values of numeric type integer or float.
- *Array*: array of numeric (integer or float) values. Each array
element is treated as a separate variable in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key indicates a separate variable in the model.
- *String*: string values.
Please note: if `distance` is specified as a composite distance, then
that parameter controls which features are used in the model.
distance : str, function, or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of three types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Function*: a function handle from the
:mod:`~turicreate.toolkits.distances` module.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified or set to 'auto', a composite distance
is constructed automatically based on feature types.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : NearestNeighborClassifier
A trained model of type
:class:`~turicreate.nearest_neighbor_classifier.NearestNeighborClassifier`.
See Also
--------
NearestNeighborClassifier
turicreate.toolkits.nearest_neighbors
turicreate.toolkits.distances
References
----------
- `Wikipedia - nearest neighbors classifier
<http://en.wikipedia.org/wiki/Nearest_neighbour_classifiers>`_
- Hastie, T., Tibshirani, R., Friedman, J. (2009). `The Elements of
Statistical Learning <https://web.stanford.edu/~hastie/ElemStatLearn/>`_.
Vol. 2. New York. Springer. pp. 463-481.
Examples
--------
>>> sf = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> model = turicreate.nearest_neighbor_classifier.create(sf, target='species')
As with the nearest neighbors toolkit, the nearest neighbor classifier
accepts composite distance functions.
>>> my_dist = [[('height', 'weight'), 'euclidean', 2.7],
... [('height', 'weight'), 'manhattan', 1.6]]
...
>>> model = turicreate.nearest_neighbor_classifier.create(sf, target='species',
... distance=my_dist) | [
"Create",
"a",
":",
"class",
":",
"~turicreate",
".",
"nearest_neighbor_classifier",
".",
"NearestNeighborClassifier",
"model",
".",
"This",
"model",
"predicts",
"the",
"class",
"of",
"a",
"query",
"instance",
"by",
"finding",
"the",
"most",
"common",
"class",
"among",
"the",
"query",
"s",
"nearest",
"neighbors",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L115-L314 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | NearestNeighborClassifier._load_version | def _load_version(cls, state, version):
"""
A function to load a previously saved NearestNeighborClassifier model.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer.
"""
assert(version == cls._PYTHON_NN_CLASSIFIER_MODEL_VERSION)
knn_model = _tc.nearest_neighbors.NearestNeighborsModel(state['knn_model'])
del state['knn_model']
state['_target_type'] = eval(state['_target_type'])
return cls(knn_model, state) | python | def _load_version(cls, state, version):
"""
A function to load a previously saved NearestNeighborClassifier model.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer.
"""
assert(version == cls._PYTHON_NN_CLASSIFIER_MODEL_VERSION)
knn_model = _tc.nearest_neighbors.NearestNeighborsModel(state['knn_model'])
del state['knn_model']
state['_target_type'] = eval(state['_target_type'])
return cls(knn_model, state) | [
"def",
"_load_version",
"(",
"cls",
",",
"state",
",",
"version",
")",
":",
"assert",
"(",
"version",
"==",
"cls",
".",
"_PYTHON_NN_CLASSIFIER_MODEL_VERSION",
")",
"knn_model",
"=",
"_tc",
".",
"nearest_neighbors",
".",
"NearestNeighborsModel",
"(",
"state",
"[",
"'knn_model'",
"]",
")",
"del",
"state",
"[",
"'knn_model'",
"]",
"state",
"[",
"'_target_type'",
"]",
"=",
"eval",
"(",
"state",
"[",
"'_target_type'",
"]",
")",
"return",
"cls",
"(",
"knn_model",
",",
"state",
")"
] | A function to load a previously saved NearestNeighborClassifier model.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer. | [
"A",
"function",
"to",
"load",
"a",
"previously",
"saved",
"NearestNeighborClassifier",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L353-L369 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | NearestNeighborClassifier.classify | def classify(self, dataset, max_neighbors=10, radius=None, verbose=True):
"""
Return the predicted class for each observation in *dataset*. This
prediction is made based on the closest neighbors stored in the nearest
neighbors classifier model.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
verbose : bool, optional
If True, print progress updates.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
Returns
-------
out : SFrame
An SFrame with model predictions. The first column is the most
likely class according to the model, and the second column is the
predicted probability for that class.
See Also
--------
create, predict, predict_topk
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no qualified neighbors in the training dataset. In this case, the
resulting class and probability for that query are 'None' in the
SFrame output by this method. If the target column in the training
dataset has missing values, these predictions will be ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ystar = m.classify(sf_new, max_neighbors=2)
>>> print ystar
+-------+-------------+
| class | probability |
+-------+-------------+
| dog | 1.0 |
| fossa | 0.5 |
+-------+-------------+
"""
## Validate the query 'dataset'. Note that the 'max_neighbors' and
# 'radius' parameters are validated by the nearest neighbor model's
# query method.
_raise_error_if_not_sframe(dataset, "dataset")
_raise_error_if_sframe_empty(dataset, "dataset")
n_query = dataset.num_rows()
## Validate neighborhood parameters 'max_neighbors'.
# - NOTE: when the parameter name is changed in nearest neighbors, the
# query call will do this itself, and this block can be removed.
if max_neighbors is not None:
if not isinstance(max_neighbors, int):
raise ValueError("Input 'max_neighbors' must be an integer.")
if max_neighbors <= 0:
raise ValueError("Input 'max_neighbors' must be larger than 0.")
## Find the nearest neighbors for each query and count the number of
# votes for each class.
knn = self._knn_model.query(dataset, k=max_neighbors, radius=radius,
verbose=verbose)
## If there are *no* results for *any* query make an SFrame of nothing.
if knn.num_rows() == 0:
ystar = _tc.SFrame(
{'class': _tc.SArray([None] * n_query, self._target_type),
'probability': _tc.SArray([None] * n_query, int)})
else:
## Find the class with the most votes for each query and postprocess.
grp = knn.groupby(['query_label', 'reference_label'], _tc.aggregate.COUNT)
ystar = grp.groupby('query_label',
{'class': _tc.aggregate.ARGMAX('Count', 'reference_label'),
'max_votes': _tc.aggregate.MAX('Count'),
'total_votes': _tc.aggregate.SUM('Count')})
ystar['probability'] = ystar['max_votes'] / ystar['total_votes']
## Fill in 'None' for query points that don't have any near neighbors.
row_ids = _tc.SFrame({'query_label': range(n_query)})
ystar = ystar.join(row_ids, how='right')
## Sort by row number (because row number is not returned) and return
ystar = ystar.sort('query_label', ascending=True)
ystar = ystar[['class', 'probability']]
return ystar | python | def classify(self, dataset, max_neighbors=10, radius=None, verbose=True):
"""
Return the predicted class for each observation in *dataset*. This
prediction is made based on the closest neighbors stored in the nearest
neighbors classifier model.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
verbose : bool, optional
If True, print progress updates.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
Returns
-------
out : SFrame
An SFrame with model predictions. The first column is the most
likely class according to the model, and the second column is the
predicted probability for that class.
See Also
--------
create, predict, predict_topk
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no qualified neighbors in the training dataset. In this case, the
resulting class and probability for that query are 'None' in the
SFrame output by this method. If the target column in the training
dataset has missing values, these predictions will be ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ystar = m.classify(sf_new, max_neighbors=2)
>>> print ystar
+-------+-------------+
| class | probability |
+-------+-------------+
| dog | 1.0 |
| fossa | 0.5 |
+-------+-------------+
"""
## Validate the query 'dataset'. Note that the 'max_neighbors' and
# 'radius' parameters are validated by the nearest neighbor model's
# query method.
_raise_error_if_not_sframe(dataset, "dataset")
_raise_error_if_sframe_empty(dataset, "dataset")
n_query = dataset.num_rows()
## Validate neighborhood parameters 'max_neighbors'.
# - NOTE: when the parameter name is changed in nearest neighbors, the
# query call will do this itself, and this block can be removed.
if max_neighbors is not None:
if not isinstance(max_neighbors, int):
raise ValueError("Input 'max_neighbors' must be an integer.")
if max_neighbors <= 0:
raise ValueError("Input 'max_neighbors' must be larger than 0.")
## Find the nearest neighbors for each query and count the number of
# votes for each class.
knn = self._knn_model.query(dataset, k=max_neighbors, radius=radius,
verbose=verbose)
## If there are *no* results for *any* query make an SFrame of nothing.
if knn.num_rows() == 0:
ystar = _tc.SFrame(
{'class': _tc.SArray([None] * n_query, self._target_type),
'probability': _tc.SArray([None] * n_query, int)})
else:
## Find the class with the most votes for each query and postprocess.
grp = knn.groupby(['query_label', 'reference_label'], _tc.aggregate.COUNT)
ystar = grp.groupby('query_label',
{'class': _tc.aggregate.ARGMAX('Count', 'reference_label'),
'max_votes': _tc.aggregate.MAX('Count'),
'total_votes': _tc.aggregate.SUM('Count')})
ystar['probability'] = ystar['max_votes'] / ystar['total_votes']
## Fill in 'None' for query points that don't have any near neighbors.
row_ids = _tc.SFrame({'query_label': range(n_query)})
ystar = ystar.join(row_ids, how='right')
## Sort by row number (because row number is not returned) and return
ystar = ystar.sort('query_label', ascending=True)
ystar = ystar[['class', 'probability']]
return ystar | [
"def",
"classify",
"(",
"self",
",",
"dataset",
",",
"max_neighbors",
"=",
"10",
",",
"radius",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"## Validate the query 'dataset'. Note that the 'max_neighbors' and",
"# 'radius' parameters are validated by the nearest neighbor model's",
"# query method.",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"\"dataset\"",
")",
"n_query",
"=",
"dataset",
".",
"num_rows",
"(",
")",
"## Validate neighborhood parameters 'max_neighbors'.",
"# - NOTE: when the parameter name is changed in nearest neighbors, the",
"# query call will do this itself, and this block can be removed.",
"if",
"max_neighbors",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"max_neighbors",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"Input 'max_neighbors' must be an integer.\"",
")",
"if",
"max_neighbors",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'max_neighbors' must be larger than 0.\"",
")",
"## Find the nearest neighbors for each query and count the number of",
"# votes for each class.",
"knn",
"=",
"self",
".",
"_knn_model",
".",
"query",
"(",
"dataset",
",",
"k",
"=",
"max_neighbors",
",",
"radius",
"=",
"radius",
",",
"verbose",
"=",
"verbose",
")",
"## If there are *no* results for *any* query make an SFrame of nothing.",
"if",
"knn",
".",
"num_rows",
"(",
")",
"==",
"0",
":",
"ystar",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'class'",
":",
"_tc",
".",
"SArray",
"(",
"[",
"None",
"]",
"*",
"n_query",
",",
"self",
".",
"_target_type",
")",
",",
"'probability'",
":",
"_tc",
".",
"SArray",
"(",
"[",
"None",
"]",
"*",
"n_query",
",",
"int",
")",
"}",
")",
"else",
":",
"## Find the class with the most votes for each query and postprocess.",
"grp",
"=",
"knn",
".",
"groupby",
"(",
"[",
"'query_label'",
",",
"'reference_label'",
"]",
",",
"_tc",
".",
"aggregate",
".",
"COUNT",
")",
"ystar",
"=",
"grp",
".",
"groupby",
"(",
"'query_label'",
",",
"{",
"'class'",
":",
"_tc",
".",
"aggregate",
".",
"ARGMAX",
"(",
"'Count'",
",",
"'reference_label'",
")",
",",
"'max_votes'",
":",
"_tc",
".",
"aggregate",
".",
"MAX",
"(",
"'Count'",
")",
",",
"'total_votes'",
":",
"_tc",
".",
"aggregate",
".",
"SUM",
"(",
"'Count'",
")",
"}",
")",
"ystar",
"[",
"'probability'",
"]",
"=",
"ystar",
"[",
"'max_votes'",
"]",
"/",
"ystar",
"[",
"'total_votes'",
"]",
"## Fill in 'None' for query points that don't have any near neighbors.",
"row_ids",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'query_label'",
":",
"range",
"(",
"n_query",
")",
"}",
")",
"ystar",
"=",
"ystar",
".",
"join",
"(",
"row_ids",
",",
"how",
"=",
"'right'",
")",
"## Sort by row number (because row number is not returned) and return",
"ystar",
"=",
"ystar",
".",
"sort",
"(",
"'query_label'",
",",
"ascending",
"=",
"True",
")",
"ystar",
"=",
"ystar",
"[",
"[",
"'class'",
",",
"'probability'",
"]",
"]",
"return",
"ystar"
] | Return the predicted class for each observation in *dataset*. This
prediction is made based on the closest neighbors stored in the nearest
neighbors classifier model.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
verbose : bool, optional
If True, print progress updates.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
Returns
-------
out : SFrame
An SFrame with model predictions. The first column is the most
likely class according to the model, and the second column is the
predicted probability for that class.
See Also
--------
create, predict, predict_topk
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no qualified neighbors in the training dataset. In this case, the
resulting class and probability for that query are 'None' in the
SFrame output by this method. If the target column in the training
dataset has missing values, these predictions will be ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ystar = m.classify(sf_new, max_neighbors=2)
>>> print ystar
+-------+-------------+
| class | probability |
+-------+-------------+
| dog | 1.0 |
| fossa | 0.5 |
+-------+-------------+ | [
"Return",
"the",
"predicted",
"class",
"for",
"each",
"observation",
"in",
"*",
"dataset",
"*",
".",
"This",
"prediction",
"is",
"made",
"based",
"on",
"the",
"closest",
"neighbors",
"stored",
"in",
"the",
"nearest",
"neighbors",
"classifier",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L421-L533 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | NearestNeighborClassifier.predict | def predict(self, dataset, max_neighbors=10, radius=None,
output_type='class', verbose=True):
"""
Return predicted class labels for instances in *dataset*. This model
makes predictions based on the closest neighbors stored in the nearest
neighbors classifier model.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training, but does not require a target column. Additional
columns are ignored.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
output_type : {'class', 'probability'}, optional
Type of prediction output:
- `class`: Predicted class label. The class with the maximum number
of votes among the nearest neighbors in the reference dataset.
- `probability`: Maximum number of votes for any class out of all
nearest neighbors in the reference dataset.
Returns
-------
out : SArray
An SArray with model predictions.
See Also
----------
create, classify, predict_topk
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no qualified neighbors in the training dataset. In this case, the
result for that query is 'None' in the SArray output by this method.
If the target column in the training dataset has missing values, these
predictions will be ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ystar = m.predict(sf_new, max_neighbors=2, output_type='class')
>>> print ystar
['dog', 'fossa']
"""
ystar = self.classify(dataset=dataset, max_neighbors=max_neighbors,
radius=radius, verbose=verbose)
if output_type == 'class':
return ystar['class']
elif output_type == 'probability':
return ystar['probability']
else:
raise ValueError("Input 'output_type' not understood. 'output_type' "
"must be either 'class' or 'probability'.") | python | def predict(self, dataset, max_neighbors=10, radius=None,
output_type='class', verbose=True):
"""
Return predicted class labels for instances in *dataset*. This model
makes predictions based on the closest neighbors stored in the nearest
neighbors classifier model.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training, but does not require a target column. Additional
columns are ignored.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
output_type : {'class', 'probability'}, optional
Type of prediction output:
- `class`: Predicted class label. The class with the maximum number
of votes among the nearest neighbors in the reference dataset.
- `probability`: Maximum number of votes for any class out of all
nearest neighbors in the reference dataset.
Returns
-------
out : SArray
An SArray with model predictions.
See Also
----------
create, classify, predict_topk
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no qualified neighbors in the training dataset. In this case, the
result for that query is 'None' in the SArray output by this method.
If the target column in the training dataset has missing values, these
predictions will be ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ystar = m.predict(sf_new, max_neighbors=2, output_type='class')
>>> print ystar
['dog', 'fossa']
"""
ystar = self.classify(dataset=dataset, max_neighbors=max_neighbors,
radius=radius, verbose=verbose)
if output_type == 'class':
return ystar['class']
elif output_type == 'probability':
return ystar['probability']
else:
raise ValueError("Input 'output_type' not understood. 'output_type' "
"must be either 'class' or 'probability'.") | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"max_neighbors",
"=",
"10",
",",
"radius",
"=",
"None",
",",
"output_type",
"=",
"'class'",
",",
"verbose",
"=",
"True",
")",
":",
"ystar",
"=",
"self",
".",
"classify",
"(",
"dataset",
"=",
"dataset",
",",
"max_neighbors",
"=",
"max_neighbors",
",",
"radius",
"=",
"radius",
",",
"verbose",
"=",
"verbose",
")",
"if",
"output_type",
"==",
"'class'",
":",
"return",
"ystar",
"[",
"'class'",
"]",
"elif",
"output_type",
"==",
"'probability'",
":",
"return",
"ystar",
"[",
"'probability'",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Input 'output_type' not understood. 'output_type' \"",
"\"must be either 'class' or 'probability'.\"",
")"
] | Return predicted class labels for instances in *dataset*. This model
makes predictions based on the closest neighbors stored in the nearest
neighbors classifier model.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training, but does not require a target column. Additional
columns are ignored.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
output_type : {'class', 'probability'}, optional
Type of prediction output:
- `class`: Predicted class label. The class with the maximum number
of votes among the nearest neighbors in the reference dataset.
- `probability`: Maximum number of votes for any class out of all
nearest neighbors in the reference dataset.
Returns
-------
out : SArray
An SArray with model predictions.
See Also
----------
create, classify, predict_topk
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no qualified neighbors in the training dataset. In this case, the
result for that query is 'None' in the SArray output by this method.
If the target column in the training dataset has missing values, these
predictions will be ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ystar = m.predict(sf_new, max_neighbors=2, output_type='class')
>>> print ystar
['dog', 'fossa'] | [
"Return",
"predicted",
"class",
"labels",
"for",
"instances",
"in",
"*",
"dataset",
"*",
".",
"This",
"model",
"makes",
"predictions",
"based",
"on",
"the",
"closest",
"neighbors",
"stored",
"in",
"the",
"nearest",
"neighbors",
"classifier",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L535-L610 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | NearestNeighborClassifier.predict_topk | def predict_topk(self, dataset, max_neighbors=10, radius=None, k=3,
verbose=False):
"""
Return top-k most likely predictions for each observation in
``dataset``. Predictions are returned as an SFrame with three columns:
`row_id`, `class`, and `probability`.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training, but does not require a target column. Additional
columns are ignored.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
k : int, optional
Number of classes to return for each input example.
Returns
-------
out : SFrame
See Also
----------
create, classify, predict
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no neighbors in the training dataset. In this case, the query is
dropped from the SFrame output by this method. If all queries have no
neighbors, then the result is an empty SFrame. If the target column in
the training dataset has missing values, these predictions will be
ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf_train, target='species')
>>> ystar = m.predict_topk(sf_new, max_neighbors=2)
>>> print ystar
+--------+-------+-------------+
| row_id | class | probability |
+--------+-------+-------------+
| 0 | dog | 1.0 |
| 1 | fossa | 0.5 |
| 1 | dog | 0.5 |
+--------+-------+-------------+
"""
## Validate the number of results to return. Note that the
# 'max_neighbors' and 'radius' parameters are validated by the nearest
# neighbor model's query method.
if not isinstance(k, int) or k < 1:
raise TypeError("The number of results to return for each point, " +
"'k', must be an integer greater than 0.")
## Validate the query dataset.
_raise_error_if_not_sframe(dataset, "dataset")
_raise_error_if_sframe_empty(dataset, "dataset")
## Validate neighborhood parameters 'max_neighbors'.
# - NOTE: when the parameter name is changed in nearest neighbors, the
# query call will do this itself, and this block can be removed.
if max_neighbors is not None:
if not isinstance(max_neighbors, int):
raise ValueError("Input 'max_neighbors' must be an integer.")
if max_neighbors <= 0:
raise ValueError("Input 'max_neighbors' must be larger than 0.")
## Find the nearest neighbors for each query and count the number of
# votes for each class.
knn = self._knn_model.query(dataset, k=max_neighbors, radius=radius,
verbose=verbose)
## If there are *no* results for *any* query make an empty SFrame.
if knn.num_rows() == 0:
ystar = _tc.SFrame({'row_id': [], 'class': [], 'probability': []})
ystar['row_id'] = ystar['row_id'].astype(int)
ystar['class'] = ystar['class'].astype(str)
else:
## Find the classes with the top-k vote totals
grp = knn.groupby(['query_label', 'reference_label'],
_tc.aggregate.COUNT)
ystar = grp.unstack(column_names=['reference_label', 'Count'],
new_column_name='votes')
ystar['topk'] = ystar['votes'].apply(
lambda x: _sort_topk_votes(x, k))
ystar['total_votes'] = ystar['votes'].apply(
lambda x: sum(x.values()))
## Re-stack, unpack, and rename the results
ystar = ystar.stack('topk', new_column_name='topk')
ystar = ystar.unpack('topk')
ystar.rename({'topk.class': 'class', 'query_label': 'row_id'}, inplace=True)
ystar['probability'] = ystar['topk.votes'] / ystar['total_votes']
ystar = ystar[['row_id', 'class', 'probability']]
return ystar | python | def predict_topk(self, dataset, max_neighbors=10, radius=None, k=3,
verbose=False):
"""
Return top-k most likely predictions for each observation in
``dataset``. Predictions are returned as an SFrame with three columns:
`row_id`, `class`, and `probability`.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training, but does not require a target column. Additional
columns are ignored.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
k : int, optional
Number of classes to return for each input example.
Returns
-------
out : SFrame
See Also
----------
create, classify, predict
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no neighbors in the training dataset. In this case, the query is
dropped from the SFrame output by this method. If all queries have no
neighbors, then the result is an empty SFrame. If the target column in
the training dataset has missing values, these predictions will be
ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf_train, target='species')
>>> ystar = m.predict_topk(sf_new, max_neighbors=2)
>>> print ystar
+--------+-------+-------------+
| row_id | class | probability |
+--------+-------+-------------+
| 0 | dog | 1.0 |
| 1 | fossa | 0.5 |
| 1 | dog | 0.5 |
+--------+-------+-------------+
"""
## Validate the number of results to return. Note that the
# 'max_neighbors' and 'radius' parameters are validated by the nearest
# neighbor model's query method.
if not isinstance(k, int) or k < 1:
raise TypeError("The number of results to return for each point, " +
"'k', must be an integer greater than 0.")
## Validate the query dataset.
_raise_error_if_not_sframe(dataset, "dataset")
_raise_error_if_sframe_empty(dataset, "dataset")
## Validate neighborhood parameters 'max_neighbors'.
# - NOTE: when the parameter name is changed in nearest neighbors, the
# query call will do this itself, and this block can be removed.
if max_neighbors is not None:
if not isinstance(max_neighbors, int):
raise ValueError("Input 'max_neighbors' must be an integer.")
if max_neighbors <= 0:
raise ValueError("Input 'max_neighbors' must be larger than 0.")
## Find the nearest neighbors for each query and count the number of
# votes for each class.
knn = self._knn_model.query(dataset, k=max_neighbors, radius=radius,
verbose=verbose)
## If there are *no* results for *any* query make an empty SFrame.
if knn.num_rows() == 0:
ystar = _tc.SFrame({'row_id': [], 'class': [], 'probability': []})
ystar['row_id'] = ystar['row_id'].astype(int)
ystar['class'] = ystar['class'].astype(str)
else:
## Find the classes with the top-k vote totals
grp = knn.groupby(['query_label', 'reference_label'],
_tc.aggregate.COUNT)
ystar = grp.unstack(column_names=['reference_label', 'Count'],
new_column_name='votes')
ystar['topk'] = ystar['votes'].apply(
lambda x: _sort_topk_votes(x, k))
ystar['total_votes'] = ystar['votes'].apply(
lambda x: sum(x.values()))
## Re-stack, unpack, and rename the results
ystar = ystar.stack('topk', new_column_name='topk')
ystar = ystar.unpack('topk')
ystar.rename({'topk.class': 'class', 'query_label': 'row_id'}, inplace=True)
ystar['probability'] = ystar['topk.votes'] / ystar['total_votes']
ystar = ystar[['row_id', 'class', 'probability']]
return ystar | [
"def",
"predict_topk",
"(",
"self",
",",
"dataset",
",",
"max_neighbors",
"=",
"10",
",",
"radius",
"=",
"None",
",",
"k",
"=",
"3",
",",
"verbose",
"=",
"False",
")",
":",
"## Validate the number of results to return. Note that the",
"# 'max_neighbors' and 'radius' parameters are validated by the nearest",
"# neighbor model's query method.",
"if",
"not",
"isinstance",
"(",
"k",
",",
"int",
")",
"or",
"k",
"<",
"1",
":",
"raise",
"TypeError",
"(",
"\"The number of results to return for each point, \"",
"+",
"\"'k', must be an integer greater than 0.\"",
")",
"## Validate the query dataset.",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"\"dataset\"",
")",
"## Validate neighborhood parameters 'max_neighbors'.",
"# - NOTE: when the parameter name is changed in nearest neighbors, the",
"# query call will do this itself, and this block can be removed.",
"if",
"max_neighbors",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"max_neighbors",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"Input 'max_neighbors' must be an integer.\"",
")",
"if",
"max_neighbors",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'max_neighbors' must be larger than 0.\"",
")",
"## Find the nearest neighbors for each query and count the number of",
"# votes for each class.",
"knn",
"=",
"self",
".",
"_knn_model",
".",
"query",
"(",
"dataset",
",",
"k",
"=",
"max_neighbors",
",",
"radius",
"=",
"radius",
",",
"verbose",
"=",
"verbose",
")",
"## If there are *no* results for *any* query make an empty SFrame.",
"if",
"knn",
".",
"num_rows",
"(",
")",
"==",
"0",
":",
"ystar",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'row_id'",
":",
"[",
"]",
",",
"'class'",
":",
"[",
"]",
",",
"'probability'",
":",
"[",
"]",
"}",
")",
"ystar",
"[",
"'row_id'",
"]",
"=",
"ystar",
"[",
"'row_id'",
"]",
".",
"astype",
"(",
"int",
")",
"ystar",
"[",
"'class'",
"]",
"=",
"ystar",
"[",
"'class'",
"]",
".",
"astype",
"(",
"str",
")",
"else",
":",
"## Find the classes with the top-k vote totals",
"grp",
"=",
"knn",
".",
"groupby",
"(",
"[",
"'query_label'",
",",
"'reference_label'",
"]",
",",
"_tc",
".",
"aggregate",
".",
"COUNT",
")",
"ystar",
"=",
"grp",
".",
"unstack",
"(",
"column_names",
"=",
"[",
"'reference_label'",
",",
"'Count'",
"]",
",",
"new_column_name",
"=",
"'votes'",
")",
"ystar",
"[",
"'topk'",
"]",
"=",
"ystar",
"[",
"'votes'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"_sort_topk_votes",
"(",
"x",
",",
"k",
")",
")",
"ystar",
"[",
"'total_votes'",
"]",
"=",
"ystar",
"[",
"'votes'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"sum",
"(",
"x",
".",
"values",
"(",
")",
")",
")",
"## Re-stack, unpack, and rename the results",
"ystar",
"=",
"ystar",
".",
"stack",
"(",
"'topk'",
",",
"new_column_name",
"=",
"'topk'",
")",
"ystar",
"=",
"ystar",
".",
"unpack",
"(",
"'topk'",
")",
"ystar",
".",
"rename",
"(",
"{",
"'topk.class'",
":",
"'class'",
",",
"'query_label'",
":",
"'row_id'",
"}",
",",
"inplace",
"=",
"True",
")",
"ystar",
"[",
"'probability'",
"]",
"=",
"ystar",
"[",
"'topk.votes'",
"]",
"/",
"ystar",
"[",
"'total_votes'",
"]",
"ystar",
"=",
"ystar",
"[",
"[",
"'row_id'",
",",
"'class'",
",",
"'probability'",
"]",
"]",
"return",
"ystar"
] | Return top-k most likely predictions for each observation in
``dataset``. Predictions are returned as an SFrame with three columns:
`row_id`, `class`, and `probability`.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training, but does not require a target column. Additional
columns are ignored.
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
k : int, optional
Number of classes to return for each input example.
Returns
-------
out : SFrame
See Also
----------
create, classify, predict
Notes
-----
- If the 'radius' parameter is small, it is possible that a query point
has no neighbors in the training dataset. In this case, the query is
dropped from the SFrame output by this method. If all queries have no
neighbors, then the result is an empty SFrame. If the target column in
the training dataset has missing values, these predictions will be
ambiguous.
- Ties between predicted classes are broken randomly.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
...
>>> sf_new = turicreate.SFrame({'height': [26, 19],
... 'weight': [25, 35]})
...
>>> m = turicreate.nearest_neighbor_classifier.create(sf_train, target='species')
>>> ystar = m.predict_topk(sf_new, max_neighbors=2)
>>> print ystar
+--------+-------+-------------+
| row_id | class | probability |
+--------+-------+-------------+
| 0 | dog | 1.0 |
| 1 | fossa | 0.5 |
| 1 | dog | 0.5 |
+--------+-------+-------------+ | [
"Return",
"top",
"-",
"k",
"most",
"likely",
"predictions",
"for",
"each",
"observation",
"in",
"dataset",
".",
"Predictions",
"are",
"returned",
"as",
"an",
"SFrame",
"with",
"three",
"columns",
":",
"row_id",
"class",
"and",
"probability",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L612-L731 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py | NearestNeighborClassifier.evaluate | def evaluate(self, dataset, metric='auto', max_neighbors=10, radius=None):
"""
Evaluate the model's predictive accuracy. This is done by predicting the
target class for instances in a new dataset and comparing to known
target values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Returns all available metrics.
- 'accuracy': Classification accuracy.
- 'confusion_matrix': An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve': An SFrame containing information needed for an roc
curve (binary classification only).
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
Returns
-------
out : dict
Evaluation results. The dictionary keys are *accuracy* and
*confusion_matrix* and *roc_curve* (if applicable).
See also
--------
create, predict, predict_topk, classify
Notes
-----
- Because the model randomly breaks ties between predicted classes, the
results of repeated calls to `evaluate` method may differ.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ans = m.evaluate(sf_train, max_neighbors=2,
... metric='confusion_matrix')
>>> print ans['confusion_matrix']
+--------------+-----------------+-------+
| target_label | predicted_label | count |
+--------------+-----------------+-------+
| cat | dog | 1 |
| dog | dog | 2 |
| fossa | dog | 1 |
+--------------+-----------------+-------+
"""
## Validate the metric name
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'accuracy', 'confusion_matrix', 'roc_curve'])
## Make sure the input dataset has a target column with an appropriate
# type.
target = self.target
_raise_error_if_column_exists(dataset, target, 'dataset', target)
if not dataset[target].dtype == str and not dataset[target].dtype == int:
raise TypeError("The target column of the evaluation dataset must "
"contain integers or strings.")
if self.num_classes != 2:
if (metric == 'roc_curve') or (metric == ['roc_curve']):
err_msg = "Currently, ROC curve is not supported for "
err_msg += "multi-class classification in this model."
raise _ToolkitError(err_msg)
else:
warn_msg = "WARNING: Ignoring `roc_curve`. "
warn_msg += "Not supported for multi-class classification."
print(warn_msg)
## Compute predictions with the input dataset.
ystar = self.predict(dataset, output_type='class',
max_neighbors=max_neighbors, radius=radius)
ystar_prob = self.predict(dataset, output_type='probability',
max_neighbors=max_neighbors, radius=radius)
## Compile accuracy metrics
results = {}
if metric in ['accuracy', 'auto']:
results['accuracy'] = _evaluation.accuracy(targets=dataset[target],
predictions=ystar)
if metric in ['confusion_matrix', 'auto']:
results['confusion_matrix'] = \
_evaluation.confusion_matrix(targets=dataset[target],
predictions=ystar)
if self.num_classes == 2:
if metric in ['roc_curve', 'auto']:
results['roc_curve'] = \
_evaluation.roc_curve(targets=dataset[target],
predictions=ystar_prob)
return results | python | def evaluate(self, dataset, metric='auto', max_neighbors=10, radius=None):
"""
Evaluate the model's predictive accuracy. This is done by predicting the
target class for instances in a new dataset and comparing to known
target values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Returns all available metrics.
- 'accuracy': Classification accuracy.
- 'confusion_matrix': An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve': An SFrame containing information needed for an roc
curve (binary classification only).
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
Returns
-------
out : dict
Evaluation results. The dictionary keys are *accuracy* and
*confusion_matrix* and *roc_curve* (if applicable).
See also
--------
create, predict, predict_topk, classify
Notes
-----
- Because the model randomly breaks ties between predicted classes, the
results of repeated calls to `evaluate` method may differ.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ans = m.evaluate(sf_train, max_neighbors=2,
... metric='confusion_matrix')
>>> print ans['confusion_matrix']
+--------------+-----------------+-------+
| target_label | predicted_label | count |
+--------------+-----------------+-------+
| cat | dog | 1 |
| dog | dog | 2 |
| fossa | dog | 1 |
+--------------+-----------------+-------+
"""
## Validate the metric name
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'accuracy', 'confusion_matrix', 'roc_curve'])
## Make sure the input dataset has a target column with an appropriate
# type.
target = self.target
_raise_error_if_column_exists(dataset, target, 'dataset', target)
if not dataset[target].dtype == str and not dataset[target].dtype == int:
raise TypeError("The target column of the evaluation dataset must "
"contain integers or strings.")
if self.num_classes != 2:
if (metric == 'roc_curve') or (metric == ['roc_curve']):
err_msg = "Currently, ROC curve is not supported for "
err_msg += "multi-class classification in this model."
raise _ToolkitError(err_msg)
else:
warn_msg = "WARNING: Ignoring `roc_curve`. "
warn_msg += "Not supported for multi-class classification."
print(warn_msg)
## Compute predictions with the input dataset.
ystar = self.predict(dataset, output_type='class',
max_neighbors=max_neighbors, radius=radius)
ystar_prob = self.predict(dataset, output_type='probability',
max_neighbors=max_neighbors, radius=radius)
## Compile accuracy metrics
results = {}
if metric in ['accuracy', 'auto']:
results['accuracy'] = _evaluation.accuracy(targets=dataset[target],
predictions=ystar)
if metric in ['confusion_matrix', 'auto']:
results['confusion_matrix'] = \
_evaluation.confusion_matrix(targets=dataset[target],
predictions=ystar)
if self.num_classes == 2:
if metric in ['roc_curve', 'auto']:
results['roc_curve'] = \
_evaluation.roc_curve(targets=dataset[target],
predictions=ystar_prob)
return results | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"max_neighbors",
"=",
"10",
",",
"radius",
"=",
"None",
")",
":",
"## Validate the metric name",
"_raise_error_evaluation_metric_is_valid",
"(",
"metric",
",",
"[",
"'auto'",
",",
"'accuracy'",
",",
"'confusion_matrix'",
",",
"'roc_curve'",
"]",
")",
"## Make sure the input dataset has a target column with an appropriate",
"# type.",
"target",
"=",
"self",
".",
"target",
"_raise_error_if_column_exists",
"(",
"dataset",
",",
"target",
",",
"'dataset'",
",",
"target",
")",
"if",
"not",
"dataset",
"[",
"target",
"]",
".",
"dtype",
"==",
"str",
"and",
"not",
"dataset",
"[",
"target",
"]",
".",
"dtype",
"==",
"int",
":",
"raise",
"TypeError",
"(",
"\"The target column of the evaluation dataset must \"",
"\"contain integers or strings.\"",
")",
"if",
"self",
".",
"num_classes",
"!=",
"2",
":",
"if",
"(",
"metric",
"==",
"'roc_curve'",
")",
"or",
"(",
"metric",
"==",
"[",
"'roc_curve'",
"]",
")",
":",
"err_msg",
"=",
"\"Currently, ROC curve is not supported for \"",
"err_msg",
"+=",
"\"multi-class classification in this model.\"",
"raise",
"_ToolkitError",
"(",
"err_msg",
")",
"else",
":",
"warn_msg",
"=",
"\"WARNING: Ignoring `roc_curve`. \"",
"warn_msg",
"+=",
"\"Not supported for multi-class classification.\"",
"print",
"(",
"warn_msg",
")",
"## Compute predictions with the input dataset.",
"ystar",
"=",
"self",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'class'",
",",
"max_neighbors",
"=",
"max_neighbors",
",",
"radius",
"=",
"radius",
")",
"ystar_prob",
"=",
"self",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'probability'",
",",
"max_neighbors",
"=",
"max_neighbors",
",",
"radius",
"=",
"radius",
")",
"## Compile accuracy metrics",
"results",
"=",
"{",
"}",
"if",
"metric",
"in",
"[",
"'accuracy'",
",",
"'auto'",
"]",
":",
"results",
"[",
"'accuracy'",
"]",
"=",
"_evaluation",
".",
"accuracy",
"(",
"targets",
"=",
"dataset",
"[",
"target",
"]",
",",
"predictions",
"=",
"ystar",
")",
"if",
"metric",
"in",
"[",
"'confusion_matrix'",
",",
"'auto'",
"]",
":",
"results",
"[",
"'confusion_matrix'",
"]",
"=",
"_evaluation",
".",
"confusion_matrix",
"(",
"targets",
"=",
"dataset",
"[",
"target",
"]",
",",
"predictions",
"=",
"ystar",
")",
"if",
"self",
".",
"num_classes",
"==",
"2",
":",
"if",
"metric",
"in",
"[",
"'roc_curve'",
",",
"'auto'",
"]",
":",
"results",
"[",
"'roc_curve'",
"]",
"=",
"_evaluation",
".",
"roc_curve",
"(",
"targets",
"=",
"dataset",
"[",
"target",
"]",
",",
"predictions",
"=",
"ystar_prob",
")",
"return",
"results"
] | Evaluate the model's predictive accuracy. This is done by predicting the
target class for instances in a new dataset and comparing to known
target values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Returns all available metrics.
- 'accuracy': Classification accuracy.
- 'confusion_matrix': An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve': An SFrame containing information needed for an roc
curve (binary classification only).
max_neighbors : int, optional
Maximum number of neighbors to consider for each point.
radius : float, optional
Maximum distance from each point to a neighbor in the reference
dataset.
Returns
-------
out : dict
Evaluation results. The dictionary keys are *accuracy* and
*confusion_matrix* and *roc_curve* (if applicable).
See also
--------
create, predict, predict_topk, classify
Notes
-----
- Because the model randomly breaks ties between predicted classes, the
results of repeated calls to `evaluate` method may differ.
Examples
--------
>>> sf_train = turicreate.SFrame({'species': ['cat', 'dog', 'fossa', 'dog'],
... 'height': [9, 25, 20, 23],
... 'weight': [13, 28, 33, 22]})
>>> m = turicreate.nearest_neighbor_classifier.create(sf, target='species')
>>> ans = m.evaluate(sf_train, max_neighbors=2,
... metric='confusion_matrix')
>>> print ans['confusion_matrix']
+--------------+-----------------+-------+
| target_label | predicted_label | count |
+--------------+-----------------+-------+
| cat | dog | 1 |
| dog | dog | 2 |
| fossa | dog | 1 |
+--------------+-----------------+-------+ | [
"Evaluate",
"the",
"model",
"s",
"predictive",
"accuracy",
".",
"This",
"is",
"done",
"by",
"predicting",
"the",
"target",
"class",
"for",
"instances",
"in",
"a",
"new",
"dataset",
"and",
"comparing",
"to",
"known",
"target",
"values",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L734-L847 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | TransformerChain._compact_class_repr | def _compact_class_repr(obj):
""" A compact version of __repr__ for each of the steps.
"""
dict_str_list = []
post_repr_string = ""
# If features are present, then shorten it.
init_func = obj.__init__
if _sys.version_info.major == 2:
init_func = init_func.__func__
fields = _inspect.getargspec(init_func).args
fields = fields[1:] # remove self
if 'features' in fields:
fields.remove('features')
features = obj.get("features")
if features is not None:
post_repr_string = ' on %s feature(s)' % len(features)
if 'excluded_features' in fields:
fields.remove('excluded_features')
# GLC transformers.
if issubclass(obj.__class__, _Transformer):
for attr in fields:
dict_str_list.append("%s=%s" % (attr, obj.get(attr).__repr__()))
# Chains
elif obj.__class__ == TransformerChain:
_step_classes = list(map(lambda x: x.__class__.__name__, obj.get('steps')))
_steps = _internal_utils.pretty_print_list(
_step_classes, 'steps', False)
dict_str_list.append(_steps)
# For user defined transformers.
else:
for attr in fields:
dict_str_list.append("%s=%s" % (attr, obj.__dict__[attr]))
return "%s(%s)%s" % (obj.__class__.__name__, ", ".join(dict_str_list),
post_repr_string) | python | def _compact_class_repr(obj):
""" A compact version of __repr__ for each of the steps.
"""
dict_str_list = []
post_repr_string = ""
# If features are present, then shorten it.
init_func = obj.__init__
if _sys.version_info.major == 2:
init_func = init_func.__func__
fields = _inspect.getargspec(init_func).args
fields = fields[1:] # remove self
if 'features' in fields:
fields.remove('features')
features = obj.get("features")
if features is not None:
post_repr_string = ' on %s feature(s)' % len(features)
if 'excluded_features' in fields:
fields.remove('excluded_features')
# GLC transformers.
if issubclass(obj.__class__, _Transformer):
for attr in fields:
dict_str_list.append("%s=%s" % (attr, obj.get(attr).__repr__()))
# Chains
elif obj.__class__ == TransformerChain:
_step_classes = list(map(lambda x: x.__class__.__name__, obj.get('steps')))
_steps = _internal_utils.pretty_print_list(
_step_classes, 'steps', False)
dict_str_list.append(_steps)
# For user defined transformers.
else:
for attr in fields:
dict_str_list.append("%s=%s" % (attr, obj.__dict__[attr]))
return "%s(%s)%s" % (obj.__class__.__name__, ", ".join(dict_str_list),
post_repr_string) | [
"def",
"_compact_class_repr",
"(",
"obj",
")",
":",
"dict_str_list",
"=",
"[",
"]",
"post_repr_string",
"=",
"\"\"",
"# If features are present, then shorten it.",
"init_func",
"=",
"obj",
".",
"__init__",
"if",
"_sys",
".",
"version_info",
".",
"major",
"==",
"2",
":",
"init_func",
"=",
"init_func",
".",
"__func__",
"fields",
"=",
"_inspect",
".",
"getargspec",
"(",
"init_func",
")",
".",
"args",
"fields",
"=",
"fields",
"[",
"1",
":",
"]",
"# remove self",
"if",
"'features'",
"in",
"fields",
":",
"fields",
".",
"remove",
"(",
"'features'",
")",
"features",
"=",
"obj",
".",
"get",
"(",
"\"features\"",
")",
"if",
"features",
"is",
"not",
"None",
":",
"post_repr_string",
"=",
"' on %s feature(s)'",
"%",
"len",
"(",
"features",
")",
"if",
"'excluded_features'",
"in",
"fields",
":",
"fields",
".",
"remove",
"(",
"'excluded_features'",
")",
"# GLC transformers.",
"if",
"issubclass",
"(",
"obj",
".",
"__class__",
",",
"_Transformer",
")",
":",
"for",
"attr",
"in",
"fields",
":",
"dict_str_list",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"attr",
",",
"obj",
".",
"get",
"(",
"attr",
")",
".",
"__repr__",
"(",
")",
")",
")",
"# Chains",
"elif",
"obj",
".",
"__class__",
"==",
"TransformerChain",
":",
"_step_classes",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"__class__",
".",
"__name__",
",",
"obj",
".",
"get",
"(",
"'steps'",
")",
")",
")",
"_steps",
"=",
"_internal_utils",
".",
"pretty_print_list",
"(",
"_step_classes",
",",
"'steps'",
",",
"False",
")",
"dict_str_list",
".",
"append",
"(",
"_steps",
")",
"# For user defined transformers.",
"else",
":",
"for",
"attr",
"in",
"fields",
":",
"dict_str_list",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"attr",
",",
"obj",
".",
"__dict__",
"[",
"attr",
"]",
")",
")",
"return",
"\"%s(%s)%s\"",
"%",
"(",
"obj",
".",
"__class__",
".",
"__name__",
",",
"\", \"",
".",
"join",
"(",
"dict_str_list",
")",
",",
"post_repr_string",
")"
] | A compact version of __repr__ for each of the steps. | [
"A",
"compact",
"version",
"of",
"__repr__",
"for",
"each",
"of",
"the",
"steps",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L126-L165 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | TransformerChain._preprocess | def _preprocess(self, data):
"""
Internal function to perform fit_transform() on all but last step.
"""
transformed_data = _copy(data)
for name, step in self._transformers[:-1]:
transformed_data = step.fit_transform(transformed_data)
if type(transformed_data) != _tc.SFrame:
raise RuntimeError("The transform function in step '%s' did not"
" return an SFrame (got %s instead)." % (name,
type(transformed_data).__name__))
return transformed_data | python | def _preprocess(self, data):
"""
Internal function to perform fit_transform() on all but last step.
"""
transformed_data = _copy(data)
for name, step in self._transformers[:-1]:
transformed_data = step.fit_transform(transformed_data)
if type(transformed_data) != _tc.SFrame:
raise RuntimeError("The transform function in step '%s' did not"
" return an SFrame (got %s instead)." % (name,
type(transformed_data).__name__))
return transformed_data | [
"def",
"_preprocess",
"(",
"self",
",",
"data",
")",
":",
"transformed_data",
"=",
"_copy",
"(",
"data",
")",
"for",
"name",
",",
"step",
"in",
"self",
".",
"_transformers",
"[",
":",
"-",
"1",
"]",
":",
"transformed_data",
"=",
"step",
".",
"fit_transform",
"(",
"transformed_data",
")",
"if",
"type",
"(",
"transformed_data",
")",
"!=",
"_tc",
".",
"SFrame",
":",
"raise",
"RuntimeError",
"(",
"\"The transform function in step '%s' did not\"",
"\" return an SFrame (got %s instead).\"",
"%",
"(",
"name",
",",
"type",
"(",
"transformed_data",
")",
".",
"__name__",
")",
")",
"return",
"transformed_data"
] | Internal function to perform fit_transform() on all but last step. | [
"Internal",
"function",
"to",
"perform",
"fit_transform",
"()",
"on",
"all",
"but",
"last",
"step",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L192-L203 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | TransformerChain.fit | def fit(self, data):
"""
Fits a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted object)
See Also
--------
transform, fit_transform
Examples
--------
.. sourcecode:: python
>> chain = chain.fit(sf)
"""
if not self._transformers:
return
transformed_data = self._preprocess(data)
final_step = self._transformers[-1]
final_step[1].fit(transformed_data) | python | def fit(self, data):
"""
Fits a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted object)
See Also
--------
transform, fit_transform
Examples
--------
.. sourcecode:: python
>> chain = chain.fit(sf)
"""
if not self._transformers:
return
transformed_data = self._preprocess(data)
final_step = self._transformers[-1]
final_step[1].fit(transformed_data) | [
"def",
"fit",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"self",
".",
"_transformers",
":",
"return",
"transformed_data",
"=",
"self",
".",
"_preprocess",
"(",
"data",
")",
"final_step",
"=",
"self",
".",
"_transformers",
"[",
"-",
"1",
"]",
"final_step",
"[",
"1",
"]",
".",
"fit",
"(",
"transformed_data",
")"
] | Fits a transformer using the SFrame `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer.
Returns
-------
self (A fitted object)
See Also
--------
transform, fit_transform
Examples
--------
.. sourcecode:: python
>> chain = chain.fit(sf) | [
"Fits",
"a",
"transformer",
"using",
"the",
"SFrame",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L205-L233 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | TransformerChain.fit_transform | def fit_transform(self, data):
"""
First fit a transformer using the SFrame `data` and then return a transformed
version of `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer. The same data is then also
transformed.
Returns
-------
Transformed SFrame.
See Also
--------
transform, fit_transform
Notes
-----
- The default implementation calls fit() and then calls transform().
You may override this function with a more efficient implementation."
Examples
--------
.. sourcecode:: python
>> transformed_sf = chain.fit_transform(sf)
"""
if not self._transformers:
return self._preprocess(data)
transformed_data = self._preprocess(data)
final_step = self._transformers[-1]
return final_step[1].fit_transform(transformed_data) | python | def fit_transform(self, data):
"""
First fit a transformer using the SFrame `data` and then return a transformed
version of `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer. The same data is then also
transformed.
Returns
-------
Transformed SFrame.
See Also
--------
transform, fit_transform
Notes
-----
- The default implementation calls fit() and then calls transform().
You may override this function with a more efficient implementation."
Examples
--------
.. sourcecode:: python
>> transformed_sf = chain.fit_transform(sf)
"""
if not self._transformers:
return self._preprocess(data)
transformed_data = self._preprocess(data)
final_step = self._transformers[-1]
return final_step[1].fit_transform(transformed_data) | [
"def",
"fit_transform",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"self",
".",
"_transformers",
":",
"return",
"self",
".",
"_preprocess",
"(",
"data",
")",
"transformed_data",
"=",
"self",
".",
"_preprocess",
"(",
"data",
")",
"final_step",
"=",
"self",
".",
"_transformers",
"[",
"-",
"1",
"]",
"return",
"final_step",
"[",
"1",
"]",
".",
"fit_transform",
"(",
"transformed_data",
")"
] | First fit a transformer using the SFrame `data` and then return a transformed
version of `data`.
Parameters
----------
data : SFrame
The data used to fit the transformer. The same data is then also
transformed.
Returns
-------
Transformed SFrame.
See Also
--------
transform, fit_transform
Notes
-----
- The default implementation calls fit() and then calls transform().
You may override this function with a more efficient implementation."
Examples
--------
.. sourcecode:: python
>> transformed_sf = chain.fit_transform(sf) | [
"First",
"fit",
"a",
"transformer",
"using",
"the",
"SFrame",
"data",
"and",
"then",
"return",
"a",
"transformed",
"version",
"of",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L235-L271 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | TransformerChain.transform | def transform(self, data):
"""
Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, fit_transform
Examples
--------
.. sourcecode:: python
>> my_tr = turicreate.feature_engineering.create(train_data, MyTransformer())
>> transformed_sf = my_tr.transform(sf)
"""
transformed_data = _copy(data)
for name, step in self._transformers:
transformed_data = step.transform(transformed_data)
if type(transformed_data) != _tc.SFrame:
raise TypeError("The transform function in step '%s' did not return"
" an SFrame." % name)
return transformed_data | python | def transform(self, data):
"""
Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, fit_transform
Examples
--------
.. sourcecode:: python
>> my_tr = turicreate.feature_engineering.create(train_data, MyTransformer())
>> transformed_sf = my_tr.transform(sf)
"""
transformed_data = _copy(data)
for name, step in self._transformers:
transformed_data = step.transform(transformed_data)
if type(transformed_data) != _tc.SFrame:
raise TypeError("The transform function in step '%s' did not return"
" an SFrame." % name)
return transformed_data | [
"def",
"transform",
"(",
"self",
",",
"data",
")",
":",
"transformed_data",
"=",
"_copy",
"(",
"data",
")",
"for",
"name",
",",
"step",
"in",
"self",
".",
"_transformers",
":",
"transformed_data",
"=",
"step",
".",
"transform",
"(",
"transformed_data",
")",
"if",
"type",
"(",
"transformed_data",
")",
"!=",
"_tc",
".",
"SFrame",
":",
"raise",
"TypeError",
"(",
"\"The transform function in step '%s' did not return\"",
"\" an SFrame.\"",
"%",
"name",
")",
"return",
"transformed_data"
] | Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
A transformed SFrame.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, fit_transform
Examples
--------
.. sourcecode:: python
>> my_tr = turicreate.feature_engineering.create(train_data, MyTransformer())
>> transformed_sf = my_tr.transform(sf) | [
"Transform",
"the",
"SFrame",
"data",
"using",
"a",
"fitted",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L273-L308 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | TransformerChain._load_version | def _load_version(cls, unpickler, version):
"""
An function to load an object with a specific version of the class.
Parameters
----------
pickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer.
"""
obj = unpickler.load()
return TransformerChain(obj._state["steps"]) | python | def _load_version(cls, unpickler, version):
"""
An function to load an object with a specific version of the class.
Parameters
----------
pickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer.
"""
obj = unpickler.load()
return TransformerChain(obj._state["steps"]) | [
"def",
"_load_version",
"(",
"cls",
",",
"unpickler",
",",
"version",
")",
":",
"obj",
"=",
"unpickler",
".",
"load",
"(",
")",
"return",
"TransformerChain",
"(",
"obj",
".",
"_state",
"[",
"\"steps\"",
"]",
")"
] | An function to load an object with a specific version of the class.
Parameters
----------
pickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer. | [
"An",
"function",
"to",
"load",
"an",
"object",
"with",
"a",
"specific",
"version",
"of",
"the",
"class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py#L348-L361 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/graph_analytics/pagerank.py | create | def create(graph, reset_probability=0.15,
threshold=1e-2,
max_iterations=20,
_single_precision=False,
_distributed='auto',
verbose=True):
"""
Compute the PageRank for each vertex in the graph. Return a model object
with total PageRank as well as the PageRank value for each vertex in the
graph.
Parameters
----------
graph : SGraph
The graph on which to compute the pagerank value.
reset_probability : float, optional
Probability that a random surfer jumps to an arbitrary page.
threshold : float, optional
Threshold for convergence, measured in the L1 norm
(the sum of absolute value) of the delta of each vertex's
pagerank value.
max_iterations : int, optional
The maximum number of iterations to run.
_single_precision : bool, optional
If true, running pagerank in single precision. The resulting
pagerank values may not be accurate for large graph, but
should run faster and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : PagerankModel
References
----------
- `Wikipedia - PageRank <http://en.wikipedia.org/wiki/PageRank>`_
- Page, L., et al. (1998) `The PageRank Citation Ranking: Bringing Order to
the Web <http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.pagerank.PageRankModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap')
>>> pr = turicreate.pagerank.create(g)
We can obtain the page rank corresponding to each vertex in the graph ``g``
using:
>>> pr_out = pr['pagerank'] # SFrame
We can add the new pagerank field to the original graph g using:
>>> g.vertices['pagerank'] = pr['graph'].vertices['pagerank']
Note that the task above does not require a join because the vertex
ordering is preserved through ``create()``.
See Also
--------
PagerankModel
"""
from turicreate._cython.cy_server import QuietProgress
if not isinstance(graph, _SGraph):
raise TypeError('graph input must be a SGraph object.')
opts = {'threshold': threshold, 'reset_probability': reset_probability,
'max_iterations': max_iterations,
'single_precision': _single_precision,
'graph': graph.__proxy__}
with QuietProgress(verbose):
params = _tc.extensions._toolkits.graph.pagerank.create(opts)
model = params['model']
return PagerankModel(model) | python | def create(graph, reset_probability=0.15,
threshold=1e-2,
max_iterations=20,
_single_precision=False,
_distributed='auto',
verbose=True):
"""
Compute the PageRank for each vertex in the graph. Return a model object
with total PageRank as well as the PageRank value for each vertex in the
graph.
Parameters
----------
graph : SGraph
The graph on which to compute the pagerank value.
reset_probability : float, optional
Probability that a random surfer jumps to an arbitrary page.
threshold : float, optional
Threshold for convergence, measured in the L1 norm
(the sum of absolute value) of the delta of each vertex's
pagerank value.
max_iterations : int, optional
The maximum number of iterations to run.
_single_precision : bool, optional
If true, running pagerank in single precision. The resulting
pagerank values may not be accurate for large graph, but
should run faster and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : PagerankModel
References
----------
- `Wikipedia - PageRank <http://en.wikipedia.org/wiki/PageRank>`_
- Page, L., et al. (1998) `The PageRank Citation Ranking: Bringing Order to
the Web <http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.pagerank.PageRankModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap')
>>> pr = turicreate.pagerank.create(g)
We can obtain the page rank corresponding to each vertex in the graph ``g``
using:
>>> pr_out = pr['pagerank'] # SFrame
We can add the new pagerank field to the original graph g using:
>>> g.vertices['pagerank'] = pr['graph'].vertices['pagerank']
Note that the task above does not require a join because the vertex
ordering is preserved through ``create()``.
See Also
--------
PagerankModel
"""
from turicreate._cython.cy_server import QuietProgress
if not isinstance(graph, _SGraph):
raise TypeError('graph input must be a SGraph object.')
opts = {'threshold': threshold, 'reset_probability': reset_probability,
'max_iterations': max_iterations,
'single_precision': _single_precision,
'graph': graph.__proxy__}
with QuietProgress(verbose):
params = _tc.extensions._toolkits.graph.pagerank.create(opts)
model = params['model']
return PagerankModel(model) | [
"def",
"create",
"(",
"graph",
",",
"reset_probability",
"=",
"0.15",
",",
"threshold",
"=",
"1e-2",
",",
"max_iterations",
"=",
"20",
",",
"_single_precision",
"=",
"False",
",",
"_distributed",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"if",
"not",
"isinstance",
"(",
"graph",
",",
"_SGraph",
")",
":",
"raise",
"TypeError",
"(",
"'graph input must be a SGraph object.'",
")",
"opts",
"=",
"{",
"'threshold'",
":",
"threshold",
",",
"'reset_probability'",
":",
"reset_probability",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'single_precision'",
":",
"_single_precision",
",",
"'graph'",
":",
"graph",
".",
"__proxy__",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"params",
"=",
"_tc",
".",
"extensions",
".",
"_toolkits",
".",
"graph",
".",
"pagerank",
".",
"create",
"(",
"opts",
")",
"model",
"=",
"params",
"[",
"'model'",
"]",
"return",
"PagerankModel",
"(",
"model",
")"
] | Compute the PageRank for each vertex in the graph. Return a model object
with total PageRank as well as the PageRank value for each vertex in the
graph.
Parameters
----------
graph : SGraph
The graph on which to compute the pagerank value.
reset_probability : float, optional
Probability that a random surfer jumps to an arbitrary page.
threshold : float, optional
Threshold for convergence, measured in the L1 norm
(the sum of absolute value) of the delta of each vertex's
pagerank value.
max_iterations : int, optional
The maximum number of iterations to run.
_single_precision : bool, optional
If true, running pagerank in single precision. The resulting
pagerank values may not be accurate for large graph, but
should run faster and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : PagerankModel
References
----------
- `Wikipedia - PageRank <http://en.wikipedia.org/wiki/PageRank>`_
- Page, L., et al. (1998) `The PageRank Citation Ranking: Bringing Order to
the Web <http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.pagerank.PageRankModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap')
>>> pr = turicreate.pagerank.create(g)
We can obtain the page rank corresponding to each vertex in the graph ``g``
using:
>>> pr_out = pr['pagerank'] # SFrame
We can add the new pagerank field to the original graph g using:
>>> g.vertices['pagerank'] = pr['graph'].vertices['pagerank']
Note that the task above does not require a join because the vertex
ordering is preserved through ``create()``.
See Also
--------
PagerankModel | [
"Compute",
"the",
"PageRank",
"for",
"each",
"vertex",
"in",
"the",
"graph",
".",
"Return",
"a",
"model",
"object",
"with",
"total",
"PageRank",
"as",
"well",
"as",
"the",
"PageRank",
"value",
"for",
"each",
"vertex",
"in",
"the",
"graph",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/graph_analytics/pagerank.py#L105-L191 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/gcc.py | init | def init(version = None, command = None, options = None):
"""
Initializes the gcc toolset for the given version. If necessary, command may
be used to specify where the compiler is located. The parameter 'options' is a
space-delimited list of options, each one specified as
<option-name>option-value. Valid option names are: cxxflags, linkflags and
linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
and the default value will be selected based on the current OS.
Example:
using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
"""
options = to_seq(options)
command = to_seq(command)
# Information about the gcc command...
# The command.
command = to_seq(common.get_invocation_command('gcc', 'g++', command))
# The root directory of the tool install.
root = feature.get_values('<root>', options)
root = root[0] if root else ''
# The bin directory where to find the command to execute.
bin = None
# The flavor of compiler.
flavor = feature.get_values('<flavor>', options)
flavor = flavor[0] if flavor else ''
# Autodetect the root and bin dir if not given.
if command:
if not bin:
bin = common.get_absolute_tool_path(command[-1])
if not root:
root = os.path.dirname(bin)
# Autodetect the version and flavor if not given.
if command:
machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0]
machine = __machine_match.search(machine_info).group(1)
version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0]
version = __version_match.search(version_info).group(1)
if not flavor and machine.find('mingw') != -1:
flavor = 'mingw'
condition = None
if flavor:
condition = common.check_init_parameters('gcc', None,
('version', version),
('flavor', flavor))
else:
condition = common.check_init_parameters('gcc', None,
('version', version))
if command:
command = command[0]
common.handle_options('gcc', condition, command, options)
linker = feature.get_values('<linker-type>', options)
if not linker:
if os_name() == 'OSF':
linker = 'osf'
elif os_name() == 'HPUX':
linker = 'hpux' ;
else:
linker = 'gnu'
init_link_flags('gcc', linker, condition)
# If gcc is installed in non-standard location, we'd need to add
# LD_LIBRARY_PATH when running programs created with it (for unit-test/run
# rules).
if command:
# On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
# and all must be added to LD_LIBRARY_PATH. The linker will pick the
# right onces. Note that we don't provide a clean way to build 32-bit
# binary with 64-bit compiler, but user can always pass -m32 manually.
lib_path = [os.path.join(root, 'bin'),
os.path.join(root, 'lib'),
os.path.join(root, 'lib32'),
os.path.join(root, 'lib64')]
if debug():
print 'notice: using gcc libraries ::', condition, '::', lib_path
toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path)
# If it's not a system gcc install we should adjust the various programs as
# needed to prefer using the install specific versions. This is essential
# for correct use of MinGW and for cross-compiling.
# - The archive builder.
archiver = common.get_invocation_command('gcc',
'ar', feature.get_values('<archiver>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.AR', condition, [archiver])
if debug():
print 'notice: using gcc archiver ::', condition, '::', archiver
# - Ranlib
ranlib = common.get_invocation_command('gcc',
'ranlib', feature.get_values('<ranlib>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.RANLIB', condition, [ranlib])
if debug():
print 'notice: using gcc archiver ::', condition, '::', ranlib
# - The resource compiler.
rc_command = common.get_invocation_command_nodefault('gcc',
'windres', feature.get_values('<rc>', options), [bin], path_last=True)
rc_type = feature.get_values('<rc-type>', options)
if not rc_type:
rc_type = 'windres'
if not rc_command:
# If we can't find an RC compiler we fallback to a null RC compiler that
# creates empty object files. This allows the same Jamfiles to work
# across the board. The null RC uses the assembler to create the empty
# objects, so configure that.
rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True)
rc_type = 'null'
rc.configure([rc_command], condition, ['<rc-type>' + rc_type]) | python | def init(version = None, command = None, options = None):
"""
Initializes the gcc toolset for the given version. If necessary, command may
be used to specify where the compiler is located. The parameter 'options' is a
space-delimited list of options, each one specified as
<option-name>option-value. Valid option names are: cxxflags, linkflags and
linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
and the default value will be selected based on the current OS.
Example:
using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
"""
options = to_seq(options)
command = to_seq(command)
# Information about the gcc command...
# The command.
command = to_seq(common.get_invocation_command('gcc', 'g++', command))
# The root directory of the tool install.
root = feature.get_values('<root>', options)
root = root[0] if root else ''
# The bin directory where to find the command to execute.
bin = None
# The flavor of compiler.
flavor = feature.get_values('<flavor>', options)
flavor = flavor[0] if flavor else ''
# Autodetect the root and bin dir if not given.
if command:
if not bin:
bin = common.get_absolute_tool_path(command[-1])
if not root:
root = os.path.dirname(bin)
# Autodetect the version and flavor if not given.
if command:
machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0]
machine = __machine_match.search(machine_info).group(1)
version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0]
version = __version_match.search(version_info).group(1)
if not flavor and machine.find('mingw') != -1:
flavor = 'mingw'
condition = None
if flavor:
condition = common.check_init_parameters('gcc', None,
('version', version),
('flavor', flavor))
else:
condition = common.check_init_parameters('gcc', None,
('version', version))
if command:
command = command[0]
common.handle_options('gcc', condition, command, options)
linker = feature.get_values('<linker-type>', options)
if not linker:
if os_name() == 'OSF':
linker = 'osf'
elif os_name() == 'HPUX':
linker = 'hpux' ;
else:
linker = 'gnu'
init_link_flags('gcc', linker, condition)
# If gcc is installed in non-standard location, we'd need to add
# LD_LIBRARY_PATH when running programs created with it (for unit-test/run
# rules).
if command:
# On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
# and all must be added to LD_LIBRARY_PATH. The linker will pick the
# right onces. Note that we don't provide a clean way to build 32-bit
# binary with 64-bit compiler, but user can always pass -m32 manually.
lib_path = [os.path.join(root, 'bin'),
os.path.join(root, 'lib'),
os.path.join(root, 'lib32'),
os.path.join(root, 'lib64')]
if debug():
print 'notice: using gcc libraries ::', condition, '::', lib_path
toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path)
# If it's not a system gcc install we should adjust the various programs as
# needed to prefer using the install specific versions. This is essential
# for correct use of MinGW and for cross-compiling.
# - The archive builder.
archiver = common.get_invocation_command('gcc',
'ar', feature.get_values('<archiver>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.AR', condition, [archiver])
if debug():
print 'notice: using gcc archiver ::', condition, '::', archiver
# - Ranlib
ranlib = common.get_invocation_command('gcc',
'ranlib', feature.get_values('<ranlib>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.RANLIB', condition, [ranlib])
if debug():
print 'notice: using gcc archiver ::', condition, '::', ranlib
# - The resource compiler.
rc_command = common.get_invocation_command_nodefault('gcc',
'windres', feature.get_values('<rc>', options), [bin], path_last=True)
rc_type = feature.get_values('<rc-type>', options)
if not rc_type:
rc_type = 'windres'
if not rc_command:
# If we can't find an RC compiler we fallback to a null RC compiler that
# creates empty object files. This allows the same Jamfiles to work
# across the board. The null RC uses the assembler to create the empty
# objects, so configure that.
rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True)
rc_type = 'null'
rc.configure([rc_command], condition, ['<rc-type>' + rc_type]) | [
"def",
"init",
"(",
"version",
"=",
"None",
",",
"command",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"options",
"=",
"to_seq",
"(",
"options",
")",
"command",
"=",
"to_seq",
"(",
"command",
")",
"# Information about the gcc command...",
"# The command.",
"command",
"=",
"to_seq",
"(",
"common",
".",
"get_invocation_command",
"(",
"'gcc'",
",",
"'g++'",
",",
"command",
")",
")",
"# The root directory of the tool install.",
"root",
"=",
"feature",
".",
"get_values",
"(",
"'<root>'",
",",
"options",
")",
"root",
"=",
"root",
"[",
"0",
"]",
"if",
"root",
"else",
"''",
"# The bin directory where to find the command to execute.",
"bin",
"=",
"None",
"# The flavor of compiler.",
"flavor",
"=",
"feature",
".",
"get_values",
"(",
"'<flavor>'",
",",
"options",
")",
"flavor",
"=",
"flavor",
"[",
"0",
"]",
"if",
"flavor",
"else",
"''",
"# Autodetect the root and bin dir if not given.",
"if",
"command",
":",
"if",
"not",
"bin",
":",
"bin",
"=",
"common",
".",
"get_absolute_tool_path",
"(",
"command",
"[",
"-",
"1",
"]",
")",
"if",
"not",
"root",
":",
"root",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"bin",
")",
"# Autodetect the version and flavor if not given.",
"if",
"command",
":",
"machine_info",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
"+",
"[",
"'-dumpmachine'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"machine",
"=",
"__machine_match",
".",
"search",
"(",
"machine_info",
")",
".",
"group",
"(",
"1",
")",
"version_info",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
"+",
"[",
"'-dumpversion'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"version",
"=",
"__version_match",
".",
"search",
"(",
"version_info",
")",
".",
"group",
"(",
"1",
")",
"if",
"not",
"flavor",
"and",
"machine",
".",
"find",
"(",
"'mingw'",
")",
"!=",
"-",
"1",
":",
"flavor",
"=",
"'mingw'",
"condition",
"=",
"None",
"if",
"flavor",
":",
"condition",
"=",
"common",
".",
"check_init_parameters",
"(",
"'gcc'",
",",
"None",
",",
"(",
"'version'",
",",
"version",
")",
",",
"(",
"'flavor'",
",",
"flavor",
")",
")",
"else",
":",
"condition",
"=",
"common",
".",
"check_init_parameters",
"(",
"'gcc'",
",",
"None",
",",
"(",
"'version'",
",",
"version",
")",
")",
"if",
"command",
":",
"command",
"=",
"command",
"[",
"0",
"]",
"common",
".",
"handle_options",
"(",
"'gcc'",
",",
"condition",
",",
"command",
",",
"options",
")",
"linker",
"=",
"feature",
".",
"get_values",
"(",
"'<linker-type>'",
",",
"options",
")",
"if",
"not",
"linker",
":",
"if",
"os_name",
"(",
")",
"==",
"'OSF'",
":",
"linker",
"=",
"'osf'",
"elif",
"os_name",
"(",
")",
"==",
"'HPUX'",
":",
"linker",
"=",
"'hpux'",
"else",
":",
"linker",
"=",
"'gnu'",
"init_link_flags",
"(",
"'gcc'",
",",
"linker",
",",
"condition",
")",
"# If gcc is installed in non-standard location, we'd need to add",
"# LD_LIBRARY_PATH when running programs created with it (for unit-test/run",
"# rules).",
"if",
"command",
":",
"# On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries",
"# and all must be added to LD_LIBRARY_PATH. The linker will pick the",
"# right onces. Note that we don't provide a clean way to build 32-bit",
"# binary with 64-bit compiler, but user can always pass -m32 manually.",
"lib_path",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'bin'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'lib'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'lib32'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'lib64'",
")",
"]",
"if",
"debug",
"(",
")",
":",
"print",
"'notice: using gcc libraries ::'",
",",
"condition",
",",
"'::'",
",",
"lib_path",
"toolset",
".",
"flags",
"(",
"'gcc.link'",
",",
"'RUN_PATH'",
",",
"condition",
",",
"lib_path",
")",
"# If it's not a system gcc install we should adjust the various programs as",
"# needed to prefer using the install specific versions. This is essential",
"# for correct use of MinGW and for cross-compiling.",
"# - The archive builder.",
"archiver",
"=",
"common",
".",
"get_invocation_command",
"(",
"'gcc'",
",",
"'ar'",
",",
"feature",
".",
"get_values",
"(",
"'<archiver>'",
",",
"options",
")",
",",
"[",
"bin",
"]",
",",
"path_last",
"=",
"True",
")",
"toolset",
".",
"flags",
"(",
"'gcc.archive'",
",",
"'.AR'",
",",
"condition",
",",
"[",
"archiver",
"]",
")",
"if",
"debug",
"(",
")",
":",
"print",
"'notice: using gcc archiver ::'",
",",
"condition",
",",
"'::'",
",",
"archiver",
"# - Ranlib",
"ranlib",
"=",
"common",
".",
"get_invocation_command",
"(",
"'gcc'",
",",
"'ranlib'",
",",
"feature",
".",
"get_values",
"(",
"'<ranlib>'",
",",
"options",
")",
",",
"[",
"bin",
"]",
",",
"path_last",
"=",
"True",
")",
"toolset",
".",
"flags",
"(",
"'gcc.archive'",
",",
"'.RANLIB'",
",",
"condition",
",",
"[",
"ranlib",
"]",
")",
"if",
"debug",
"(",
")",
":",
"print",
"'notice: using gcc archiver ::'",
",",
"condition",
",",
"'::'",
",",
"ranlib",
"# - The resource compiler.",
"rc_command",
"=",
"common",
".",
"get_invocation_command_nodefault",
"(",
"'gcc'",
",",
"'windres'",
",",
"feature",
".",
"get_values",
"(",
"'<rc>'",
",",
"options",
")",
",",
"[",
"bin",
"]",
",",
"path_last",
"=",
"True",
")",
"rc_type",
"=",
"feature",
".",
"get_values",
"(",
"'<rc-type>'",
",",
"options",
")",
"if",
"not",
"rc_type",
":",
"rc_type",
"=",
"'windres'",
"if",
"not",
"rc_command",
":",
"# If we can't find an RC compiler we fallback to a null RC compiler that",
"# creates empty object files. This allows the same Jamfiles to work",
"# across the board. The null RC uses the assembler to create the empty",
"# objects, so configure that.",
"rc_command",
"=",
"common",
".",
"get_invocation_command",
"(",
"'gcc'",
",",
"'as'",
",",
"[",
"]",
",",
"[",
"bin",
"]",
",",
"path_last",
"=",
"True",
")",
"rc_type",
"=",
"'null'",
"rc",
".",
"configure",
"(",
"[",
"rc_command",
"]",
",",
"condition",
",",
"[",
"'<rc-type>'",
"+",
"rc_type",
"]",
")"
] | Initializes the gcc toolset for the given version. If necessary, command may
be used to specify where the compiler is located. The parameter 'options' is a
space-delimited list of options, each one specified as
<option-name>option-value. Valid option names are: cxxflags, linkflags and
linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
and the default value will be selected based on the current OS.
Example:
using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ; | [
"Initializes",
"the",
"gcc",
"toolset",
"for",
"the",
"given",
"version",
".",
"If",
"necessary",
"command",
"may",
"be",
"used",
"to",
"specify",
"where",
"the",
"compiler",
"is",
"located",
".",
"The",
"parameter",
"options",
"is",
"a",
"space",
"-",
"delimited",
"list",
"of",
"options",
"each",
"one",
"specified",
"as",
"<option",
"-",
"name",
">",
"option",
"-",
"value",
".",
"Valid",
"option",
"names",
"are",
":",
"cxxflags",
"linkflags",
"and",
"linker",
"-",
"type",
".",
"Accepted",
"linker",
"-",
"type",
"values",
"are",
"gnu",
"darwin",
"osf",
"hpux",
"or",
"sun",
"and",
"the",
"default",
"value",
"will",
"be",
"selected",
"based",
"on",
"the",
"current",
"OS",
".",
"Example",
":",
"using",
"gcc",
":",
"3",
".",
"4",
":",
":",
"<cxxflags",
">",
"foo",
"<linkflags",
">",
"bar",
"<linker",
"-",
"type",
">",
"sun",
";"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/gcc.py#L87-L203 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/gcc.py | init_link_flags | def init_link_flags(toolset, linker, condition):
"""
Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun.
"""
toolset_link = toolset + '.link'
if linker == 'gnu':
# Strip the binary when no debugging is needed. We use --strip-all flag
# as opposed to -s since icc (intel's compiler) is generally
# option-compatible with and inherits from the gcc toolset, but does not
# support -s.
# FIXME: what does unchecked translate to?
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ;
flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ;
# gnu ld has the ability to change the search behaviour for libraries
# referenced by -l switch. These modifiers are -Bstatic and -Bdynamic
# and change search for -l switches that follow them. The following list
# shows the tried variants.
# The search stops at the first variant that has a match.
# *nix: -Bstatic -lxxx
# libxxx.a
#
# *nix: -Bdynamic -lxxx
# libxxx.so
# libxxx.a
#
# windows (mingw,cygwin) -Bstatic -lxxx
# libxxx.a
# xxx.lib
#
# windows (mingw,cygwin) -Bdynamic -lxxx
# libxxx.dll.a
# xxx.dll.a
# libxxx.a
# xxx.lib
# cygxxx.dll (*)
# libxxx.dll
# xxx.dll
# libxxx.a
#
# (*) This is for cygwin
# Please note that -Bstatic and -Bdynamic are not a guarantee that a
# static or dynamic lib indeed gets linked in. The switches only change
# search patterns!
# On *nix mixing shared libs with static runtime is not a good idea.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bdynamic']) # : unchecked ;
# On windows allow mixing of static and dynamic libs with static
# runtime.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bdynamic']) # : unchecked ;
flags(toolset_link, 'OPTIONS',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
elif linker == 'darwin':
# On Darwin, the -s option to ld does not work unless we pass -static,
# and passing -static unconditionally is a bad idea. So, don't pass -s.
# at all, darwin.jam will use separate 'strip' invocation.
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
elif linker == 'osf':
# No --strip-all, just -s.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# This does not supports -R.
flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ;
# -rpath-link is not supported at all.
elif linker == 'sun':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# Solaris linker does not have a separate -rpath-link, but allows to use
# -L for the same purpose.
flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ;
# This permits shared libraries with non-PIC code on Solaris.
# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the
# following is not needed. Whether -fPIC should be hardcoded, is a
# separate question.
# AH, 2004/10/16: it is still necessary because some tests link against
# static libraries that were compiled without PIC.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text'])
# : unchecked ;
elif linker == 'hpux':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition),
['-Wl,-s']) # : unchecked ;
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition),
['-fPIC']) # : unchecked ;
else:
# FIXME:
errors.user_error(
"$(toolset) initialization: invalid linker '$(linker)' " +
"The value '$(linker)' specified for <linker> is not recognized. " +
"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'") | python | def init_link_flags(toolset, linker, condition):
"""
Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun.
"""
toolset_link = toolset + '.link'
if linker == 'gnu':
# Strip the binary when no debugging is needed. We use --strip-all flag
# as opposed to -s since icc (intel's compiler) is generally
# option-compatible with and inherits from the gcc toolset, but does not
# support -s.
# FIXME: what does unchecked translate to?
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ;
flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ;
# gnu ld has the ability to change the search behaviour for libraries
# referenced by -l switch. These modifiers are -Bstatic and -Bdynamic
# and change search for -l switches that follow them. The following list
# shows the tried variants.
# The search stops at the first variant that has a match.
# *nix: -Bstatic -lxxx
# libxxx.a
#
# *nix: -Bdynamic -lxxx
# libxxx.so
# libxxx.a
#
# windows (mingw,cygwin) -Bstatic -lxxx
# libxxx.a
# xxx.lib
#
# windows (mingw,cygwin) -Bdynamic -lxxx
# libxxx.dll.a
# xxx.dll.a
# libxxx.a
# xxx.lib
# cygxxx.dll (*)
# libxxx.dll
# xxx.dll
# libxxx.a
#
# (*) This is for cygwin
# Please note that -Bstatic and -Bdynamic are not a guarantee that a
# static or dynamic lib indeed gets linked in. The switches only change
# search patterns!
# On *nix mixing shared libs with static runtime is not a good idea.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bdynamic']) # : unchecked ;
# On windows allow mixing of static and dynamic libs with static
# runtime.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bdynamic']) # : unchecked ;
flags(toolset_link, 'OPTIONS',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
elif linker == 'darwin':
# On Darwin, the -s option to ld does not work unless we pass -static,
# and passing -static unconditionally is a bad idea. So, don't pass -s.
# at all, darwin.jam will use separate 'strip' invocation.
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
elif linker == 'osf':
# No --strip-all, just -s.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# This does not supports -R.
flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ;
# -rpath-link is not supported at all.
elif linker == 'sun':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# Solaris linker does not have a separate -rpath-link, but allows to use
# -L for the same purpose.
flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ;
# This permits shared libraries with non-PIC code on Solaris.
# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the
# following is not needed. Whether -fPIC should be hardcoded, is a
# separate question.
# AH, 2004/10/16: it is still necessary because some tests link against
# static libraries that were compiled without PIC.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text'])
# : unchecked ;
elif linker == 'hpux':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition),
['-Wl,-s']) # : unchecked ;
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition),
['-fPIC']) # : unchecked ;
else:
# FIXME:
errors.user_error(
"$(toolset) initialization: invalid linker '$(linker)' " +
"The value '$(linker)' specified for <linker> is not recognized. " +
"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'") | [
"def",
"init_link_flags",
"(",
"toolset",
",",
"linker",
",",
"condition",
")",
":",
"toolset_link",
"=",
"toolset",
"+",
"'.link'",
"if",
"linker",
"==",
"'gnu'",
":",
"# Strip the binary when no debugging is needed. We use --strip-all flag",
"# as opposed to -s since icc (intel's compiler) is generally",
"# option-compatible with and inherits from the gcc toolset, but does not",
"# support -s.",
"# FIXME: what does unchecked translate to?",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,--strip-all'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH_LINK'",
",",
"condition",
",",
"[",
"'<xdll-path>'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'START-GROUP'",
",",
"condition",
",",
"[",
"'-Wl,--start-group'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'END-GROUP'",
",",
"condition",
",",
"[",
"'-Wl,--end-group'",
"]",
")",
"# : unchecked ;",
"# gnu ld has the ability to change the search behaviour for libraries",
"# referenced by -l switch. These modifiers are -Bstatic and -Bdynamic",
"# and change search for -l switches that follow them. The following list",
"# shows the tried variants.",
"# The search stops at the first variant that has a match.",
"# *nix: -Bstatic -lxxx",
"# libxxx.a",
"#",
"# *nix: -Bdynamic -lxxx",
"# libxxx.so",
"# libxxx.a",
"#",
"# windows (mingw,cygwin) -Bstatic -lxxx",
"# libxxx.a",
"# xxx.lib",
"#",
"# windows (mingw,cygwin) -Bdynamic -lxxx",
"# libxxx.dll.a",
"# xxx.dll.a",
"# libxxx.a",
"# xxx.lib",
"# cygxxx.dll (*)",
"# libxxx.dll",
"# xxx.dll",
"# libxxx.a",
"#",
"# (*) This is for cygwin",
"# Please note that -Bstatic and -Bdynamic are not a guarantee that a",
"# static or dynamic lib indeed gets linked in. The switches only change",
"# search patterns!",
"# On *nix mixing shared libs with static runtime is not a good idea.",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-ST-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>shared'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bstatic'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-SA-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>shared'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bdynamic'",
"]",
")",
"# : unchecked ;",
"# On windows allow mixing of static and dynamic libs with static",
"# runtime.",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-ST-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>static/<target-os>windows'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bstatic'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'FINDLIBS-SA-PFX'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>static/<target-os>windows'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bdynamic'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<runtime-link>static/<target-os>windows'",
",",
"condition",
")",
",",
"[",
"'-Wl,-Bstatic'",
"]",
")",
"# : unchecked ;",
"elif",
"linker",
"==",
"'darwin'",
":",
"# On Darwin, the -s option to ld does not work unless we pass -static,",
"# and passing -static unconditionally is a bad idea. So, don't pass -s.",
"# at all, darwin.jam will use separate 'strip' invocation.",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH_LINK'",
",",
"condition",
",",
"[",
"'<xdll-path>'",
"]",
")",
"# : unchecked ;",
"elif",
"linker",
"==",
"'osf'",
":",
"# No --strip-all, just -s.",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,-s'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"# This does not supports -R.",
"flags",
"(",
"toolset_link",
",",
"'RPATH_OPTION'",
",",
"condition",
",",
"[",
"'-rpath'",
"]",
")",
"# : unchecked ;",
"# -rpath-link is not supported at all.",
"elif",
"linker",
"==",
"'sun'",
":",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,-s'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'RPATH'",
",",
"condition",
",",
"[",
"'<dll-path>'",
"]",
")",
"# : unchecked ;",
"# Solaris linker does not have a separate -rpath-link, but allows to use",
"# -L for the same purpose.",
"flags",
"(",
"toolset_link",
",",
"'LINKPATH'",
",",
"condition",
",",
"[",
"'<xdll-path>'",
"]",
")",
"# : unchecked ;",
"# This permits shared libraries with non-PIC code on Solaris.",
"# VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the",
"# following is not needed. Whether -fPIC should be hardcoded, is a",
"# separate question.",
"# AH, 2004/10/16: it is still necessary because some tests link against",
"# static libraries that were compiled without PIC.",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<link>shared'",
",",
"condition",
")",
",",
"[",
"'-mimpure-text'",
"]",
")",
"# : unchecked ;",
"elif",
"linker",
"==",
"'hpux'",
":",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<debug-symbols>off'",
",",
"condition",
")",
",",
"[",
"'-Wl,-s'",
"]",
")",
"# : unchecked ;",
"flags",
"(",
"toolset_link",
",",
"'OPTIONS'",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
"+",
"'/<link>shared'",
",",
"condition",
")",
",",
"[",
"'-fPIC'",
"]",
")",
"# : unchecked ;",
"else",
":",
"# FIXME:",
"errors",
".",
"user_error",
"(",
"\"$(toolset) initialization: invalid linker '$(linker)' \"",
"+",
"\"The value '$(linker)' specified for <linker> is not recognized. \"",
"+",
"\"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'\"",
")"
] | Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun. | [
"Now",
"the",
"vendor",
"specific",
"flags",
".",
"The",
"parameter",
"linker",
"can",
"be",
"either",
"gnu",
"darwin",
"osf",
"hpux",
"or",
"sun",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/gcc.py#L494-L608 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/engine.py | Engine.add_dependency | def add_dependency (self, targets, sources):
"""Adds a dependency from 'targets' to 'sources'
Both 'targets' and 'sources' can be either list
of target names, or a single target name.
"""
if isinstance (targets, str):
targets = [targets]
if isinstance (sources, str):
sources = [sources]
assert is_iterable(targets)
assert is_iterable(sources)
for target in targets:
for source in sources:
self.do_add_dependency (target, source) | python | def add_dependency (self, targets, sources):
"""Adds a dependency from 'targets' to 'sources'
Both 'targets' and 'sources' can be either list
of target names, or a single target name.
"""
if isinstance (targets, str):
targets = [targets]
if isinstance (sources, str):
sources = [sources]
assert is_iterable(targets)
assert is_iterable(sources)
for target in targets:
for source in sources:
self.do_add_dependency (target, source) | [
"def",
"add_dependency",
"(",
"self",
",",
"targets",
",",
"sources",
")",
":",
"if",
"isinstance",
"(",
"targets",
",",
"str",
")",
":",
"targets",
"=",
"[",
"targets",
"]",
"if",
"isinstance",
"(",
"sources",
",",
"str",
")",
":",
"sources",
"=",
"[",
"sources",
"]",
"assert",
"is_iterable",
"(",
"targets",
")",
"assert",
"is_iterable",
"(",
"sources",
")",
"for",
"target",
"in",
"targets",
":",
"for",
"source",
"in",
"sources",
":",
"self",
".",
"do_add_dependency",
"(",
"target",
",",
"source",
")"
] | Adds a dependency from 'targets' to 'sources'
Both 'targets' and 'sources' can be either list
of target names, or a single target name. | [
"Adds",
"a",
"dependency",
"from",
"targets",
"to",
"sources"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L76-L91 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/engine.py | Engine.get_target_variable | def get_target_variable(self, targets, variable):
"""Gets the value of `variable` on set on the first target in `targets`.
Args:
targets (str or list): one or more targets to get the variable from.
variable (str): the name of the variable
Returns:
the value of `variable` set on `targets` (list)
Example:
>>> ENGINE = get_manager().engine()
>>> ENGINE.set_target_variable(targets, 'MY-VAR', 'Hello World')
>>> ENGINE.get_target_variable(targets, 'MY-VAR')
['Hello World']
Equivalent Jam code:
MY-VAR on $(targets) = "Hello World" ;
echo [ on $(targets) return $(MY-VAR) ] ;
"Hello World"
"""
if isinstance(targets, str):
targets = [targets]
assert is_iterable(targets)
assert isinstance(variable, basestring)
return bjam_interface.call('get-target-variable', targets, variable) | python | def get_target_variable(self, targets, variable):
"""Gets the value of `variable` on set on the first target in `targets`.
Args:
targets (str or list): one or more targets to get the variable from.
variable (str): the name of the variable
Returns:
the value of `variable` set on `targets` (list)
Example:
>>> ENGINE = get_manager().engine()
>>> ENGINE.set_target_variable(targets, 'MY-VAR', 'Hello World')
>>> ENGINE.get_target_variable(targets, 'MY-VAR')
['Hello World']
Equivalent Jam code:
MY-VAR on $(targets) = "Hello World" ;
echo [ on $(targets) return $(MY-VAR) ] ;
"Hello World"
"""
if isinstance(targets, str):
targets = [targets]
assert is_iterable(targets)
assert isinstance(variable, basestring)
return bjam_interface.call('get-target-variable', targets, variable) | [
"def",
"get_target_variable",
"(",
"self",
",",
"targets",
",",
"variable",
")",
":",
"if",
"isinstance",
"(",
"targets",
",",
"str",
")",
":",
"targets",
"=",
"[",
"targets",
"]",
"assert",
"is_iterable",
"(",
"targets",
")",
"assert",
"isinstance",
"(",
"variable",
",",
"basestring",
")",
"return",
"bjam_interface",
".",
"call",
"(",
"'get-target-variable'",
",",
"targets",
",",
"variable",
")"
] | Gets the value of `variable` on set on the first target in `targets`.
Args:
targets (str or list): one or more targets to get the variable from.
variable (str): the name of the variable
Returns:
the value of `variable` set on `targets` (list)
Example:
>>> ENGINE = get_manager().engine()
>>> ENGINE.set_target_variable(targets, 'MY-VAR', 'Hello World')
>>> ENGINE.get_target_variable(targets, 'MY-VAR')
['Hello World']
Equivalent Jam code:
MY-VAR on $(targets) = "Hello World" ;
echo [ on $(targets) return $(MY-VAR) ] ;
"Hello World" | [
"Gets",
"the",
"value",
"of",
"variable",
"on",
"set",
"on",
"the",
"first",
"target",
"in",
"targets",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L93-L121 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/engine.py | Engine.set_target_variable | def set_target_variable (self, targets, variable, value, append=0):
""" Sets a target variable.
The 'variable' will be available to bjam when it decides
where to generate targets, and will also be available to
updating rule for that 'taret'.
"""
if isinstance (targets, str):
targets = [targets]
if isinstance(value, str):
value = [value]
assert is_iterable(targets)
assert isinstance(variable, basestring)
assert is_iterable(value)
if targets:
if append:
bjam_interface.call("set-target-variable", targets, variable, value, "true")
else:
bjam_interface.call("set-target-variable", targets, variable, value) | python | def set_target_variable (self, targets, variable, value, append=0):
""" Sets a target variable.
The 'variable' will be available to bjam when it decides
where to generate targets, and will also be available to
updating rule for that 'taret'.
"""
if isinstance (targets, str):
targets = [targets]
if isinstance(value, str):
value = [value]
assert is_iterable(targets)
assert isinstance(variable, basestring)
assert is_iterable(value)
if targets:
if append:
bjam_interface.call("set-target-variable", targets, variable, value, "true")
else:
bjam_interface.call("set-target-variable", targets, variable, value) | [
"def",
"set_target_variable",
"(",
"self",
",",
"targets",
",",
"variable",
",",
"value",
",",
"append",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"targets",
",",
"str",
")",
":",
"targets",
"=",
"[",
"targets",
"]",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"[",
"value",
"]",
"assert",
"is_iterable",
"(",
"targets",
")",
"assert",
"isinstance",
"(",
"variable",
",",
"basestring",
")",
"assert",
"is_iterable",
"(",
"value",
")",
"if",
"targets",
":",
"if",
"append",
":",
"bjam_interface",
".",
"call",
"(",
"\"set-target-variable\"",
",",
"targets",
",",
"variable",
",",
"value",
",",
"\"true\"",
")",
"else",
":",
"bjam_interface",
".",
"call",
"(",
"\"set-target-variable\"",
",",
"targets",
",",
"variable",
",",
"value",
")"
] | Sets a target variable.
The 'variable' will be available to bjam when it decides
where to generate targets, and will also be available to
updating rule for that 'taret'. | [
"Sets",
"a",
"target",
"variable",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L123-L143 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/engine.py | Engine.set_update_action | def set_update_action (self, action_name, targets, sources, properties=None):
""" Binds a target to the corresponding update action.
If target needs to be updated, the action registered
with action_name will be used.
The 'action_name' must be previously registered by
either 'register_action' or 'register_bjam_action'
method.
"""
if isinstance(targets, str):
targets = [targets]
if isinstance(sources, str):
sources = [sources]
if properties is None:
properties = property_set.empty()
assert isinstance(action_name, basestring)
assert is_iterable(targets)
assert is_iterable(sources)
assert(isinstance(properties, property_set.PropertySet))
self.do_set_update_action (action_name, targets, sources, properties) | python | def set_update_action (self, action_name, targets, sources, properties=None):
""" Binds a target to the corresponding update action.
If target needs to be updated, the action registered
with action_name will be used.
The 'action_name' must be previously registered by
either 'register_action' or 'register_bjam_action'
method.
"""
if isinstance(targets, str):
targets = [targets]
if isinstance(sources, str):
sources = [sources]
if properties is None:
properties = property_set.empty()
assert isinstance(action_name, basestring)
assert is_iterable(targets)
assert is_iterable(sources)
assert(isinstance(properties, property_set.PropertySet))
self.do_set_update_action (action_name, targets, sources, properties) | [
"def",
"set_update_action",
"(",
"self",
",",
"action_name",
",",
"targets",
",",
"sources",
",",
"properties",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"targets",
",",
"str",
")",
":",
"targets",
"=",
"[",
"targets",
"]",
"if",
"isinstance",
"(",
"sources",
",",
"str",
")",
":",
"sources",
"=",
"[",
"sources",
"]",
"if",
"properties",
"is",
"None",
":",
"properties",
"=",
"property_set",
".",
"empty",
"(",
")",
"assert",
"isinstance",
"(",
"action_name",
",",
"basestring",
")",
"assert",
"is_iterable",
"(",
"targets",
")",
"assert",
"is_iterable",
"(",
"sources",
")",
"assert",
"(",
"isinstance",
"(",
"properties",
",",
"property_set",
".",
"PropertySet",
")",
")",
"self",
".",
"do_set_update_action",
"(",
"action_name",
",",
"targets",
",",
"sources",
",",
"properties",
")"
] | Binds a target to the corresponding update action.
If target needs to be updated, the action registered
with action_name will be used.
The 'action_name' must be previously registered by
either 'register_action' or 'register_bjam_action'
method. | [
"Binds",
"a",
"target",
"to",
"the",
"corresponding",
"update",
"action",
".",
"If",
"target",
"needs",
"to",
"be",
"updated",
"the",
"action",
"registered",
"with",
"action_name",
"will",
"be",
"used",
".",
"The",
"action_name",
"must",
"be",
"previously",
"registered",
"by",
"either",
"register_action",
"or",
"register_bjam_action",
"method",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L145-L164 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/engine.py | Engine.register_action | def register_action (self, action_name, command='', bound_list = [], flags = [],
function = None):
"""Creates a new build engine action.
Creates on bjam side an action named 'action_name', with
'command' as the command to be executed, 'bound_variables'
naming the list of variables bound when the command is executed
and specified flag.
If 'function' is not None, it should be a callable taking three
parameters:
- targets
- sources
- instance of the property_set class
This function will be called by set_update_action, and can
set additional target variables.
"""
assert isinstance(action_name, basestring)
assert isinstance(command, basestring)
assert is_iterable(bound_list)
assert is_iterable(flags)
assert function is None or callable(function)
bjam_flags = reduce(operator.or_,
(action_modifiers[flag] for flag in flags), 0)
# We allow command to be empty so that we can define 'action' as pure
# python function that would do some conditional logic and then relay
# to other actions.
assert command or function
if command:
bjam_interface.define_action(action_name, command, bound_list, bjam_flags)
self.actions[action_name] = BjamAction(
action_name, function, has_command=bool(command)) | python | def register_action (self, action_name, command='', bound_list = [], flags = [],
function = None):
"""Creates a new build engine action.
Creates on bjam side an action named 'action_name', with
'command' as the command to be executed, 'bound_variables'
naming the list of variables bound when the command is executed
and specified flag.
If 'function' is not None, it should be a callable taking three
parameters:
- targets
- sources
- instance of the property_set class
This function will be called by set_update_action, and can
set additional target variables.
"""
assert isinstance(action_name, basestring)
assert isinstance(command, basestring)
assert is_iterable(bound_list)
assert is_iterable(flags)
assert function is None or callable(function)
bjam_flags = reduce(operator.or_,
(action_modifiers[flag] for flag in flags), 0)
# We allow command to be empty so that we can define 'action' as pure
# python function that would do some conditional logic and then relay
# to other actions.
assert command or function
if command:
bjam_interface.define_action(action_name, command, bound_list, bjam_flags)
self.actions[action_name] = BjamAction(
action_name, function, has_command=bool(command)) | [
"def",
"register_action",
"(",
"self",
",",
"action_name",
",",
"command",
"=",
"''",
",",
"bound_list",
"=",
"[",
"]",
",",
"flags",
"=",
"[",
"]",
",",
"function",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"action_name",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"command",
",",
"basestring",
")",
"assert",
"is_iterable",
"(",
"bound_list",
")",
"assert",
"is_iterable",
"(",
"flags",
")",
"assert",
"function",
"is",
"None",
"or",
"callable",
"(",
"function",
")",
"bjam_flags",
"=",
"reduce",
"(",
"operator",
".",
"or_",
",",
"(",
"action_modifiers",
"[",
"flag",
"]",
"for",
"flag",
"in",
"flags",
")",
",",
"0",
")",
"# We allow command to be empty so that we can define 'action' as pure",
"# python function that would do some conditional logic and then relay",
"# to other actions.",
"assert",
"command",
"or",
"function",
"if",
"command",
":",
"bjam_interface",
".",
"define_action",
"(",
"action_name",
",",
"command",
",",
"bound_list",
",",
"bjam_flags",
")",
"self",
".",
"actions",
"[",
"action_name",
"]",
"=",
"BjamAction",
"(",
"action_name",
",",
"function",
",",
"has_command",
"=",
"bool",
"(",
"command",
")",
")"
] | Creates a new build engine action.
Creates on bjam side an action named 'action_name', with
'command' as the command to be executed, 'bound_variables'
naming the list of variables bound when the command is executed
and specified flag.
If 'function' is not None, it should be a callable taking three
parameters:
- targets
- sources
- instance of the property_set class
This function will be called by set_update_action, and can
set additional target variables. | [
"Creates",
"a",
"new",
"build",
"engine",
"action",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L166-L199 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/engine.py | Engine.register_bjam_action | def register_bjam_action (self, action_name, function=None):
"""Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam.
"""
# We allow duplicate calls to this rule for the same
# action name. This way, jamfile rules that take action names
# can just register them without specially checking if
# action is already registered.
assert isinstance(action_name, basestring)
assert function is None or callable(function)
if action_name not in self.actions:
self.actions[action_name] = BjamNativeAction(action_name, function) | python | def register_bjam_action (self, action_name, function=None):
"""Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam.
"""
# We allow duplicate calls to this rule for the same
# action name. This way, jamfile rules that take action names
# can just register them without specially checking if
# action is already registered.
assert isinstance(action_name, basestring)
assert function is None or callable(function)
if action_name not in self.actions:
self.actions[action_name] = BjamNativeAction(action_name, function) | [
"def",
"register_bjam_action",
"(",
"self",
",",
"action_name",
",",
"function",
"=",
"None",
")",
":",
"# We allow duplicate calls to this rule for the same",
"# action name. This way, jamfile rules that take action names",
"# can just register them without specially checking if",
"# action is already registered.",
"assert",
"isinstance",
"(",
"action_name",
",",
"basestring",
")",
"assert",
"function",
"is",
"None",
"or",
"callable",
"(",
"function",
")",
"if",
"action_name",
"not",
"in",
"self",
".",
"actions",
":",
"self",
".",
"actions",
"[",
"action_name",
"]",
"=",
"BjamNativeAction",
"(",
"action_name",
",",
"function",
")"
] | Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam. | [
"Informs",
"self",
"that",
"action_name",
"is",
"declared",
"in",
"bjam",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L201-L216 | train |
apple/turicreate | src/unity/python/turicreate/data_structures/image.py | Image.pixel_data | def pixel_data(self):
"""
Returns the pixel data stored in the Image object.
Returns
-------
out : numpy.array
The pixel data of the Image object. It returns a multi-dimensional
numpy array, where the shape of the array represents the shape of
the image (height, weight, channels).
See Also
--------
width, channels, height
Examples
--------
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> image_array = img.pixel_data
"""
from .. import extensions as _extensions
data = _np.zeros((self.height, self.width, self.channels), dtype=_np.uint8)
_extensions.image_load_to_numpy(self, data.ctypes.data, data.strides)
if self.channels == 1:
data = data.squeeze(2)
return data | python | def pixel_data(self):
"""
Returns the pixel data stored in the Image object.
Returns
-------
out : numpy.array
The pixel data of the Image object. It returns a multi-dimensional
numpy array, where the shape of the array represents the shape of
the image (height, weight, channels).
See Also
--------
width, channels, height
Examples
--------
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> image_array = img.pixel_data
"""
from .. import extensions as _extensions
data = _np.zeros((self.height, self.width, self.channels), dtype=_np.uint8)
_extensions.image_load_to_numpy(self, data.ctypes.data, data.strides)
if self.channels == 1:
data = data.squeeze(2)
return data | [
"def",
"pixel_data",
"(",
"self",
")",
":",
"from",
".",
".",
"import",
"extensions",
"as",
"_extensions",
"data",
"=",
"_np",
".",
"zeros",
"(",
"(",
"self",
".",
"height",
",",
"self",
".",
"width",
",",
"self",
".",
"channels",
")",
",",
"dtype",
"=",
"_np",
".",
"uint8",
")",
"_extensions",
".",
"image_load_to_numpy",
"(",
"self",
",",
"data",
".",
"ctypes",
".",
"data",
",",
"data",
".",
"strides",
")",
"if",
"self",
".",
"channels",
"==",
"1",
":",
"data",
"=",
"data",
".",
"squeeze",
"(",
"2",
")",
"return",
"data"
] | Returns the pixel data stored in the Image object.
Returns
-------
out : numpy.array
The pixel data of the Image object. It returns a multi-dimensional
numpy array, where the shape of the array represents the shape of
the image (height, weight, channels).
See Also
--------
width, channels, height
Examples
--------
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> image_array = img.pixel_data | [
"Returns",
"the",
"pixel",
"data",
"stored",
"in",
"the",
"Image",
"object",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/image.py#L141-L167 | train |
apple/turicreate | src/unity/python/turicreate/data_structures/image.py | Image.show | def show(self):
"""
Displays the image. Requires PIL/Pillow.
Alternatively, you can create an :class:`turicreate.SArray` of this image
and use py:func:`turicreate.SArray.show()`
See Also
--------
turicreate.image_analysis.resize
Examples
--------
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> img.show()
"""
from ..visualization._plot import _target
try:
img = self._to_pil_image()
try:
# output into jupyter notebook if possible
if _target == 'auto' and \
get_ipython().__class__.__name__ == "ZMQInteractiveShell":
from io import BytesIO
from IPython import display
b = BytesIO()
img.save(b, format='png')
data = b.getvalue()
ip_img = display.Image(data=data, format='png', embed=True)
display.display(ip_img)
else:
# fall back to pillow .show (jupyter notebook integration disabled or not in jupyter notebook)
img.show()
except NameError:
# fall back to pillow .show (no get_ipython() available)
img.show()
except ImportError:
print("Install pillow to use the .show() method.") | python | def show(self):
"""
Displays the image. Requires PIL/Pillow.
Alternatively, you can create an :class:`turicreate.SArray` of this image
and use py:func:`turicreate.SArray.show()`
See Also
--------
turicreate.image_analysis.resize
Examples
--------
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> img.show()
"""
from ..visualization._plot import _target
try:
img = self._to_pil_image()
try:
# output into jupyter notebook if possible
if _target == 'auto' and \
get_ipython().__class__.__name__ == "ZMQInteractiveShell":
from io import BytesIO
from IPython import display
b = BytesIO()
img.save(b, format='png')
data = b.getvalue()
ip_img = display.Image(data=data, format='png', embed=True)
display.display(ip_img)
else:
# fall back to pillow .show (jupyter notebook integration disabled or not in jupyter notebook)
img.show()
except NameError:
# fall back to pillow .show (no get_ipython() available)
img.show()
except ImportError:
print("Install pillow to use the .show() method.") | [
"def",
"show",
"(",
"self",
")",
":",
"from",
".",
".",
"visualization",
".",
"_plot",
"import",
"_target",
"try",
":",
"img",
"=",
"self",
".",
"_to_pil_image",
"(",
")",
"try",
":",
"# output into jupyter notebook if possible",
"if",
"_target",
"==",
"'auto'",
"and",
"get_ipython",
"(",
")",
".",
"__class__",
".",
"__name__",
"==",
"\"ZMQInteractiveShell\"",
":",
"from",
"io",
"import",
"BytesIO",
"from",
"IPython",
"import",
"display",
"b",
"=",
"BytesIO",
"(",
")",
"img",
".",
"save",
"(",
"b",
",",
"format",
"=",
"'png'",
")",
"data",
"=",
"b",
".",
"getvalue",
"(",
")",
"ip_img",
"=",
"display",
".",
"Image",
"(",
"data",
"=",
"data",
",",
"format",
"=",
"'png'",
",",
"embed",
"=",
"True",
")",
"display",
".",
"display",
"(",
"ip_img",
")",
"else",
":",
"# fall back to pillow .show (jupyter notebook integration disabled or not in jupyter notebook)",
"img",
".",
"show",
"(",
")",
"except",
"NameError",
":",
"# fall back to pillow .show (no get_ipython() available)",
"img",
".",
"show",
"(",
")",
"except",
"ImportError",
":",
"print",
"(",
"\"Install pillow to use the .show() method.\"",
")"
] | Displays the image. Requires PIL/Pillow.
Alternatively, you can create an :class:`turicreate.SArray` of this image
and use py:func:`turicreate.SArray.show()`
See Also
--------
turicreate.image_analysis.resize
Examples
--------
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> img.show() | [
"Displays",
"the",
"image",
".",
"Requires",
"PIL",
"/",
"Pillow",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/image.py#L218-L256 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/model.py | MLModel.predict | def predict(self, data, useCPUOnly=False, **kwargs):
"""
Return predictions for the model. The kwargs gets passed into the
model as a dictionary.
Parameters
----------
data : dict[str, value]
Dictionary of data to make predictions from where the keys are
the names of the input features.
useCPUOnly : bool
Set to true to restrict computation to use only the CPU. Defaults to False.
Returns
-------
out : dict[str, value]
Predictions as a dictionary where each key is the output feature
name.
Examples
--------
>>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240}
>>> predictions = model.predict(data)
"""
if self.__proxy__:
return self.__proxy__.predict(data,useCPUOnly)
else:
if _macos_version() < (10, 13):
raise Exception('Model prediction is only supported on macOS version 10.13 or later.')
try:
from ..libcoremlpython import _MLModelProxy
except:
_MLModelProxy = None
if not _MLModelProxy:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.')
elif _MLModelProxy.maximum_supported_specification_version() < self._spec.specificationVersion:
engineVersion = _MLModelProxy.maximum_supported_specification_version()
raise Exception('The specification has version ' + str(self._spec.specificationVersion)
+ ' but the Core ML framework version installed only supports Core ML model specification version '
+ str(engineVersion) + ' or older.')
elif _has_custom_layer(self._spec):
raise Exception('This model contains a custom neural network layer, so predict is not supported.')
else:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.') | python | def predict(self, data, useCPUOnly=False, **kwargs):
"""
Return predictions for the model. The kwargs gets passed into the
model as a dictionary.
Parameters
----------
data : dict[str, value]
Dictionary of data to make predictions from where the keys are
the names of the input features.
useCPUOnly : bool
Set to true to restrict computation to use only the CPU. Defaults to False.
Returns
-------
out : dict[str, value]
Predictions as a dictionary where each key is the output feature
name.
Examples
--------
>>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240}
>>> predictions = model.predict(data)
"""
if self.__proxy__:
return self.__proxy__.predict(data,useCPUOnly)
else:
if _macos_version() < (10, 13):
raise Exception('Model prediction is only supported on macOS version 10.13 or later.')
try:
from ..libcoremlpython import _MLModelProxy
except:
_MLModelProxy = None
if not _MLModelProxy:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.')
elif _MLModelProxy.maximum_supported_specification_version() < self._spec.specificationVersion:
engineVersion = _MLModelProxy.maximum_supported_specification_version()
raise Exception('The specification has version ' + str(self._spec.specificationVersion)
+ ' but the Core ML framework version installed only supports Core ML model specification version '
+ str(engineVersion) + ' or older.')
elif _has_custom_layer(self._spec):
raise Exception('This model contains a custom neural network layer, so predict is not supported.')
else:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.') | [
"def",
"predict",
"(",
"self",
",",
"data",
",",
"useCPUOnly",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"__proxy__",
":",
"return",
"self",
".",
"__proxy__",
".",
"predict",
"(",
"data",
",",
"useCPUOnly",
")",
"else",
":",
"if",
"_macos_version",
"(",
")",
"<",
"(",
"10",
",",
"13",
")",
":",
"raise",
"Exception",
"(",
"'Model prediction is only supported on macOS version 10.13 or later.'",
")",
"try",
":",
"from",
".",
".",
"libcoremlpython",
"import",
"_MLModelProxy",
"except",
":",
"_MLModelProxy",
"=",
"None",
"if",
"not",
"_MLModelProxy",
":",
"raise",
"Exception",
"(",
"'Unable to load CoreML.framework. Cannot make predictions.'",
")",
"elif",
"_MLModelProxy",
".",
"maximum_supported_specification_version",
"(",
")",
"<",
"self",
".",
"_spec",
".",
"specificationVersion",
":",
"engineVersion",
"=",
"_MLModelProxy",
".",
"maximum_supported_specification_version",
"(",
")",
"raise",
"Exception",
"(",
"'The specification has version '",
"+",
"str",
"(",
"self",
".",
"_spec",
".",
"specificationVersion",
")",
"+",
"' but the Core ML framework version installed only supports Core ML model specification version '",
"+",
"str",
"(",
"engineVersion",
")",
"+",
"' or older.'",
")",
"elif",
"_has_custom_layer",
"(",
"self",
".",
"_spec",
")",
":",
"raise",
"Exception",
"(",
"'This model contains a custom neural network layer, so predict is not supported.'",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unable to load CoreML.framework. Cannot make predictions.'",
")"
] | Return predictions for the model. The kwargs gets passed into the
model as a dictionary.
Parameters
----------
data : dict[str, value]
Dictionary of data to make predictions from where the keys are
the names of the input features.
useCPUOnly : bool
Set to true to restrict computation to use only the CPU. Defaults to False.
Returns
-------
out : dict[str, value]
Predictions as a dictionary where each key is the output feature
name.
Examples
--------
>>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240}
>>> predictions = model.predict(data) | [
"Return",
"predictions",
"for",
"the",
"model",
".",
"The",
"kwargs",
"gets",
"passed",
"into",
"the",
"model",
"as",
"a",
"dictionary",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/model.py#L300-L347 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/model.py | MLModel.visualize_spec | def visualize_spec(self, port=None, input_shape_dict=None):
"""
Visualize the model.
Parameters
----------
port : int
if server is to be hosted on specific localhost port
input_shape_dict : dict
The shapes are calculated assuming the batch and sequence
are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide
full input shape
Returns
-------
None
Examples
--------
>>> model = coreml.models.MLModel('HousePricer.mlmodel')
>>> model.visualize_spec()
"""
spec = self._spec
model_type = spec.WhichOneof('Type')
model_description = spec.description
input_spec = model_description.input
output_spec = model_description.output
spec_inputs = []
for model_input in input_spec:
spec_inputs.append((model_input.name, str(model_input.type)))
spec_outputs = []
for model_output in output_spec:
spec_outputs.append((model_output.name, str(model_output.type)))
cy_nodes = []
cy_edges = []
cy_nodes.append({
'data': {
'id': 'input_node',
'name': '',
'info': {
'type': 'input node'
},
'classes': 'input',
}
})
for model_input, input_type in spec_inputs:
cy_nodes.append({
'data': {
'id': str(model_input),
'name': str(model_input),
'info': {
'type': "\n".join(str(input_type).split("\n")),
'inputs': str([]),
'outputs': str([model_input])
},
'parent': 'input_node'
},
'classes': 'input'
})
if model_type == 'pipeline':
pipeline_spec = spec.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineRegressor':
pipeline_spec = spec.pipelineRegressor.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineClassifier':
pipeline_spec = spec.pipelineClassifier.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'neuralNetwork':
nn_spec = spec.neuralNetwork
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkClassifier':
nn_spec = spec.neuralNetworkClassifier
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkRegressor':
nn_spec = spec.neuralNetworkRegressor
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
else:
print("Model is not of type Pipeline or Neural Network "
"and cannot be visualized")
return
import coremltools
web_dir = _os.path.join(_os.path.dirname(coremltools.__file__),
'graph_visualization')
with open('{}/model.json'.format(web_dir), 'w') as file:
_json.dump(cy_data, file)
_start_server(port, web_dir) | python | def visualize_spec(self, port=None, input_shape_dict=None):
"""
Visualize the model.
Parameters
----------
port : int
if server is to be hosted on specific localhost port
input_shape_dict : dict
The shapes are calculated assuming the batch and sequence
are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide
full input shape
Returns
-------
None
Examples
--------
>>> model = coreml.models.MLModel('HousePricer.mlmodel')
>>> model.visualize_spec()
"""
spec = self._spec
model_type = spec.WhichOneof('Type')
model_description = spec.description
input_spec = model_description.input
output_spec = model_description.output
spec_inputs = []
for model_input in input_spec:
spec_inputs.append((model_input.name, str(model_input.type)))
spec_outputs = []
for model_output in output_spec:
spec_outputs.append((model_output.name, str(model_output.type)))
cy_nodes = []
cy_edges = []
cy_nodes.append({
'data': {
'id': 'input_node',
'name': '',
'info': {
'type': 'input node'
},
'classes': 'input',
}
})
for model_input, input_type in spec_inputs:
cy_nodes.append({
'data': {
'id': str(model_input),
'name': str(model_input),
'info': {
'type': "\n".join(str(input_type).split("\n")),
'inputs': str([]),
'outputs': str([model_input])
},
'parent': 'input_node'
},
'classes': 'input'
})
if model_type == 'pipeline':
pipeline_spec = spec.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineRegressor':
pipeline_spec = spec.pipelineRegressor.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineClassifier':
pipeline_spec = spec.pipelineClassifier.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'neuralNetwork':
nn_spec = spec.neuralNetwork
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkClassifier':
nn_spec = spec.neuralNetworkClassifier
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkRegressor':
nn_spec = spec.neuralNetworkRegressor
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
else:
print("Model is not of type Pipeline or Neural Network "
"and cannot be visualized")
return
import coremltools
web_dir = _os.path.join(_os.path.dirname(coremltools.__file__),
'graph_visualization')
with open('{}/model.json'.format(web_dir), 'w') as file:
_json.dump(cy_data, file)
_start_server(port, web_dir) | [
"def",
"visualize_spec",
"(",
"self",
",",
"port",
"=",
"None",
",",
"input_shape_dict",
"=",
"None",
")",
":",
"spec",
"=",
"self",
".",
"_spec",
"model_type",
"=",
"spec",
".",
"WhichOneof",
"(",
"'Type'",
")",
"model_description",
"=",
"spec",
".",
"description",
"input_spec",
"=",
"model_description",
".",
"input",
"output_spec",
"=",
"model_description",
".",
"output",
"spec_inputs",
"=",
"[",
"]",
"for",
"model_input",
"in",
"input_spec",
":",
"spec_inputs",
".",
"append",
"(",
"(",
"model_input",
".",
"name",
",",
"str",
"(",
"model_input",
".",
"type",
")",
")",
")",
"spec_outputs",
"=",
"[",
"]",
"for",
"model_output",
"in",
"output_spec",
":",
"spec_outputs",
".",
"append",
"(",
"(",
"model_output",
".",
"name",
",",
"str",
"(",
"model_output",
".",
"type",
")",
")",
")",
"cy_nodes",
"=",
"[",
"]",
"cy_edges",
"=",
"[",
"]",
"cy_nodes",
".",
"append",
"(",
"{",
"'data'",
":",
"{",
"'id'",
":",
"'input_node'",
",",
"'name'",
":",
"''",
",",
"'info'",
":",
"{",
"'type'",
":",
"'input node'",
"}",
",",
"'classes'",
":",
"'input'",
",",
"}",
"}",
")",
"for",
"model_input",
",",
"input_type",
"in",
"spec_inputs",
":",
"cy_nodes",
".",
"append",
"(",
"{",
"'data'",
":",
"{",
"'id'",
":",
"str",
"(",
"model_input",
")",
",",
"'name'",
":",
"str",
"(",
"model_input",
")",
",",
"'info'",
":",
"{",
"'type'",
":",
"\"\\n\"",
".",
"join",
"(",
"str",
"(",
"input_type",
")",
".",
"split",
"(",
"\"\\n\"",
")",
")",
",",
"'inputs'",
":",
"str",
"(",
"[",
"]",
")",
",",
"'outputs'",
":",
"str",
"(",
"[",
"model_input",
"]",
")",
"}",
",",
"'parent'",
":",
"'input_node'",
"}",
",",
"'classes'",
":",
"'input'",
"}",
")",
"if",
"model_type",
"==",
"'pipeline'",
":",
"pipeline_spec",
"=",
"spec",
".",
"pipeline",
"cy_data",
"=",
"_pipeline_nodes_and_edges",
"(",
"cy_nodes",
",",
"cy_edges",
",",
"pipeline_spec",
",",
"spec_outputs",
")",
"elif",
"model_type",
"==",
"'pipelineRegressor'",
":",
"pipeline_spec",
"=",
"spec",
".",
"pipelineRegressor",
".",
"pipeline",
"cy_data",
"=",
"_pipeline_nodes_and_edges",
"(",
"cy_nodes",
",",
"cy_edges",
",",
"pipeline_spec",
",",
"spec_outputs",
")",
"elif",
"model_type",
"==",
"'pipelineClassifier'",
":",
"pipeline_spec",
"=",
"spec",
".",
"pipelineClassifier",
".",
"pipeline",
"cy_data",
"=",
"_pipeline_nodes_and_edges",
"(",
"cy_nodes",
",",
"cy_edges",
",",
"pipeline_spec",
",",
"spec_outputs",
")",
"elif",
"model_type",
"==",
"'neuralNetwork'",
":",
"nn_spec",
"=",
"spec",
".",
"neuralNetwork",
"cy_data",
"=",
"_neural_network_nodes_and_edges",
"(",
"nn_spec",
",",
"cy_nodes",
",",
"cy_edges",
",",
"spec_outputs",
",",
"input_spec",
",",
"input_shape_dict",
"=",
"input_shape_dict",
")",
"elif",
"model_type",
"==",
"'neuralNetworkClassifier'",
":",
"nn_spec",
"=",
"spec",
".",
"neuralNetworkClassifier",
"cy_data",
"=",
"_neural_network_nodes_and_edges",
"(",
"nn_spec",
",",
"cy_nodes",
",",
"cy_edges",
",",
"spec_outputs",
",",
"input_spec",
",",
"input_shape_dict",
"=",
"input_shape_dict",
")",
"elif",
"model_type",
"==",
"'neuralNetworkRegressor'",
":",
"nn_spec",
"=",
"spec",
".",
"neuralNetworkRegressor",
"cy_data",
"=",
"_neural_network_nodes_and_edges",
"(",
"nn_spec",
",",
"cy_nodes",
",",
"cy_edges",
",",
"spec_outputs",
",",
"input_spec",
",",
"input_shape_dict",
"=",
"input_shape_dict",
")",
"else",
":",
"print",
"(",
"\"Model is not of type Pipeline or Neural Network \"",
"\"and cannot be visualized\"",
")",
"return",
"import",
"coremltools",
"web_dir",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"_os",
".",
"path",
".",
"dirname",
"(",
"coremltools",
".",
"__file__",
")",
",",
"'graph_visualization'",
")",
"with",
"open",
"(",
"'{}/model.json'",
".",
"format",
"(",
"web_dir",
")",
",",
"'w'",
")",
"as",
"file",
":",
"_json",
".",
"dump",
"(",
"cy_data",
",",
"file",
")",
"_start_server",
"(",
"port",
",",
"web_dir",
")"
] | Visualize the model.
Parameters
----------
port : int
if server is to be hosted on specific localhost port
input_shape_dict : dict
The shapes are calculated assuming the batch and sequence
are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide
full input shape
Returns
-------
None
Examples
--------
>>> model = coreml.models.MLModel('HousePricer.mlmodel')
>>> model.visualize_spec() | [
"Visualize",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/model.py#L349-L478 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | _construct_auto_distance | def _construct_auto_distance(feature_names, column_names, column_types, sample):
"""
Construct composite distance parameters based on selected features and their
types.
"""
## Make a dictionary from the column_names and column_types
col_type_dict = {k: v for k, v in zip(column_names, column_types)}
## Loop through feature names, appending a distance component if the
# feature's type is *not* numeric. If the type *is* numeric, append it to
# the numeric_cols list, then at the end make a numeric columns distance
# component.
composite_distance_params = []
numeric_cols = []
for c in feature_names:
if col_type_dict[c] == str:
composite_distance_params.append([[c], _turicreate.distances.levenshtein, 1])
elif col_type_dict[c] == dict:
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] == array.array:
composite_distance_params.append([[c], _turicreate.distances.euclidean, 1])
elif col_type_dict[c] == list:
only_str_lists = _validate_lists(sample[c], allowed_types=[str])
if not only_str_lists:
raise TypeError("Only lists of all str objects are currently supported")
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] in [int, float, array.array, list]:
numeric_cols.append(c)
else:
raise TypeError("Unable to automatically determine a distance "+\
"for column {}".format(c))
# Make the standalone numeric column distance component
if len(numeric_cols) > 0:
composite_distance_params.append([numeric_cols, _turicreate.distances.euclidean, 1])
return composite_distance_params | python | def _construct_auto_distance(feature_names, column_names, column_types, sample):
"""
Construct composite distance parameters based on selected features and their
types.
"""
## Make a dictionary from the column_names and column_types
col_type_dict = {k: v for k, v in zip(column_names, column_types)}
## Loop through feature names, appending a distance component if the
# feature's type is *not* numeric. If the type *is* numeric, append it to
# the numeric_cols list, then at the end make a numeric columns distance
# component.
composite_distance_params = []
numeric_cols = []
for c in feature_names:
if col_type_dict[c] == str:
composite_distance_params.append([[c], _turicreate.distances.levenshtein, 1])
elif col_type_dict[c] == dict:
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] == array.array:
composite_distance_params.append([[c], _turicreate.distances.euclidean, 1])
elif col_type_dict[c] == list:
only_str_lists = _validate_lists(sample[c], allowed_types=[str])
if not only_str_lists:
raise TypeError("Only lists of all str objects are currently supported")
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] in [int, float, array.array, list]:
numeric_cols.append(c)
else:
raise TypeError("Unable to automatically determine a distance "+\
"for column {}".format(c))
# Make the standalone numeric column distance component
if len(numeric_cols) > 0:
composite_distance_params.append([numeric_cols, _turicreate.distances.euclidean, 1])
return composite_distance_params | [
"def",
"_construct_auto_distance",
"(",
"feature_names",
",",
"column_names",
",",
"column_types",
",",
"sample",
")",
":",
"## Make a dictionary from the column_names and column_types",
"col_type_dict",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"column_names",
",",
"column_types",
")",
"}",
"## Loop through feature names, appending a distance component if the",
"# feature's type is *not* numeric. If the type *is* numeric, append it to",
"# the numeric_cols list, then at the end make a numeric columns distance",
"# component.",
"composite_distance_params",
"=",
"[",
"]",
"numeric_cols",
"=",
"[",
"]",
"for",
"c",
"in",
"feature_names",
":",
"if",
"col_type_dict",
"[",
"c",
"]",
"==",
"str",
":",
"composite_distance_params",
".",
"append",
"(",
"[",
"[",
"c",
"]",
",",
"_turicreate",
".",
"distances",
".",
"levenshtein",
",",
"1",
"]",
")",
"elif",
"col_type_dict",
"[",
"c",
"]",
"==",
"dict",
":",
"composite_distance_params",
".",
"append",
"(",
"[",
"[",
"c",
"]",
",",
"_turicreate",
".",
"distances",
".",
"jaccard",
",",
"1",
"]",
")",
"elif",
"col_type_dict",
"[",
"c",
"]",
"==",
"array",
".",
"array",
":",
"composite_distance_params",
".",
"append",
"(",
"[",
"[",
"c",
"]",
",",
"_turicreate",
".",
"distances",
".",
"euclidean",
",",
"1",
"]",
")",
"elif",
"col_type_dict",
"[",
"c",
"]",
"==",
"list",
":",
"only_str_lists",
"=",
"_validate_lists",
"(",
"sample",
"[",
"c",
"]",
",",
"allowed_types",
"=",
"[",
"str",
"]",
")",
"if",
"not",
"only_str_lists",
":",
"raise",
"TypeError",
"(",
"\"Only lists of all str objects are currently supported\"",
")",
"composite_distance_params",
".",
"append",
"(",
"[",
"[",
"c",
"]",
",",
"_turicreate",
".",
"distances",
".",
"jaccard",
",",
"1",
"]",
")",
"elif",
"col_type_dict",
"[",
"c",
"]",
"in",
"[",
"int",
",",
"float",
",",
"array",
".",
"array",
",",
"list",
"]",
":",
"numeric_cols",
".",
"append",
"(",
"c",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unable to automatically determine a distance \"",
"+",
"\"for column {}\"",
".",
"format",
"(",
"c",
")",
")",
"# Make the standalone numeric column distance component",
"if",
"len",
"(",
"numeric_cols",
")",
">",
"0",
":",
"composite_distance_params",
".",
"append",
"(",
"[",
"numeric_cols",
",",
"_turicreate",
".",
"distances",
".",
"euclidean",
",",
"1",
"]",
")",
"return",
"composite_distance_params"
] | Construct composite distance parameters based on selected features and their
types. | [
"Construct",
"composite",
"distance",
"parameters",
"based",
"on",
"selected",
"features",
"and",
"their",
"types",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L33-L71 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | create | def create(dataset, label=None, features=None, distance=None, method='auto',
verbose=True, **kwargs):
"""
Create a nearest neighbor model, which can be searched efficiently and
quickly for the nearest neighbors of a query observation. If the `method`
argument is specified as `auto`, the type of model is chosen automatically
based on the type of data in `dataset`.
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change;
it is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
dataset : SFrame
Reference data. If the features for each observation are numeric, they
may be in separate columns of 'dataset' or a single column with lists
of values. The features may also be in the form of a column of sparse
vectors (i.e. dictionaries), with string keys and numeric values.
label : string, optional
Name of the SFrame column with row labels. If 'label' is not specified,
row numbers are used to identify reference dataset rows when the model
is queried.
features : list[string], optional
Name of the columns with features to use in computing distances between
observations and the query points. 'None' (the default) indicates that
all columns except the label should be used as features. Each column
can be one of the following types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate variable in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key indicates a separate variable in the model.
- *List*: list of integer or string values. Each element is treated as
a separate variable in the model.
- *String*: string values.
Please note: if a composite distance is also specified, this parameter
is ignored.
distance : string, function, or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of three types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Function*: a function handle from the
:mod:`~turicreate.toolkits.distances` module.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
If 'distance' is left unspecified or set to 'auto', a composite
distance is constructed automatically based on feature types.
method : {'auto', 'ball_tree', 'brute_force', 'lsh'}, optional
Method for computing nearest neighbors. The options are:
- *auto* (default): the method is chosen automatically, based on the
type of data and the distance. If the distance is 'manhattan' or
'euclidean' and the features are numeric or vectors of numeric
values, then the 'ball_tree' method is used. Otherwise, the
'brute_force' method is used.
- *ball_tree*: use a tree structure to find the k-closest neighbors to
each query point. The ball tree model is slower to construct than the
brute force model, but queries are faster than linear time. This
method is not applicable for the cosine and dot product distances.
See `Liu, et al (2004)
<http://papers.nips.cc/paper/2666-an-investigation-of-p
ractical-approximat e-nearest-neighbor-algorithms>`_ for
implementation details.
- *brute_force*: compute the distance from a query point to all
reference observations. There is no computation time for model
creation with the brute force method (although the reference data is
held in the model, but each query takes linear time.
- *lsh*: use Locality Sensitive Hashing (LSH) to find approximate
nearest neighbors efficiently. The LSH model supports 'euclidean',
'squared_euclidean', 'manhattan', 'cosine', 'jaccard', 'dot_product'
(deprecated), and 'transformed_dot_product' distances. Two options
are provided for LSH -- ``num_tables`` and
``num_projections_per_table``. See the notes below for details.
verbose: bool, optional
If True, print progress updates and model details.
**kwargs : optional
Options for the distance function and query method.
- *leaf_size*: for the ball tree method, the number of points in each
leaf of the tree. The default is to use the max of 1,000 and
n/(2^11), which ensures a maximum tree depth of 12.
- *num_tables*: For the LSH method, the number of hash tables
constructed. The default value is 20. We recommend choosing values
from 10 to 30.
- *num_projections_per_table*: For the LSH method, the number of
projections/hash functions for each hash table. The default value is
4 for 'jaccard' distance, 16 for 'cosine' distance and 8 for other
distances. We recommend using number 2 ~ 6 for 'jaccard' distance, 8
~ 20 for 'cosine' distance and 4 ~ 12 for other distances.
Returns
-------
out : NearestNeighborsModel
A structure for efficiently computing the nearest neighbors in 'dataset'
of new query points.
See Also
--------
NearestNeighborsModel.query, turicreate.toolkits.distances
Notes
-----
- Missing data is not allowed in the 'dataset' provided to this function.
Please use the :func:`turicreate.SFrame.fillna` and
:func:`turicreate.SFrame.dropna` utilities to handle missing data before
creating a nearest neighbors model.
- Missing keys in sparse vectors are assumed to have value 0.
- The `composite_params` parameter was removed as of Turi Create
version 1.5. The `distance` parameter now accepts either standard or
composite distances. Please see the :mod:`~turicreate.toolkits.distances`
module documentation for more information on composite distances.
- If the features should be weighted equally in the distance calculations
but are measured on different scales, it is important to standardize the
features. One way to do this is to subtract the mean of each column and
divide by the standard deviation.
**Locality Sensitive Hashing (LSH)**
There are several efficient nearest neighbors search algorithms that work
well for data with low dimensions :math:`d` (approximately 50). However,
most of the solutions suffer from either space or query time that is
exponential in :math:`d`. For large :math:`d`, they often provide little,
if any, improvement over the 'brute_force' method. This is a well-known
consequence of the phenomenon called `The Curse of Dimensionality`.
`Locality Sensitive Hashing (LSH)
<https://en.wikipedia.org/wiki/Locality-sensitive_hashing>`_ is an approach
that is designed to efficiently solve the *approximate* nearest neighbor
search problem for high dimensional data. The key idea of LSH is to hash
the data points using several hash functions, so that the probability of
collision is much higher for data points which are close to each other than
those which are far apart.
An LSH family is a family of functions :math:`h` which map points from the
metric space to a bucket, so that
- if :math:`d(p, q) \\leq R`, then :math:`h(p) = h(q)` with at least probability :math:`p_1`.
- if :math:`d(p, q) \\geq cR`, then :math:`h(p) = h(q)` with probability at most :math:`p_2`.
LSH for efficient approximate nearest neighbor search:
- We define a new family of hash functions :math:`g`, where each
function :math:`g` is obtained by concatenating :math:`k` functions
:math:`h_1, ..., h_k`, i.e., :math:`g(p)=[h_1(p),...,h_k(p)]`.
The algorithm constructs :math:`L` hash tables, each of which
corresponds to a different randomly chosen hash function :math:`g`.
There are :math:`k \\cdot L` hash functions used in total.
- In the preprocessing step, we hash all :math:`n` reference points
into each of the :math:`L` hash tables.
- Given a query point :math:`q`, the algorithm iterates over the
:math:`L` hash functions :math:`g`. For each :math:`g` considered, it
retrieves the data points that are hashed into the same bucket as q.
These data points from all the :math:`L` hash tables are considered as
candidates that are then re-ranked by their real distances with the query
data.
**Note** that the number of tables :math:`L` and the number of hash
functions per table :math:`k` are two main parameters. They can be set
using the options ``num_tables`` and ``num_projections_per_table``
respectively.
Hash functions for different distances:
- `euclidean` and `squared_euclidean`:
:math:`h(q) = \\lfloor \\frac{a \\cdot q + b}{w} \\rfloor` where
:math:`a` is a vector, of which the elements are independently
sampled from normal distribution, and :math:`b` is a number
uniformly sampled from :math:`[0, r]`. :math:`r` is a parameter for the
bucket width. We set :math:`r` using the average all-pair `euclidean`
distances from a small randomly sampled subset of the reference data.
- `manhattan`: The hash function of `manhattan` is similar with that of
`euclidean`. The only difference is that the elements of `a` are sampled
from Cauchy distribution, instead of normal distribution.
- `cosine`: Random Projection is designed to approximate the cosine
distance between vectors. The hash function is :math:`h(q) = sgn(a \\cdot
q)`, where :math:`a` is randomly sampled normal unit vector.
- `jaccard`: We use a recently proposed method one permutation hashing by
Shrivastava and Li. See the paper `[Shrivastava and Li, UAI 2014]
<http://www.auai.org/uai2014/proceedings/individuals/225.pdf>`_ for
details.
- `dot_product`: The reference data points are first transformed to
fixed-norm vectors, and then the minimum `dot_product` distance search
problem can be solved via finding the reference data with smallest
`cosine` distances. See the paper `[Neyshabur and Srebro, ICML 2015]
<http://proceedings.mlr.press/v37/neyshabur15.html>`_ for details.
References
----------
- `Wikipedia - nearest neighbor
search <http://en.wikipedia.org/wiki/Nearest_neighbor_search>`_
- `Wikipedia - ball tree <http://en.wikipedia.org/wiki/Ball_tree>`_
- Ball tree implementation: Liu, T., et al. (2004) `An Investigation of
Practical Approximate Nearest Neighbor Algorithms
<http://papers.nips.cc/paper/2666-an-investigation-of-p
ractical-approximat e-nearest-neighbor-algorithms>`_. Advances in Neural
Information Processing Systems pp. 825-832.
- `Wikipedia - Jaccard distance
<http://en.wikipedia.org/wiki/Jaccard_index>`_
- Weighted Jaccard distance: Chierichetti, F., et al. (2010) `Finding the
Jaccard Median
<http://theory.stanford.edu/~sergei/papers/soda10-jaccard.pdf>`_.
Proceedings of the Twenty-First Annual ACM-SIAM Symposium on Discrete
Algorithms. Society for Industrial and Applied Mathematics.
- `Wikipedia - Cosine distance
<http://en.wikipedia.org/wiki/Cosine_similarity>`_
- `Wikipedia - Levenshtein distance
<http://en.wikipedia.org/wiki/Levenshtein_distance>`_
- Locality Sensitive Hashing : Chapter 3 of the book `Mining Massive
Datasets <http://infolab.stanford.edu/~ullman/mmds/ch3.pdf>`_.
Examples
--------
Construct a nearest neighbors model with automatically determined method
and distance:
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'str_feature': ['cat', 'dog', 'fossa']})
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'])
For datasets with a large number of rows and up to about 100 variables, the
ball tree method often leads to much faster queries.
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'],
... method='ball_tree')
Often the final determination of a neighbor is based on several distance
computations over different sets of features. Each part of this composite
distance may have a different relative weight.
>>> my_dist = [[['X1', 'X2'], 'euclidean', 2.],
... [['str_feature'], 'levenshtein', 3.]]
...
>>> model = turicreate.nearest_neighbors.create(sf, distance=my_dist)
"""
## Validate the 'dataset' input
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Basic validation of the features input
if features is not None and not isinstance(features, list):
raise TypeError("If specified, input 'features' must be a list of " +
"strings.")
## Clean the method options and create the options dictionary
allowed_kwargs = ['leaf_size', 'num_tables', 'num_projections_per_table']
_method_options = {}
for k, v in kwargs.items():
if k in allowed_kwargs:
_method_options[k] = v
else:
raise _ToolkitError("'{}' is not a valid keyword argument".format(k) +
" for the nearest neighbors model. Please " +
"check for capitalization and other typos.")
## Exclude inappropriate combinations of method an distance
if method == 'ball_tree' and (distance == 'cosine'
or distance == _turicreate.distances.cosine
or distance == 'dot_product'
or distance == _turicreate.distances.dot_product
or distance == 'transformed_dot_product'
or distance == _turicreate.distances.transformed_dot_product):
raise TypeError("The ball tree method does not work with 'cosine' " +
"'dot_product', or 'transformed_dot_product' distance." +
"Please use the 'brute_force' method for these distances.")
if method == 'lsh' and ('num_projections_per_table' not in _method_options):
if distance == 'jaccard' or distance == _turicreate.distances.jaccard:
_method_options['num_projections_per_table'] = 4
elif distance == 'cosine' or distance == _turicreate.distances.cosine:
_method_options['num_projections_per_table'] = 16
else:
_method_options['num_projections_per_table'] = 8
## Initial validation and processing of the label
if label is None:
_label = _robust_column_name('__id', dataset.column_names())
_dataset = dataset.add_row_number(_label)
else:
_label = label
_dataset = _copy.copy(dataset)
col_type_map = {c:_dataset[c].dtype for c in _dataset.column_names()}
_validate_row_label(_label, col_type_map)
ref_labels = _dataset[_label]
## Determine the internal list of available feature names (may still include
# the row label name).
if features is None:
_features = _dataset.column_names()
else:
_features = _copy.deepcopy(features)
## Check if there's only one feature and it's the same as the row label.
# This would also be trapped by the composite distance validation, but the
# error message is not very informative for the user.
free_features = set(_features).difference([_label])
if len(free_features) < 1:
raise _ToolkitError("The only available feature is the same as the " +
"row label column. Please specify features " +
"that are not also row labels.")
### Validate and preprocess the distance function
### ---------------------------------------------
# - The form of the 'distance' controls how we interact with the 'features'
# parameter as well.
# - At this point, the row label 'label' may still be in the list(s) of
# features.
## Convert any distance function input into a single composite distance.
# distance is already a composite distance
if isinstance(distance, list):
distance = _copy.deepcopy(distance)
# distance is a single name (except 'auto') or function handle.
elif (hasattr(distance, '__call__') or
(isinstance(distance, str) and not distance == 'auto')):
distance = [[_features, distance, 1]]
# distance is unspecified and needs to be constructed.
elif distance is None or distance == 'auto':
sample = _dataset.head()
distance = _construct_auto_distance(_features,
_dataset.column_names(),
_dataset.column_types(),
sample)
else:
raise TypeError("Input 'distance' not understood. The 'distance' "
" argument must be a string, function handle, or " +
"composite distance.")
## Basic composite distance validation, remove the row label from all
# feature lists, and convert string distance names into distance functions.
distance = _scrub_composite_distance_features(distance, [_label])
distance = _convert_distance_names_to_functions(distance)
_validate_composite_distance(distance)
## Raise an error if any distances are used with non-lists
list_features_to_check = []
sparse_distances = ['jaccard', 'weighted_jaccard', 'cosine', 'dot_product', 'transformed_dot_product']
sparse_distances = [_turicreate.distances.__dict__[k] for k in sparse_distances]
for d in distance:
feature_names, dist, _ = d
list_features = [f for f in feature_names if _dataset[f].dtype == list]
for f in list_features:
if dist in sparse_distances:
list_features_to_check.append(f)
else:
raise TypeError("The chosen distance cannot currently be used " +
"on list-typed columns.")
for f in list_features_to_check:
only_str_lists = _validate_lists(_dataset[f], [str])
if not only_str_lists:
raise TypeError("Distances for sparse data, such as jaccard " +
"and weighted_jaccard, can only be used on " +
"lists containing only strings. Please modify " +
"any list features accordingly before creating " +
"the nearest neighbors model.")
## Raise an error if any component has string features are in single columns
for d in distance:
feature_names, dist, _ = d
if (len(feature_names) > 1) and (dist == _turicreate.distances.levenshtein):
raise ValueError("Levenshtein distance cannot be used with multiple " +
"columns. Please concatenate strings into a single " +
"column before creating the nearest neighbors model.")
## Get the union of feature names and make a clean dataset.
clean_features = _get_composite_distance_features(distance)
sf_clean = _tkutl._toolkits_select_columns(_dataset, clean_features)
## Decide which method to use
## - If more than one distance component (specified either directly or
# generated automatically because distance set to 'auto'), then do brute
# force.
if len(distance) > 1:
_method = 'brute_force'
if method != 'brute_force' and verbose is True:
print("Defaulting to brute force instead of ball tree because " +\
"there are multiple distance components.")
else:
if method == 'auto':
# get the total number of variables. Assume the number of elements in
# array type columns does not change
num_variables = sum([len(x) if hasattr(x, '__iter__') else 1
for x in _six.itervalues(sf_clean[0])])
# flag if all the features in the single composite are of numeric
# type.
numeric_type_flag = all([x in [int, float, list, array.array]
for x in sf_clean.column_types()])
## Conditions necessary for ball tree to work and be worth it
if ((distance[0][1] in ['euclidean',
'manhattan',
_turicreate.distances.euclidean,
_turicreate.distances.manhattan])
and numeric_type_flag is True
and num_variables <= 200):
_method = 'ball_tree'
else:
_method = 'brute_force'
else:
_method = method
## Pick the right model name for the method
if _method == 'ball_tree':
model_name = 'nearest_neighbors_ball_tree'
elif _method == 'brute_force':
model_name = 'nearest_neighbors_brute_force'
elif _method == 'lsh':
model_name = 'nearest_neighbors_lsh'
else:
raise ValueError("Method must be 'auto', 'ball_tree', 'brute_force', " +
"or 'lsh'.")
## Package the model options
opts = {}
opts.update(_method_options)
opts.update(
{'model_name': model_name,
'ref_labels': ref_labels,
'label': label,
'sf_features': sf_clean,
'composite_params': distance})
## Construct the nearest neighbors model
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.train(opts)
model_proxy = result['model']
model = NearestNeighborsModel(model_proxy)
return model | python | def create(dataset, label=None, features=None, distance=None, method='auto',
verbose=True, **kwargs):
"""
Create a nearest neighbor model, which can be searched efficiently and
quickly for the nearest neighbors of a query observation. If the `method`
argument is specified as `auto`, the type of model is chosen automatically
based on the type of data in `dataset`.
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change;
it is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
dataset : SFrame
Reference data. If the features for each observation are numeric, they
may be in separate columns of 'dataset' or a single column with lists
of values. The features may also be in the form of a column of sparse
vectors (i.e. dictionaries), with string keys and numeric values.
label : string, optional
Name of the SFrame column with row labels. If 'label' is not specified,
row numbers are used to identify reference dataset rows when the model
is queried.
features : list[string], optional
Name of the columns with features to use in computing distances between
observations and the query points. 'None' (the default) indicates that
all columns except the label should be used as features. Each column
can be one of the following types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate variable in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key indicates a separate variable in the model.
- *List*: list of integer or string values. Each element is treated as
a separate variable in the model.
- *String*: string values.
Please note: if a composite distance is also specified, this parameter
is ignored.
distance : string, function, or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of three types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Function*: a function handle from the
:mod:`~turicreate.toolkits.distances` module.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
If 'distance' is left unspecified or set to 'auto', a composite
distance is constructed automatically based on feature types.
method : {'auto', 'ball_tree', 'brute_force', 'lsh'}, optional
Method for computing nearest neighbors. The options are:
- *auto* (default): the method is chosen automatically, based on the
type of data and the distance. If the distance is 'manhattan' or
'euclidean' and the features are numeric or vectors of numeric
values, then the 'ball_tree' method is used. Otherwise, the
'brute_force' method is used.
- *ball_tree*: use a tree structure to find the k-closest neighbors to
each query point. The ball tree model is slower to construct than the
brute force model, but queries are faster than linear time. This
method is not applicable for the cosine and dot product distances.
See `Liu, et al (2004)
<http://papers.nips.cc/paper/2666-an-investigation-of-p
ractical-approximat e-nearest-neighbor-algorithms>`_ for
implementation details.
- *brute_force*: compute the distance from a query point to all
reference observations. There is no computation time for model
creation with the brute force method (although the reference data is
held in the model, but each query takes linear time.
- *lsh*: use Locality Sensitive Hashing (LSH) to find approximate
nearest neighbors efficiently. The LSH model supports 'euclidean',
'squared_euclidean', 'manhattan', 'cosine', 'jaccard', 'dot_product'
(deprecated), and 'transformed_dot_product' distances. Two options
are provided for LSH -- ``num_tables`` and
``num_projections_per_table``. See the notes below for details.
verbose: bool, optional
If True, print progress updates and model details.
**kwargs : optional
Options for the distance function and query method.
- *leaf_size*: for the ball tree method, the number of points in each
leaf of the tree. The default is to use the max of 1,000 and
n/(2^11), which ensures a maximum tree depth of 12.
- *num_tables*: For the LSH method, the number of hash tables
constructed. The default value is 20. We recommend choosing values
from 10 to 30.
- *num_projections_per_table*: For the LSH method, the number of
projections/hash functions for each hash table. The default value is
4 for 'jaccard' distance, 16 for 'cosine' distance and 8 for other
distances. We recommend using number 2 ~ 6 for 'jaccard' distance, 8
~ 20 for 'cosine' distance and 4 ~ 12 for other distances.
Returns
-------
out : NearestNeighborsModel
A structure for efficiently computing the nearest neighbors in 'dataset'
of new query points.
See Also
--------
NearestNeighborsModel.query, turicreate.toolkits.distances
Notes
-----
- Missing data is not allowed in the 'dataset' provided to this function.
Please use the :func:`turicreate.SFrame.fillna` and
:func:`turicreate.SFrame.dropna` utilities to handle missing data before
creating a nearest neighbors model.
- Missing keys in sparse vectors are assumed to have value 0.
- The `composite_params` parameter was removed as of Turi Create
version 1.5. The `distance` parameter now accepts either standard or
composite distances. Please see the :mod:`~turicreate.toolkits.distances`
module documentation for more information on composite distances.
- If the features should be weighted equally in the distance calculations
but are measured on different scales, it is important to standardize the
features. One way to do this is to subtract the mean of each column and
divide by the standard deviation.
**Locality Sensitive Hashing (LSH)**
There are several efficient nearest neighbors search algorithms that work
well for data with low dimensions :math:`d` (approximately 50). However,
most of the solutions suffer from either space or query time that is
exponential in :math:`d`. For large :math:`d`, they often provide little,
if any, improvement over the 'brute_force' method. This is a well-known
consequence of the phenomenon called `The Curse of Dimensionality`.
`Locality Sensitive Hashing (LSH)
<https://en.wikipedia.org/wiki/Locality-sensitive_hashing>`_ is an approach
that is designed to efficiently solve the *approximate* nearest neighbor
search problem for high dimensional data. The key idea of LSH is to hash
the data points using several hash functions, so that the probability of
collision is much higher for data points which are close to each other than
those which are far apart.
An LSH family is a family of functions :math:`h` which map points from the
metric space to a bucket, so that
- if :math:`d(p, q) \\leq R`, then :math:`h(p) = h(q)` with at least probability :math:`p_1`.
- if :math:`d(p, q) \\geq cR`, then :math:`h(p) = h(q)` with probability at most :math:`p_2`.
LSH for efficient approximate nearest neighbor search:
- We define a new family of hash functions :math:`g`, where each
function :math:`g` is obtained by concatenating :math:`k` functions
:math:`h_1, ..., h_k`, i.e., :math:`g(p)=[h_1(p),...,h_k(p)]`.
The algorithm constructs :math:`L` hash tables, each of which
corresponds to a different randomly chosen hash function :math:`g`.
There are :math:`k \\cdot L` hash functions used in total.
- In the preprocessing step, we hash all :math:`n` reference points
into each of the :math:`L` hash tables.
- Given a query point :math:`q`, the algorithm iterates over the
:math:`L` hash functions :math:`g`. For each :math:`g` considered, it
retrieves the data points that are hashed into the same bucket as q.
These data points from all the :math:`L` hash tables are considered as
candidates that are then re-ranked by their real distances with the query
data.
**Note** that the number of tables :math:`L` and the number of hash
functions per table :math:`k` are two main parameters. They can be set
using the options ``num_tables`` and ``num_projections_per_table``
respectively.
Hash functions for different distances:
- `euclidean` and `squared_euclidean`:
:math:`h(q) = \\lfloor \\frac{a \\cdot q + b}{w} \\rfloor` where
:math:`a` is a vector, of which the elements are independently
sampled from normal distribution, and :math:`b` is a number
uniformly sampled from :math:`[0, r]`. :math:`r` is a parameter for the
bucket width. We set :math:`r` using the average all-pair `euclidean`
distances from a small randomly sampled subset of the reference data.
- `manhattan`: The hash function of `manhattan` is similar with that of
`euclidean`. The only difference is that the elements of `a` are sampled
from Cauchy distribution, instead of normal distribution.
- `cosine`: Random Projection is designed to approximate the cosine
distance between vectors. The hash function is :math:`h(q) = sgn(a \\cdot
q)`, where :math:`a` is randomly sampled normal unit vector.
- `jaccard`: We use a recently proposed method one permutation hashing by
Shrivastava and Li. See the paper `[Shrivastava and Li, UAI 2014]
<http://www.auai.org/uai2014/proceedings/individuals/225.pdf>`_ for
details.
- `dot_product`: The reference data points are first transformed to
fixed-norm vectors, and then the minimum `dot_product` distance search
problem can be solved via finding the reference data with smallest
`cosine` distances. See the paper `[Neyshabur and Srebro, ICML 2015]
<http://proceedings.mlr.press/v37/neyshabur15.html>`_ for details.
References
----------
- `Wikipedia - nearest neighbor
search <http://en.wikipedia.org/wiki/Nearest_neighbor_search>`_
- `Wikipedia - ball tree <http://en.wikipedia.org/wiki/Ball_tree>`_
- Ball tree implementation: Liu, T., et al. (2004) `An Investigation of
Practical Approximate Nearest Neighbor Algorithms
<http://papers.nips.cc/paper/2666-an-investigation-of-p
ractical-approximat e-nearest-neighbor-algorithms>`_. Advances in Neural
Information Processing Systems pp. 825-832.
- `Wikipedia - Jaccard distance
<http://en.wikipedia.org/wiki/Jaccard_index>`_
- Weighted Jaccard distance: Chierichetti, F., et al. (2010) `Finding the
Jaccard Median
<http://theory.stanford.edu/~sergei/papers/soda10-jaccard.pdf>`_.
Proceedings of the Twenty-First Annual ACM-SIAM Symposium on Discrete
Algorithms. Society for Industrial and Applied Mathematics.
- `Wikipedia - Cosine distance
<http://en.wikipedia.org/wiki/Cosine_similarity>`_
- `Wikipedia - Levenshtein distance
<http://en.wikipedia.org/wiki/Levenshtein_distance>`_
- Locality Sensitive Hashing : Chapter 3 of the book `Mining Massive
Datasets <http://infolab.stanford.edu/~ullman/mmds/ch3.pdf>`_.
Examples
--------
Construct a nearest neighbors model with automatically determined method
and distance:
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'str_feature': ['cat', 'dog', 'fossa']})
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'])
For datasets with a large number of rows and up to about 100 variables, the
ball tree method often leads to much faster queries.
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'],
... method='ball_tree')
Often the final determination of a neighbor is based on several distance
computations over different sets of features. Each part of this composite
distance may have a different relative weight.
>>> my_dist = [[['X1', 'X2'], 'euclidean', 2.],
... [['str_feature'], 'levenshtein', 3.]]
...
>>> model = turicreate.nearest_neighbors.create(sf, distance=my_dist)
"""
## Validate the 'dataset' input
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Basic validation of the features input
if features is not None and not isinstance(features, list):
raise TypeError("If specified, input 'features' must be a list of " +
"strings.")
## Clean the method options and create the options dictionary
allowed_kwargs = ['leaf_size', 'num_tables', 'num_projections_per_table']
_method_options = {}
for k, v in kwargs.items():
if k in allowed_kwargs:
_method_options[k] = v
else:
raise _ToolkitError("'{}' is not a valid keyword argument".format(k) +
" for the nearest neighbors model. Please " +
"check for capitalization and other typos.")
## Exclude inappropriate combinations of method an distance
if method == 'ball_tree' and (distance == 'cosine'
or distance == _turicreate.distances.cosine
or distance == 'dot_product'
or distance == _turicreate.distances.dot_product
or distance == 'transformed_dot_product'
or distance == _turicreate.distances.transformed_dot_product):
raise TypeError("The ball tree method does not work with 'cosine' " +
"'dot_product', or 'transformed_dot_product' distance." +
"Please use the 'brute_force' method for these distances.")
if method == 'lsh' and ('num_projections_per_table' not in _method_options):
if distance == 'jaccard' or distance == _turicreate.distances.jaccard:
_method_options['num_projections_per_table'] = 4
elif distance == 'cosine' or distance == _turicreate.distances.cosine:
_method_options['num_projections_per_table'] = 16
else:
_method_options['num_projections_per_table'] = 8
## Initial validation and processing of the label
if label is None:
_label = _robust_column_name('__id', dataset.column_names())
_dataset = dataset.add_row_number(_label)
else:
_label = label
_dataset = _copy.copy(dataset)
col_type_map = {c:_dataset[c].dtype for c in _dataset.column_names()}
_validate_row_label(_label, col_type_map)
ref_labels = _dataset[_label]
## Determine the internal list of available feature names (may still include
# the row label name).
if features is None:
_features = _dataset.column_names()
else:
_features = _copy.deepcopy(features)
## Check if there's only one feature and it's the same as the row label.
# This would also be trapped by the composite distance validation, but the
# error message is not very informative for the user.
free_features = set(_features).difference([_label])
if len(free_features) < 1:
raise _ToolkitError("The only available feature is the same as the " +
"row label column. Please specify features " +
"that are not also row labels.")
### Validate and preprocess the distance function
### ---------------------------------------------
# - The form of the 'distance' controls how we interact with the 'features'
# parameter as well.
# - At this point, the row label 'label' may still be in the list(s) of
# features.
## Convert any distance function input into a single composite distance.
# distance is already a composite distance
if isinstance(distance, list):
distance = _copy.deepcopy(distance)
# distance is a single name (except 'auto') or function handle.
elif (hasattr(distance, '__call__') or
(isinstance(distance, str) and not distance == 'auto')):
distance = [[_features, distance, 1]]
# distance is unspecified and needs to be constructed.
elif distance is None or distance == 'auto':
sample = _dataset.head()
distance = _construct_auto_distance(_features,
_dataset.column_names(),
_dataset.column_types(),
sample)
else:
raise TypeError("Input 'distance' not understood. The 'distance' "
" argument must be a string, function handle, or " +
"composite distance.")
## Basic composite distance validation, remove the row label from all
# feature lists, and convert string distance names into distance functions.
distance = _scrub_composite_distance_features(distance, [_label])
distance = _convert_distance_names_to_functions(distance)
_validate_composite_distance(distance)
## Raise an error if any distances are used with non-lists
list_features_to_check = []
sparse_distances = ['jaccard', 'weighted_jaccard', 'cosine', 'dot_product', 'transformed_dot_product']
sparse_distances = [_turicreate.distances.__dict__[k] for k in sparse_distances]
for d in distance:
feature_names, dist, _ = d
list_features = [f for f in feature_names if _dataset[f].dtype == list]
for f in list_features:
if dist in sparse_distances:
list_features_to_check.append(f)
else:
raise TypeError("The chosen distance cannot currently be used " +
"on list-typed columns.")
for f in list_features_to_check:
only_str_lists = _validate_lists(_dataset[f], [str])
if not only_str_lists:
raise TypeError("Distances for sparse data, such as jaccard " +
"and weighted_jaccard, can only be used on " +
"lists containing only strings. Please modify " +
"any list features accordingly before creating " +
"the nearest neighbors model.")
## Raise an error if any component has string features are in single columns
for d in distance:
feature_names, dist, _ = d
if (len(feature_names) > 1) and (dist == _turicreate.distances.levenshtein):
raise ValueError("Levenshtein distance cannot be used with multiple " +
"columns. Please concatenate strings into a single " +
"column before creating the nearest neighbors model.")
## Get the union of feature names and make a clean dataset.
clean_features = _get_composite_distance_features(distance)
sf_clean = _tkutl._toolkits_select_columns(_dataset, clean_features)
## Decide which method to use
## - If more than one distance component (specified either directly or
# generated automatically because distance set to 'auto'), then do brute
# force.
if len(distance) > 1:
_method = 'brute_force'
if method != 'brute_force' and verbose is True:
print("Defaulting to brute force instead of ball tree because " +\
"there are multiple distance components.")
else:
if method == 'auto':
# get the total number of variables. Assume the number of elements in
# array type columns does not change
num_variables = sum([len(x) if hasattr(x, '__iter__') else 1
for x in _six.itervalues(sf_clean[0])])
# flag if all the features in the single composite are of numeric
# type.
numeric_type_flag = all([x in [int, float, list, array.array]
for x in sf_clean.column_types()])
## Conditions necessary for ball tree to work and be worth it
if ((distance[0][1] in ['euclidean',
'manhattan',
_turicreate.distances.euclidean,
_turicreate.distances.manhattan])
and numeric_type_flag is True
and num_variables <= 200):
_method = 'ball_tree'
else:
_method = 'brute_force'
else:
_method = method
## Pick the right model name for the method
if _method == 'ball_tree':
model_name = 'nearest_neighbors_ball_tree'
elif _method == 'brute_force':
model_name = 'nearest_neighbors_brute_force'
elif _method == 'lsh':
model_name = 'nearest_neighbors_lsh'
else:
raise ValueError("Method must be 'auto', 'ball_tree', 'brute_force', " +
"or 'lsh'.")
## Package the model options
opts = {}
opts.update(_method_options)
opts.update(
{'model_name': model_name,
'ref_labels': ref_labels,
'label': label,
'sf_features': sf_clean,
'composite_params': distance})
## Construct the nearest neighbors model
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.train(opts)
model_proxy = result['model']
model = NearestNeighborsModel(model_proxy)
return model | [
"def",
"create",
"(",
"dataset",
",",
"label",
"=",
"None",
",",
"features",
"=",
"None",
",",
"distance",
"=",
"None",
",",
"method",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"## Validate the 'dataset' input",
"_tkutl",
".",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"_tkutl",
".",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"\"dataset\"",
")",
"## Basic validation of the features input",
"if",
"features",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"features",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"If specified, input 'features' must be a list of \"",
"+",
"\"strings.\"",
")",
"## Clean the method options and create the options dictionary",
"allowed_kwargs",
"=",
"[",
"'leaf_size'",
",",
"'num_tables'",
",",
"'num_projections_per_table'",
"]",
"_method_options",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"allowed_kwargs",
":",
"_method_options",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"raise",
"_ToolkitError",
"(",
"\"'{}' is not a valid keyword argument\"",
".",
"format",
"(",
"k",
")",
"+",
"\" for the nearest neighbors model. Please \"",
"+",
"\"check for capitalization and other typos.\"",
")",
"## Exclude inappropriate combinations of method an distance",
"if",
"method",
"==",
"'ball_tree'",
"and",
"(",
"distance",
"==",
"'cosine'",
"or",
"distance",
"==",
"_turicreate",
".",
"distances",
".",
"cosine",
"or",
"distance",
"==",
"'dot_product'",
"or",
"distance",
"==",
"_turicreate",
".",
"distances",
".",
"dot_product",
"or",
"distance",
"==",
"'transformed_dot_product'",
"or",
"distance",
"==",
"_turicreate",
".",
"distances",
".",
"transformed_dot_product",
")",
":",
"raise",
"TypeError",
"(",
"\"The ball tree method does not work with 'cosine' \"",
"+",
"\"'dot_product', or 'transformed_dot_product' distance.\"",
"+",
"\"Please use the 'brute_force' method for these distances.\"",
")",
"if",
"method",
"==",
"'lsh'",
"and",
"(",
"'num_projections_per_table'",
"not",
"in",
"_method_options",
")",
":",
"if",
"distance",
"==",
"'jaccard'",
"or",
"distance",
"==",
"_turicreate",
".",
"distances",
".",
"jaccard",
":",
"_method_options",
"[",
"'num_projections_per_table'",
"]",
"=",
"4",
"elif",
"distance",
"==",
"'cosine'",
"or",
"distance",
"==",
"_turicreate",
".",
"distances",
".",
"cosine",
":",
"_method_options",
"[",
"'num_projections_per_table'",
"]",
"=",
"16",
"else",
":",
"_method_options",
"[",
"'num_projections_per_table'",
"]",
"=",
"8",
"## Initial validation and processing of the label",
"if",
"label",
"is",
"None",
":",
"_label",
"=",
"_robust_column_name",
"(",
"'__id'",
",",
"dataset",
".",
"column_names",
"(",
")",
")",
"_dataset",
"=",
"dataset",
".",
"add_row_number",
"(",
"_label",
")",
"else",
":",
"_label",
"=",
"label",
"_dataset",
"=",
"_copy",
".",
"copy",
"(",
"dataset",
")",
"col_type_map",
"=",
"{",
"c",
":",
"_dataset",
"[",
"c",
"]",
".",
"dtype",
"for",
"c",
"in",
"_dataset",
".",
"column_names",
"(",
")",
"}",
"_validate_row_label",
"(",
"_label",
",",
"col_type_map",
")",
"ref_labels",
"=",
"_dataset",
"[",
"_label",
"]",
"## Determine the internal list of available feature names (may still include",
"# the row label name).",
"if",
"features",
"is",
"None",
":",
"_features",
"=",
"_dataset",
".",
"column_names",
"(",
")",
"else",
":",
"_features",
"=",
"_copy",
".",
"deepcopy",
"(",
"features",
")",
"## Check if there's only one feature and it's the same as the row label.",
"# This would also be trapped by the composite distance validation, but the",
"# error message is not very informative for the user.",
"free_features",
"=",
"set",
"(",
"_features",
")",
".",
"difference",
"(",
"[",
"_label",
"]",
")",
"if",
"len",
"(",
"free_features",
")",
"<",
"1",
":",
"raise",
"_ToolkitError",
"(",
"\"The only available feature is the same as the \"",
"+",
"\"row label column. Please specify features \"",
"+",
"\"that are not also row labels.\"",
")",
"### Validate and preprocess the distance function",
"### ---------------------------------------------",
"# - The form of the 'distance' controls how we interact with the 'features'",
"# parameter as well.",
"# - At this point, the row label 'label' may still be in the list(s) of",
"# features.",
"## Convert any distance function input into a single composite distance.",
"# distance is already a composite distance",
"if",
"isinstance",
"(",
"distance",
",",
"list",
")",
":",
"distance",
"=",
"_copy",
".",
"deepcopy",
"(",
"distance",
")",
"# distance is a single name (except 'auto') or function handle.",
"elif",
"(",
"hasattr",
"(",
"distance",
",",
"'__call__'",
")",
"or",
"(",
"isinstance",
"(",
"distance",
",",
"str",
")",
"and",
"not",
"distance",
"==",
"'auto'",
")",
")",
":",
"distance",
"=",
"[",
"[",
"_features",
",",
"distance",
",",
"1",
"]",
"]",
"# distance is unspecified and needs to be constructed.",
"elif",
"distance",
"is",
"None",
"or",
"distance",
"==",
"'auto'",
":",
"sample",
"=",
"_dataset",
".",
"head",
"(",
")",
"distance",
"=",
"_construct_auto_distance",
"(",
"_features",
",",
"_dataset",
".",
"column_names",
"(",
")",
",",
"_dataset",
".",
"column_types",
"(",
")",
",",
"sample",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Input 'distance' not understood. The 'distance' \"",
"\" argument must be a string, function handle, or \"",
"+",
"\"composite distance.\"",
")",
"## Basic composite distance validation, remove the row label from all",
"# feature lists, and convert string distance names into distance functions.",
"distance",
"=",
"_scrub_composite_distance_features",
"(",
"distance",
",",
"[",
"_label",
"]",
")",
"distance",
"=",
"_convert_distance_names_to_functions",
"(",
"distance",
")",
"_validate_composite_distance",
"(",
"distance",
")",
"## Raise an error if any distances are used with non-lists",
"list_features_to_check",
"=",
"[",
"]",
"sparse_distances",
"=",
"[",
"'jaccard'",
",",
"'weighted_jaccard'",
",",
"'cosine'",
",",
"'dot_product'",
",",
"'transformed_dot_product'",
"]",
"sparse_distances",
"=",
"[",
"_turicreate",
".",
"distances",
".",
"__dict__",
"[",
"k",
"]",
"for",
"k",
"in",
"sparse_distances",
"]",
"for",
"d",
"in",
"distance",
":",
"feature_names",
",",
"dist",
",",
"_",
"=",
"d",
"list_features",
"=",
"[",
"f",
"for",
"f",
"in",
"feature_names",
"if",
"_dataset",
"[",
"f",
"]",
".",
"dtype",
"==",
"list",
"]",
"for",
"f",
"in",
"list_features",
":",
"if",
"dist",
"in",
"sparse_distances",
":",
"list_features_to_check",
".",
"append",
"(",
"f",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"The chosen distance cannot currently be used \"",
"+",
"\"on list-typed columns.\"",
")",
"for",
"f",
"in",
"list_features_to_check",
":",
"only_str_lists",
"=",
"_validate_lists",
"(",
"_dataset",
"[",
"f",
"]",
",",
"[",
"str",
"]",
")",
"if",
"not",
"only_str_lists",
":",
"raise",
"TypeError",
"(",
"\"Distances for sparse data, such as jaccard \"",
"+",
"\"and weighted_jaccard, can only be used on \"",
"+",
"\"lists containing only strings. Please modify \"",
"+",
"\"any list features accordingly before creating \"",
"+",
"\"the nearest neighbors model.\"",
")",
"## Raise an error if any component has string features are in single columns",
"for",
"d",
"in",
"distance",
":",
"feature_names",
",",
"dist",
",",
"_",
"=",
"d",
"if",
"(",
"len",
"(",
"feature_names",
")",
">",
"1",
")",
"and",
"(",
"dist",
"==",
"_turicreate",
".",
"distances",
".",
"levenshtein",
")",
":",
"raise",
"ValueError",
"(",
"\"Levenshtein distance cannot be used with multiple \"",
"+",
"\"columns. Please concatenate strings into a single \"",
"+",
"\"column before creating the nearest neighbors model.\"",
")",
"## Get the union of feature names and make a clean dataset.",
"clean_features",
"=",
"_get_composite_distance_features",
"(",
"distance",
")",
"sf_clean",
"=",
"_tkutl",
".",
"_toolkits_select_columns",
"(",
"_dataset",
",",
"clean_features",
")",
"## Decide which method to use",
"## - If more than one distance component (specified either directly or",
"# generated automatically because distance set to 'auto'), then do brute",
"# force.",
"if",
"len",
"(",
"distance",
")",
">",
"1",
":",
"_method",
"=",
"'brute_force'",
"if",
"method",
"!=",
"'brute_force'",
"and",
"verbose",
"is",
"True",
":",
"print",
"(",
"\"Defaulting to brute force instead of ball tree because \"",
"+",
"\"there are multiple distance components.\"",
")",
"else",
":",
"if",
"method",
"==",
"'auto'",
":",
"# get the total number of variables. Assume the number of elements in",
"# array type columns does not change",
"num_variables",
"=",
"sum",
"(",
"[",
"len",
"(",
"x",
")",
"if",
"hasattr",
"(",
"x",
",",
"'__iter__'",
")",
"else",
"1",
"for",
"x",
"in",
"_six",
".",
"itervalues",
"(",
"sf_clean",
"[",
"0",
"]",
")",
"]",
")",
"# flag if all the features in the single composite are of numeric",
"# type.",
"numeric_type_flag",
"=",
"all",
"(",
"[",
"x",
"in",
"[",
"int",
",",
"float",
",",
"list",
",",
"array",
".",
"array",
"]",
"for",
"x",
"in",
"sf_clean",
".",
"column_types",
"(",
")",
"]",
")",
"## Conditions necessary for ball tree to work and be worth it",
"if",
"(",
"(",
"distance",
"[",
"0",
"]",
"[",
"1",
"]",
"in",
"[",
"'euclidean'",
",",
"'manhattan'",
",",
"_turicreate",
".",
"distances",
".",
"euclidean",
",",
"_turicreate",
".",
"distances",
".",
"manhattan",
"]",
")",
"and",
"numeric_type_flag",
"is",
"True",
"and",
"num_variables",
"<=",
"200",
")",
":",
"_method",
"=",
"'ball_tree'",
"else",
":",
"_method",
"=",
"'brute_force'",
"else",
":",
"_method",
"=",
"method",
"## Pick the right model name for the method",
"if",
"_method",
"==",
"'ball_tree'",
":",
"model_name",
"=",
"'nearest_neighbors_ball_tree'",
"elif",
"_method",
"==",
"'brute_force'",
":",
"model_name",
"=",
"'nearest_neighbors_brute_force'",
"elif",
"_method",
"==",
"'lsh'",
":",
"model_name",
"=",
"'nearest_neighbors_lsh'",
"else",
":",
"raise",
"ValueError",
"(",
"\"Method must be 'auto', 'ball_tree', 'brute_force', \"",
"+",
"\"or 'lsh'.\"",
")",
"## Package the model options",
"opts",
"=",
"{",
"}",
"opts",
".",
"update",
"(",
"_method_options",
")",
"opts",
".",
"update",
"(",
"{",
"'model_name'",
":",
"model_name",
",",
"'ref_labels'",
":",
"ref_labels",
",",
"'label'",
":",
"label",
",",
"'sf_features'",
":",
"sf_clean",
",",
"'composite_params'",
":",
"distance",
"}",
")",
"## Construct the nearest neighbors model",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"result",
"=",
"_turicreate",
".",
"extensions",
".",
"_nearest_neighbors",
".",
"train",
"(",
"opts",
")",
"model_proxy",
"=",
"result",
"[",
"'model'",
"]",
"model",
"=",
"NearestNeighborsModel",
"(",
"model_proxy",
")",
"return",
"model"
] | Create a nearest neighbor model, which can be searched efficiently and
quickly for the nearest neighbors of a query observation. If the `method`
argument is specified as `auto`, the type of model is chosen automatically
based on the type of data in `dataset`.
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change;
it is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
dataset : SFrame
Reference data. If the features for each observation are numeric, they
may be in separate columns of 'dataset' or a single column with lists
of values. The features may also be in the form of a column of sparse
vectors (i.e. dictionaries), with string keys and numeric values.
label : string, optional
Name of the SFrame column with row labels. If 'label' is not specified,
row numbers are used to identify reference dataset rows when the model
is queried.
features : list[string], optional
Name of the columns with features to use in computing distances between
observations and the query points. 'None' (the default) indicates that
all columns except the label should be used as features. Each column
can be one of the following types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate variable in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key indicates a separate variable in the model.
- *List*: list of integer or string values. Each element is treated as
a separate variable in the model.
- *String*: string values.
Please note: if a composite distance is also specified, this parameter
is ignored.
distance : string, function, or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of three types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Function*: a function handle from the
:mod:`~turicreate.toolkits.distances` module.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
If 'distance' is left unspecified or set to 'auto', a composite
distance is constructed automatically based on feature types.
method : {'auto', 'ball_tree', 'brute_force', 'lsh'}, optional
Method for computing nearest neighbors. The options are:
- *auto* (default): the method is chosen automatically, based on the
type of data and the distance. If the distance is 'manhattan' or
'euclidean' and the features are numeric or vectors of numeric
values, then the 'ball_tree' method is used. Otherwise, the
'brute_force' method is used.
- *ball_tree*: use a tree structure to find the k-closest neighbors to
each query point. The ball tree model is slower to construct than the
brute force model, but queries are faster than linear time. This
method is not applicable for the cosine and dot product distances.
See `Liu, et al (2004)
<http://papers.nips.cc/paper/2666-an-investigation-of-p
ractical-approximat e-nearest-neighbor-algorithms>`_ for
implementation details.
- *brute_force*: compute the distance from a query point to all
reference observations. There is no computation time for model
creation with the brute force method (although the reference data is
held in the model, but each query takes linear time.
- *lsh*: use Locality Sensitive Hashing (LSH) to find approximate
nearest neighbors efficiently. The LSH model supports 'euclidean',
'squared_euclidean', 'manhattan', 'cosine', 'jaccard', 'dot_product'
(deprecated), and 'transformed_dot_product' distances. Two options
are provided for LSH -- ``num_tables`` and
``num_projections_per_table``. See the notes below for details.
verbose: bool, optional
If True, print progress updates and model details.
**kwargs : optional
Options for the distance function and query method.
- *leaf_size*: for the ball tree method, the number of points in each
leaf of the tree. The default is to use the max of 1,000 and
n/(2^11), which ensures a maximum tree depth of 12.
- *num_tables*: For the LSH method, the number of hash tables
constructed. The default value is 20. We recommend choosing values
from 10 to 30.
- *num_projections_per_table*: For the LSH method, the number of
projections/hash functions for each hash table. The default value is
4 for 'jaccard' distance, 16 for 'cosine' distance and 8 for other
distances. We recommend using number 2 ~ 6 for 'jaccard' distance, 8
~ 20 for 'cosine' distance and 4 ~ 12 for other distances.
Returns
-------
out : NearestNeighborsModel
A structure for efficiently computing the nearest neighbors in 'dataset'
of new query points.
See Also
--------
NearestNeighborsModel.query, turicreate.toolkits.distances
Notes
-----
- Missing data is not allowed in the 'dataset' provided to this function.
Please use the :func:`turicreate.SFrame.fillna` and
:func:`turicreate.SFrame.dropna` utilities to handle missing data before
creating a nearest neighbors model.
- Missing keys in sparse vectors are assumed to have value 0.
- The `composite_params` parameter was removed as of Turi Create
version 1.5. The `distance` parameter now accepts either standard or
composite distances. Please see the :mod:`~turicreate.toolkits.distances`
module documentation for more information on composite distances.
- If the features should be weighted equally in the distance calculations
but are measured on different scales, it is important to standardize the
features. One way to do this is to subtract the mean of each column and
divide by the standard deviation.
**Locality Sensitive Hashing (LSH)**
There are several efficient nearest neighbors search algorithms that work
well for data with low dimensions :math:`d` (approximately 50). However,
most of the solutions suffer from either space or query time that is
exponential in :math:`d`. For large :math:`d`, they often provide little,
if any, improvement over the 'brute_force' method. This is a well-known
consequence of the phenomenon called `The Curse of Dimensionality`.
`Locality Sensitive Hashing (LSH)
<https://en.wikipedia.org/wiki/Locality-sensitive_hashing>`_ is an approach
that is designed to efficiently solve the *approximate* nearest neighbor
search problem for high dimensional data. The key idea of LSH is to hash
the data points using several hash functions, so that the probability of
collision is much higher for data points which are close to each other than
those which are far apart.
An LSH family is a family of functions :math:`h` which map points from the
metric space to a bucket, so that
- if :math:`d(p, q) \\leq R`, then :math:`h(p) = h(q)` with at least probability :math:`p_1`.
- if :math:`d(p, q) \\geq cR`, then :math:`h(p) = h(q)` with probability at most :math:`p_2`.
LSH for efficient approximate nearest neighbor search:
- We define a new family of hash functions :math:`g`, where each
function :math:`g` is obtained by concatenating :math:`k` functions
:math:`h_1, ..., h_k`, i.e., :math:`g(p)=[h_1(p),...,h_k(p)]`.
The algorithm constructs :math:`L` hash tables, each of which
corresponds to a different randomly chosen hash function :math:`g`.
There are :math:`k \\cdot L` hash functions used in total.
- In the preprocessing step, we hash all :math:`n` reference points
into each of the :math:`L` hash tables.
- Given a query point :math:`q`, the algorithm iterates over the
:math:`L` hash functions :math:`g`. For each :math:`g` considered, it
retrieves the data points that are hashed into the same bucket as q.
These data points from all the :math:`L` hash tables are considered as
candidates that are then re-ranked by their real distances with the query
data.
**Note** that the number of tables :math:`L` and the number of hash
functions per table :math:`k` are two main parameters. They can be set
using the options ``num_tables`` and ``num_projections_per_table``
respectively.
Hash functions for different distances:
- `euclidean` and `squared_euclidean`:
:math:`h(q) = \\lfloor \\frac{a \\cdot q + b}{w} \\rfloor` where
:math:`a` is a vector, of which the elements are independently
sampled from normal distribution, and :math:`b` is a number
uniformly sampled from :math:`[0, r]`. :math:`r` is a parameter for the
bucket width. We set :math:`r` using the average all-pair `euclidean`
distances from a small randomly sampled subset of the reference data.
- `manhattan`: The hash function of `manhattan` is similar with that of
`euclidean`. The only difference is that the elements of `a` are sampled
from Cauchy distribution, instead of normal distribution.
- `cosine`: Random Projection is designed to approximate the cosine
distance between vectors. The hash function is :math:`h(q) = sgn(a \\cdot
q)`, where :math:`a` is randomly sampled normal unit vector.
- `jaccard`: We use a recently proposed method one permutation hashing by
Shrivastava and Li. See the paper `[Shrivastava and Li, UAI 2014]
<http://www.auai.org/uai2014/proceedings/individuals/225.pdf>`_ for
details.
- `dot_product`: The reference data points are first transformed to
fixed-norm vectors, and then the minimum `dot_product` distance search
problem can be solved via finding the reference data with smallest
`cosine` distances. See the paper `[Neyshabur and Srebro, ICML 2015]
<http://proceedings.mlr.press/v37/neyshabur15.html>`_ for details.
References
----------
- `Wikipedia - nearest neighbor
search <http://en.wikipedia.org/wiki/Nearest_neighbor_search>`_
- `Wikipedia - ball tree <http://en.wikipedia.org/wiki/Ball_tree>`_
- Ball tree implementation: Liu, T., et al. (2004) `An Investigation of
Practical Approximate Nearest Neighbor Algorithms
<http://papers.nips.cc/paper/2666-an-investigation-of-p
ractical-approximat e-nearest-neighbor-algorithms>`_. Advances in Neural
Information Processing Systems pp. 825-832.
- `Wikipedia - Jaccard distance
<http://en.wikipedia.org/wiki/Jaccard_index>`_
- Weighted Jaccard distance: Chierichetti, F., et al. (2010) `Finding the
Jaccard Median
<http://theory.stanford.edu/~sergei/papers/soda10-jaccard.pdf>`_.
Proceedings of the Twenty-First Annual ACM-SIAM Symposium on Discrete
Algorithms. Society for Industrial and Applied Mathematics.
- `Wikipedia - Cosine distance
<http://en.wikipedia.org/wiki/Cosine_similarity>`_
- `Wikipedia - Levenshtein distance
<http://en.wikipedia.org/wiki/Levenshtein_distance>`_
- Locality Sensitive Hashing : Chapter 3 of the book `Mining Massive
Datasets <http://infolab.stanford.edu/~ullman/mmds/ch3.pdf>`_.
Examples
--------
Construct a nearest neighbors model with automatically determined method
and distance:
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'str_feature': ['cat', 'dog', 'fossa']})
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'])
For datasets with a large number of rows and up to about 100 variables, the
ball tree method often leads to much faster queries.
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'],
... method='ball_tree')
Often the final determination of a neighbor is based on several distance
computations over different sets of features. Each part of this composite
distance may have a different relative weight.
>>> my_dist = [[['X1', 'X2'], 'euclidean', 2.],
... [['str_feature'], 'levenshtein', 3.]]
...
>>> model = turicreate.nearest_neighbors.create(sf, distance=my_dist) | [
"Create",
"a",
"nearest",
"neighbor",
"model",
"which",
"can",
"be",
"searched",
"efficiently",
"and",
"quickly",
"for",
"the",
"nearest",
"neighbors",
"of",
"a",
"query",
"observation",
".",
"If",
"the",
"method",
"argument",
"is",
"specified",
"as",
"auto",
"the",
"type",
"of",
"model",
"is",
"chosen",
"automatically",
"based",
"on",
"the",
"type",
"of",
"data",
"in",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L74-L584 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | NearestNeighborsModel._get_summary_struct | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
("Method", 'method'),
("Number of distance components", 'num_distance_components'),
("Number of examples", 'num_examples'),
("Number of feature columns", 'num_features'),
("Number of unpacked features", 'num_unpacked_features'),
("Total training time (seconds)", 'training_time')]
ball_tree_fields = [
("Tree depth", 'tree_depth'),
("Leaf size", 'leaf_size')]
lsh_fields = [
("Number of hash tables", 'num_tables'),
("Number of projections per table", 'num_projections_per_table')]
sections = [model_fields]
section_titles = ['Attributes']
if (self.method == 'ball_tree'):
sections.append(ball_tree_fields)
section_titles.append('Ball Tree Attributes')
if (self.method == 'lsh'):
sections.append(lsh_fields)
section_titles.append('LSH Attributes')
return (sections, section_titles) | python | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
("Method", 'method'),
("Number of distance components", 'num_distance_components'),
("Number of examples", 'num_examples'),
("Number of feature columns", 'num_features'),
("Number of unpacked features", 'num_unpacked_features'),
("Total training time (seconds)", 'training_time')]
ball_tree_fields = [
("Tree depth", 'tree_depth'),
("Leaf size", 'leaf_size')]
lsh_fields = [
("Number of hash tables", 'num_tables'),
("Number of projections per table", 'num_projections_per_table')]
sections = [model_fields]
section_titles = ['Attributes']
if (self.method == 'ball_tree'):
sections.append(ball_tree_fields)
section_titles.append('Ball Tree Attributes')
if (self.method == 'lsh'):
sections.append(lsh_fields)
section_titles.append('LSH Attributes')
return (sections, section_titles) | [
"def",
"_get_summary_struct",
"(",
"self",
")",
":",
"model_fields",
"=",
"[",
"(",
"\"Method\"",
",",
"'method'",
")",
",",
"(",
"\"Number of distance components\"",
",",
"'num_distance_components'",
")",
",",
"(",
"\"Number of examples\"",
",",
"'num_examples'",
")",
",",
"(",
"\"Number of feature columns\"",
",",
"'num_features'",
")",
",",
"(",
"\"Number of unpacked features\"",
",",
"'num_unpacked_features'",
")",
",",
"(",
"\"Total training time (seconds)\"",
",",
"'training_time'",
")",
"]",
"ball_tree_fields",
"=",
"[",
"(",
"\"Tree depth\"",
",",
"'tree_depth'",
")",
",",
"(",
"\"Leaf size\"",
",",
"'leaf_size'",
")",
"]",
"lsh_fields",
"=",
"[",
"(",
"\"Number of hash tables\"",
",",
"'num_tables'",
")",
",",
"(",
"\"Number of projections per table\"",
",",
"'num_projections_per_table'",
")",
"]",
"sections",
"=",
"[",
"model_fields",
"]",
"section_titles",
"=",
"[",
"'Attributes'",
"]",
"if",
"(",
"self",
".",
"method",
"==",
"'ball_tree'",
")",
":",
"sections",
".",
"append",
"(",
"ball_tree_fields",
")",
"section_titles",
".",
"append",
"(",
"'Ball Tree Attributes'",
")",
"if",
"(",
"self",
".",
"method",
"==",
"'lsh'",
")",
":",
"sections",
".",
"append",
"(",
"lsh_fields",
")",
"section_titles",
".",
"append",
"(",
"'LSH Attributes'",
")",
"return",
"(",
"sections",
",",
"section_titles",
")"
] | Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object. | [
"Returns",
"a",
"structured",
"description",
"of",
"the",
"model",
"including",
"(",
"where",
"relevant",
")",
"the",
"schema",
"of",
"the",
"training",
"data",
"description",
"of",
"the",
"training",
"data",
"training",
"statistics",
"and",
"model",
"hyperparameters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L619-L664 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | NearestNeighborsModel._list_fields | def _list_fields(self):
"""
List the fields stored in the model, including data, model, and
training options. Each field can be queried with the ``get`` method.
Returns
-------
out : list
List of fields queryable with the ``get`` method.
"""
opts = {'model': self.__proxy__, 'model_name': self.__name__}
response = _turicreate.extensions._nearest_neighbors.list_fields(opts)
return sorted(response.keys()) | python | def _list_fields(self):
"""
List the fields stored in the model, including data, model, and
training options. Each field can be queried with the ``get`` method.
Returns
-------
out : list
List of fields queryable with the ``get`` method.
"""
opts = {'model': self.__proxy__, 'model_name': self.__name__}
response = _turicreate.extensions._nearest_neighbors.list_fields(opts)
return sorted(response.keys()) | [
"def",
"_list_fields",
"(",
"self",
")",
":",
"opts",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'model_name'",
":",
"self",
".",
"__name__",
"}",
"response",
"=",
"_turicreate",
".",
"extensions",
".",
"_nearest_neighbors",
".",
"list_fields",
"(",
"opts",
")",
"return",
"sorted",
"(",
"response",
".",
"keys",
"(",
")",
")"
] | List the fields stored in the model, including data, model, and
training options. Each field can be queried with the ``get`` method.
Returns
-------
out : list
List of fields queryable with the ``get`` method. | [
"List",
"the",
"fields",
"stored",
"in",
"the",
"model",
"including",
"data",
"model",
"and",
"training",
"options",
".",
"Each",
"field",
"can",
"be",
"queried",
"with",
"the",
"get",
"method",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L675-L688 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | NearestNeighborsModel._get | def _get(self, field):
"""
Return the value of a given field. The list of all queryable fields is
detailed below, and can be obtained with the
:func:`~turicreate.nearest_neighbors.NearestNeighborsModel._list_fields`
method.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| distance | Measure of dissimilarity between two points |
+-----------------------+----------------------------------------------+
| features | Feature column names |
+-----------------------+----------------------------------------------+
| unpacked_features | Names of the individual features used |
+-----------------------+----------------------------------------------+
| label | Label column names |
+-----------------------+----------------------------------------------+
| leaf_size | Max size of leaf nodes (ball tree only) |
+-----------------------+----------------------------------------------+
| method | Method of organizing reference data |
+-----------------------+----------------------------------------------+
| num_examples | Number of reference data observations |
+-----------------------+----------------------------------------------+
| num_features | Number of features for distance computation |
+-----------------------+----------------------------------------------+
| num_unpacked_features | Number of unpacked features |
+-----------------------+----------------------------------------------+
| num_variables | Number of variables for distance computation |
+-----------------------+----------------------------------------------+
| training_time | Time to create the reference structure |
+-----------------------+----------------------------------------------+
| tree_depth | Number of levels in the tree (ball tree only)|
+-----------------------+----------------------------------------------+
Parameters
----------
field : string
Name of the field to be retrieved.
Returns
-------
out
Value of the requested field.
"""
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'field': field}
response = _turicreate.extensions._nearest_neighbors.get_value(opts)
return response['value'] | python | def _get(self, field):
"""
Return the value of a given field. The list of all queryable fields is
detailed below, and can be obtained with the
:func:`~turicreate.nearest_neighbors.NearestNeighborsModel._list_fields`
method.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| distance | Measure of dissimilarity between two points |
+-----------------------+----------------------------------------------+
| features | Feature column names |
+-----------------------+----------------------------------------------+
| unpacked_features | Names of the individual features used |
+-----------------------+----------------------------------------------+
| label | Label column names |
+-----------------------+----------------------------------------------+
| leaf_size | Max size of leaf nodes (ball tree only) |
+-----------------------+----------------------------------------------+
| method | Method of organizing reference data |
+-----------------------+----------------------------------------------+
| num_examples | Number of reference data observations |
+-----------------------+----------------------------------------------+
| num_features | Number of features for distance computation |
+-----------------------+----------------------------------------------+
| num_unpacked_features | Number of unpacked features |
+-----------------------+----------------------------------------------+
| num_variables | Number of variables for distance computation |
+-----------------------+----------------------------------------------+
| training_time | Time to create the reference structure |
+-----------------------+----------------------------------------------+
| tree_depth | Number of levels in the tree (ball tree only)|
+-----------------------+----------------------------------------------+
Parameters
----------
field : string
Name of the field to be retrieved.
Returns
-------
out
Value of the requested field.
"""
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'field': field}
response = _turicreate.extensions._nearest_neighbors.get_value(opts)
return response['value'] | [
"def",
"_get",
"(",
"self",
",",
"field",
")",
":",
"opts",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'model_name'",
":",
"self",
".",
"__name__",
",",
"'field'",
":",
"field",
"}",
"response",
"=",
"_turicreate",
".",
"extensions",
".",
"_nearest_neighbors",
".",
"get_value",
"(",
"opts",
")",
"return",
"response",
"[",
"'value'",
"]"
] | Return the value of a given field. The list of all queryable fields is
detailed below, and can be obtained with the
:func:`~turicreate.nearest_neighbors.NearestNeighborsModel._list_fields`
method.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| distance | Measure of dissimilarity between two points |
+-----------------------+----------------------------------------------+
| features | Feature column names |
+-----------------------+----------------------------------------------+
| unpacked_features | Names of the individual features used |
+-----------------------+----------------------------------------------+
| label | Label column names |
+-----------------------+----------------------------------------------+
| leaf_size | Max size of leaf nodes (ball tree only) |
+-----------------------+----------------------------------------------+
| method | Method of organizing reference data |
+-----------------------+----------------------------------------------+
| num_examples | Number of reference data observations |
+-----------------------+----------------------------------------------+
| num_features | Number of features for distance computation |
+-----------------------+----------------------------------------------+
| num_unpacked_features | Number of unpacked features |
+-----------------------+----------------------------------------------+
| num_variables | Number of variables for distance computation |
+-----------------------+----------------------------------------------+
| training_time | Time to create the reference structure |
+-----------------------+----------------------------------------------+
| tree_depth | Number of levels in the tree (ball tree only)|
+-----------------------+----------------------------------------------+
Parameters
----------
field : string
Name of the field to be retrieved.
Returns
-------
out
Value of the requested field. | [
"Return",
"the",
"value",
"of",
"a",
"given",
"field",
".",
"The",
"list",
"of",
"all",
"queryable",
"fields",
"is",
"detailed",
"below",
"and",
"can",
"be",
"obtained",
"with",
"the",
":",
"func",
":",
"~turicreate",
".",
"nearest_neighbors",
".",
"NearestNeighborsModel",
".",
"_list_fields",
"method",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L690-L739 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | NearestNeighborsModel._training_stats | def _training_stats(self):
"""
Return a dictionary of statistics collected during creation of the
model. These statistics are also available with the ``get`` method and
are described in more detail in that method's documentation.
Returns
-------
out : dict
Dictionary of statistics compiled during creation of the
NearestNeighborsModel.
See Also
--------
summary
Examples
--------
>>> sf = turicreate.SFrame({'label': range(3),
... 'feature1': [0.98, 0.62, 0.11],
... 'feature2': [0.69, 0.58, 0.36]})
>>> model = turicreate.nearest_neighbors.create(sf, 'label')
>>> model.training_stats()
{'features': 'feature1, feature2',
'label': 'label',
'leaf_size': 1000,
'num_examples': 3,
'num_features': 2,
'num_variables': 2,
'training_time': 0.023223,
'tree_depth': 1}
"""
opts = {'model': self.__proxy__, 'model_name': self.__name__}
return _turicreate.extensions._nearest_neighbors.training_stats(opts) | python | def _training_stats(self):
"""
Return a dictionary of statistics collected during creation of the
model. These statistics are also available with the ``get`` method and
are described in more detail in that method's documentation.
Returns
-------
out : dict
Dictionary of statistics compiled during creation of the
NearestNeighborsModel.
See Also
--------
summary
Examples
--------
>>> sf = turicreate.SFrame({'label': range(3),
... 'feature1': [0.98, 0.62, 0.11],
... 'feature2': [0.69, 0.58, 0.36]})
>>> model = turicreate.nearest_neighbors.create(sf, 'label')
>>> model.training_stats()
{'features': 'feature1, feature2',
'label': 'label',
'leaf_size': 1000,
'num_examples': 3,
'num_features': 2,
'num_variables': 2,
'training_time': 0.023223,
'tree_depth': 1}
"""
opts = {'model': self.__proxy__, 'model_name': self.__name__}
return _turicreate.extensions._nearest_neighbors.training_stats(opts) | [
"def",
"_training_stats",
"(",
"self",
")",
":",
"opts",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'model_name'",
":",
"self",
".",
"__name__",
"}",
"return",
"_turicreate",
".",
"extensions",
".",
"_nearest_neighbors",
".",
"training_stats",
"(",
"opts",
")"
] | Return a dictionary of statistics collected during creation of the
model. These statistics are also available with the ``get`` method and
are described in more detail in that method's documentation.
Returns
-------
out : dict
Dictionary of statistics compiled during creation of the
NearestNeighborsModel.
See Also
--------
summary
Examples
--------
>>> sf = turicreate.SFrame({'label': range(3),
... 'feature1': [0.98, 0.62, 0.11],
... 'feature2': [0.69, 0.58, 0.36]})
>>> model = turicreate.nearest_neighbors.create(sf, 'label')
>>> model.training_stats()
{'features': 'feature1, feature2',
'label': 'label',
'leaf_size': 1000,
'num_examples': 3,
'num_features': 2,
'num_variables': 2,
'training_time': 0.023223,
'tree_depth': 1} | [
"Return",
"a",
"dictionary",
"of",
"statistics",
"collected",
"during",
"creation",
"of",
"the",
"model",
".",
"These",
"statistics",
"are",
"also",
"available",
"with",
"the",
"get",
"method",
"and",
"are",
"described",
"in",
"more",
"detail",
"in",
"that",
"method",
"s",
"documentation",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L741-L775 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | NearestNeighborsModel.query | def query(self, dataset, label=None, k=5, radius=None, verbose=True):
"""
For each row of the input 'dataset', retrieve the nearest neighbors
from the model's stored data. In general, the query dataset does not
need to be the same as the reference data stored in the model, but if
it is, the 'include_self_edges' parameter can be set to False to
exclude results that match query points to themselves.
Parameters
----------
dataset : SFrame
Query data. Must contain columns with the same names and types as
the features used to train the model. Additional columns are
allowed, but ignored. Please see the nearest neighbors
:func:`~turicreate.nearest_neighbors.create` documentation for more
detail on allowable data types.
label : str, optional
Name of the query SFrame column with row labels. If 'label' is not
specified, row numbers are used to identify query dataset rows in
the output SFrame.
k : int, optional
Number of nearest neighbors to return from the reference set for
each query observation. The default is 5 neighbors, but setting it
to ``None`` will return all neighbors within ``radius`` of the
query point.
radius : float, optional
Only neighbors whose distance to a query point is smaller than this
value are returned. The default is ``None``, in which case the
``k`` nearest neighbors are returned for each query point,
regardless of distance.
verbose: bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame
An SFrame with the k-nearest neighbors of each query observation.
The result contains four columns: the first is the label of the
query observation, the second is the label of the nearby reference
observation, the third is the distance between the query and
reference observations, and the fourth is the rank of the reference
observation among the query's k-nearest neighbors.
See Also
--------
similarity_graph
Notes
-----
- The `dataset` input to this method *can* have missing values (in
contrast to the reference dataset used to create the nearest
neighbors model). Missing numeric values are imputed to be the mean
of the corresponding feature in the reference dataset, and missing
strings are imputed to be empty strings.
- If both ``k`` and ``radius`` are set to ``None``, each query point
returns all of the reference set. If the reference dataset has
:math:`n` rows and the query dataset has :math:`m` rows, the output
is an SFrame with :math:`nm` rows.
- For models created with the 'lsh' method, the query results may have
fewer query labels than input query points. Because LSH is an
approximate method, a query point may have fewer than 'k' neighbors.
If LSH returns no neighbors at all for a query, the query point is
omitted from the results.
Examples
--------
First construct a toy SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'label': range(3),
... 'feature1': [0.98, 0.62, 0.11],
... 'feature2': [0.69, 0.58, 0.36]})
>>> model = turicreate.nearest_neighbors.create(sf, 'label')
A new SFrame contains query observations with same schema as the
reference SFrame. This SFrame is passed to the ``query`` method.
>>> queries = turicreate.SFrame({'label': range(3),
... 'feature1': [0.05, 0.61, 0.99],
... 'feature2': [0.06, 0.97, 0.86]})
>>> model.query(queries, 'label', k=2)
+-------------+-----------------+----------------+------+
| query_label | reference_label | distance | rank |
+-------------+-----------------+----------------+------+
| 0 | 2 | 0.305941170816 | 1 |
| 0 | 1 | 0.771556867638 | 2 |
| 1 | 1 | 0.390128184063 | 1 |
| 1 | 0 | 0.464004310325 | 2 |
| 2 | 0 | 0.170293863659 | 1 |
| 2 | 1 | 0.464004310325 | 2 |
+-------------+-----------------+----------------+------+
"""
## Validate the 'dataset' input
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Get model features
ref_features = self.features
sf_features = _tkutl._toolkits_select_columns(dataset, ref_features)
## Validate and preprocess the 'label' input
if label is None:
query_labels = _turicreate.SArray.from_sequence(len(dataset))
else:
if not label in dataset.column_names():
raise ValueError(
"Input 'label' must be a string matching the name of a " +\
"column in the reference SFrame 'dataset'.")
if not dataset[label].dtype == str and not dataset[label].dtype == int:
raise TypeError("The label column must contain integers or strings.")
if label in ref_features:
raise ValueError("The label column cannot be one of the features.")
query_labels = dataset[label]
## Validate neighborhood parameters 'k' and 'radius'
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'features': sf_features,
'query_labels': query_labels,
'k': k,
'radius': radius}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.query(opts)
return result['neighbors'] | python | def query(self, dataset, label=None, k=5, radius=None, verbose=True):
"""
For each row of the input 'dataset', retrieve the nearest neighbors
from the model's stored data. In general, the query dataset does not
need to be the same as the reference data stored in the model, but if
it is, the 'include_self_edges' parameter can be set to False to
exclude results that match query points to themselves.
Parameters
----------
dataset : SFrame
Query data. Must contain columns with the same names and types as
the features used to train the model. Additional columns are
allowed, but ignored. Please see the nearest neighbors
:func:`~turicreate.nearest_neighbors.create` documentation for more
detail on allowable data types.
label : str, optional
Name of the query SFrame column with row labels. If 'label' is not
specified, row numbers are used to identify query dataset rows in
the output SFrame.
k : int, optional
Number of nearest neighbors to return from the reference set for
each query observation. The default is 5 neighbors, but setting it
to ``None`` will return all neighbors within ``radius`` of the
query point.
radius : float, optional
Only neighbors whose distance to a query point is smaller than this
value are returned. The default is ``None``, in which case the
``k`` nearest neighbors are returned for each query point,
regardless of distance.
verbose: bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame
An SFrame with the k-nearest neighbors of each query observation.
The result contains four columns: the first is the label of the
query observation, the second is the label of the nearby reference
observation, the third is the distance between the query and
reference observations, and the fourth is the rank of the reference
observation among the query's k-nearest neighbors.
See Also
--------
similarity_graph
Notes
-----
- The `dataset` input to this method *can* have missing values (in
contrast to the reference dataset used to create the nearest
neighbors model). Missing numeric values are imputed to be the mean
of the corresponding feature in the reference dataset, and missing
strings are imputed to be empty strings.
- If both ``k`` and ``radius`` are set to ``None``, each query point
returns all of the reference set. If the reference dataset has
:math:`n` rows and the query dataset has :math:`m` rows, the output
is an SFrame with :math:`nm` rows.
- For models created with the 'lsh' method, the query results may have
fewer query labels than input query points. Because LSH is an
approximate method, a query point may have fewer than 'k' neighbors.
If LSH returns no neighbors at all for a query, the query point is
omitted from the results.
Examples
--------
First construct a toy SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'label': range(3),
... 'feature1': [0.98, 0.62, 0.11],
... 'feature2': [0.69, 0.58, 0.36]})
>>> model = turicreate.nearest_neighbors.create(sf, 'label')
A new SFrame contains query observations with same schema as the
reference SFrame. This SFrame is passed to the ``query`` method.
>>> queries = turicreate.SFrame({'label': range(3),
... 'feature1': [0.05, 0.61, 0.99],
... 'feature2': [0.06, 0.97, 0.86]})
>>> model.query(queries, 'label', k=2)
+-------------+-----------------+----------------+------+
| query_label | reference_label | distance | rank |
+-------------+-----------------+----------------+------+
| 0 | 2 | 0.305941170816 | 1 |
| 0 | 1 | 0.771556867638 | 2 |
| 1 | 1 | 0.390128184063 | 1 |
| 1 | 0 | 0.464004310325 | 2 |
| 2 | 0 | 0.170293863659 | 1 |
| 2 | 1 | 0.464004310325 | 2 |
+-------------+-----------------+----------------+------+
"""
## Validate the 'dataset' input
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Get model features
ref_features = self.features
sf_features = _tkutl._toolkits_select_columns(dataset, ref_features)
## Validate and preprocess the 'label' input
if label is None:
query_labels = _turicreate.SArray.from_sequence(len(dataset))
else:
if not label in dataset.column_names():
raise ValueError(
"Input 'label' must be a string matching the name of a " +\
"column in the reference SFrame 'dataset'.")
if not dataset[label].dtype == str and not dataset[label].dtype == int:
raise TypeError("The label column must contain integers or strings.")
if label in ref_features:
raise ValueError("The label column cannot be one of the features.")
query_labels = dataset[label]
## Validate neighborhood parameters 'k' and 'radius'
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'features': sf_features,
'query_labels': query_labels,
'k': k,
'radius': radius}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.query(opts)
return result['neighbors'] | [
"def",
"query",
"(",
"self",
",",
"dataset",
",",
"label",
"=",
"None",
",",
"k",
"=",
"5",
",",
"radius",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"## Validate the 'dataset' input",
"_tkutl",
".",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"_tkutl",
".",
"_raise_error_if_sframe_empty",
"(",
"dataset",
",",
"\"dataset\"",
")",
"## Get model features",
"ref_features",
"=",
"self",
".",
"features",
"sf_features",
"=",
"_tkutl",
".",
"_toolkits_select_columns",
"(",
"dataset",
",",
"ref_features",
")",
"## Validate and preprocess the 'label' input",
"if",
"label",
"is",
"None",
":",
"query_labels",
"=",
"_turicreate",
".",
"SArray",
".",
"from_sequence",
"(",
"len",
"(",
"dataset",
")",
")",
"else",
":",
"if",
"not",
"label",
"in",
"dataset",
".",
"column_names",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Input 'label' must be a string matching the name of a \"",
"+",
"\"column in the reference SFrame 'dataset'.\"",
")",
"if",
"not",
"dataset",
"[",
"label",
"]",
".",
"dtype",
"==",
"str",
"and",
"not",
"dataset",
"[",
"label",
"]",
".",
"dtype",
"==",
"int",
":",
"raise",
"TypeError",
"(",
"\"The label column must contain integers or strings.\"",
")",
"if",
"label",
"in",
"ref_features",
":",
"raise",
"ValueError",
"(",
"\"The label column cannot be one of the features.\"",
")",
"query_labels",
"=",
"dataset",
"[",
"label",
"]",
"## Validate neighborhood parameters 'k' and 'radius'",
"if",
"k",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"k",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"Input 'k' must be an integer.\"",
")",
"if",
"k",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'k' must be larger than 0.\"",
")",
"if",
"radius",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"radius",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Input 'radius' must be an integer or float.\"",
")",
"if",
"radius",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'radius' must be non-negative.\"",
")",
"## Set k and radius to special values to indicate 'None'",
"if",
"k",
"is",
"None",
":",
"k",
"=",
"-",
"1",
"if",
"radius",
"is",
"None",
":",
"radius",
"=",
"-",
"1.0",
"opts",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'model_name'",
":",
"self",
".",
"__name__",
",",
"'features'",
":",
"sf_features",
",",
"'query_labels'",
":",
"query_labels",
",",
"'k'",
":",
"k",
",",
"'radius'",
":",
"radius",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"result",
"=",
"_turicreate",
".",
"extensions",
".",
"_nearest_neighbors",
".",
"query",
"(",
"opts",
")",
"return",
"result",
"[",
"'neighbors'",
"]"
] | For each row of the input 'dataset', retrieve the nearest neighbors
from the model's stored data. In general, the query dataset does not
need to be the same as the reference data stored in the model, but if
it is, the 'include_self_edges' parameter can be set to False to
exclude results that match query points to themselves.
Parameters
----------
dataset : SFrame
Query data. Must contain columns with the same names and types as
the features used to train the model. Additional columns are
allowed, but ignored. Please see the nearest neighbors
:func:`~turicreate.nearest_neighbors.create` documentation for more
detail on allowable data types.
label : str, optional
Name of the query SFrame column with row labels. If 'label' is not
specified, row numbers are used to identify query dataset rows in
the output SFrame.
k : int, optional
Number of nearest neighbors to return from the reference set for
each query observation. The default is 5 neighbors, but setting it
to ``None`` will return all neighbors within ``radius`` of the
query point.
radius : float, optional
Only neighbors whose distance to a query point is smaller than this
value are returned. The default is ``None``, in which case the
``k`` nearest neighbors are returned for each query point,
regardless of distance.
verbose: bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame
An SFrame with the k-nearest neighbors of each query observation.
The result contains four columns: the first is the label of the
query observation, the second is the label of the nearby reference
observation, the third is the distance between the query and
reference observations, and the fourth is the rank of the reference
observation among the query's k-nearest neighbors.
See Also
--------
similarity_graph
Notes
-----
- The `dataset` input to this method *can* have missing values (in
contrast to the reference dataset used to create the nearest
neighbors model). Missing numeric values are imputed to be the mean
of the corresponding feature in the reference dataset, and missing
strings are imputed to be empty strings.
- If both ``k`` and ``radius`` are set to ``None``, each query point
returns all of the reference set. If the reference dataset has
:math:`n` rows and the query dataset has :math:`m` rows, the output
is an SFrame with :math:`nm` rows.
- For models created with the 'lsh' method, the query results may have
fewer query labels than input query points. Because LSH is an
approximate method, a query point may have fewer than 'k' neighbors.
If LSH returns no neighbors at all for a query, the query point is
omitted from the results.
Examples
--------
First construct a toy SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'label': range(3),
... 'feature1': [0.98, 0.62, 0.11],
... 'feature2': [0.69, 0.58, 0.36]})
>>> model = turicreate.nearest_neighbors.create(sf, 'label')
A new SFrame contains query observations with same schema as the
reference SFrame. This SFrame is passed to the ``query`` method.
>>> queries = turicreate.SFrame({'label': range(3),
... 'feature1': [0.05, 0.61, 0.99],
... 'feature2': [0.06, 0.97, 0.86]})
>>> model.query(queries, 'label', k=2)
+-------------+-----------------+----------------+------+
| query_label | reference_label | distance | rank |
+-------------+-----------------+----------------+------+
| 0 | 2 | 0.305941170816 | 1 |
| 0 | 1 | 0.771556867638 | 2 |
| 1 | 1 | 0.390128184063 | 1 |
| 1 | 0 | 0.464004310325 | 2 |
| 2 | 0 | 0.170293863659 | 1 |
| 2 | 1 | 0.464004310325 | 2 |
+-------------+-----------------+----------------+------+ | [
"For",
"each",
"row",
"of",
"the",
"input",
"dataset",
"retrieve",
"the",
"nearest",
"neighbors",
"from",
"the",
"model",
"s",
"stored",
"data",
".",
"In",
"general",
"the",
"query",
"dataset",
"does",
"not",
"need",
"to",
"be",
"the",
"same",
"as",
"the",
"reference",
"data",
"stored",
"in",
"the",
"model",
"but",
"if",
"it",
"is",
"the",
"include_self_edges",
"parameter",
"can",
"be",
"set",
"to",
"False",
"to",
"exclude",
"results",
"that",
"match",
"query",
"points",
"to",
"themselves",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L777-L935 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py | NearestNeighborsModel.similarity_graph | def similarity_graph(self, k=5, radius=None, include_self_edges=False,
output_type='SGraph', verbose=True):
"""
Construct the similarity graph on the reference dataset, which is
already stored in the model. This is conceptually very similar to
running `query` with the reference set, but this method is optimized
for the purpose, syntactically simpler, and automatically removes
self-edges.
Parameters
----------
k : int, optional
Maximum number of neighbors to return for each point in the
dataset. Setting this to ``None`` deactivates the constraint, so
that all neighbors are returned within ``radius`` of a given point.
radius : float, optional
For a given point, only neighbors within this distance are
returned. The default is ``None``, in which case the ``k`` nearest
neighbors are returned for each query point, regardless of
distance.
include_self_edges : bool, optional
For most distance functions, each point in the model's reference
dataset is its own nearest neighbor. If this parameter is set to
False, this result is ignored, and the nearest neighbors are
returned *excluding* the point itself.
output_type : {'SGraph', 'SFrame'}, optional
By default, the results are returned in the form of an SGraph,
where each point in the reference dataset is a vertex and an edge A
-> B indicates that vertex B is a nearest neighbor of vertex A. If
'output_type' is set to 'SFrame', the output is in the same form as
the results of the 'query' method: an SFrame with columns
indicating the query label (in this case the query data is the same
as the reference data), reference label, distance between the two
points, and the rank of the neighbor.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame or SGraph
The type of the output object depends on the 'output_type'
parameter. See the parameter description for more detail.
Notes
-----
- If both ``k`` and ``radius`` are set to ``None``, each data point is
matched to the entire dataset. If the reference dataset has
:math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an
SGraph with :math:`n^2` edges).
- For models created with the 'lsh' method, the output similarity graph
may have fewer vertices than there are data points in the original
reference set. Because LSH is an approximate method, a query point
may have fewer than 'k' neighbors. If LSH returns no neighbors at all
for a query and self-edges are excluded, the query point is omitted
from the results.
Examples
--------
First construct an SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
... 'x2': [0.69, 0.58, 0.36]})
...
>>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')
Unlike the ``query`` method, there is no need for a second dataset with
``similarity_graph``.
>>> g = model.similarity_graph(k=1) # an SGraph
>>> g.edges
+----------+----------+----------------+------+
| __src_id | __dst_id | distance | rank |
+----------+----------+----------------+------+
| 0 | 1 | 0.376430604494 | 1 |
| 2 | 1 | 0.55542776308 | 1 |
| 1 | 0 | 0.376430604494 | 1 |
+----------+----------+----------------+------+
"""
## Validate inputs.
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'k': k,
'radius': radius,
'include_self_edges': include_self_edges}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts)
knn = result['neighbors']
if output_type == "SFrame":
return knn
else:
sg = _SGraph(edges=knn, src_field='query_label',
dst_field='reference_label')
return sg | python | def similarity_graph(self, k=5, radius=None, include_self_edges=False,
output_type='SGraph', verbose=True):
"""
Construct the similarity graph on the reference dataset, which is
already stored in the model. This is conceptually very similar to
running `query` with the reference set, but this method is optimized
for the purpose, syntactically simpler, and automatically removes
self-edges.
Parameters
----------
k : int, optional
Maximum number of neighbors to return for each point in the
dataset. Setting this to ``None`` deactivates the constraint, so
that all neighbors are returned within ``radius`` of a given point.
radius : float, optional
For a given point, only neighbors within this distance are
returned. The default is ``None``, in which case the ``k`` nearest
neighbors are returned for each query point, regardless of
distance.
include_self_edges : bool, optional
For most distance functions, each point in the model's reference
dataset is its own nearest neighbor. If this parameter is set to
False, this result is ignored, and the nearest neighbors are
returned *excluding* the point itself.
output_type : {'SGraph', 'SFrame'}, optional
By default, the results are returned in the form of an SGraph,
where each point in the reference dataset is a vertex and an edge A
-> B indicates that vertex B is a nearest neighbor of vertex A. If
'output_type' is set to 'SFrame', the output is in the same form as
the results of the 'query' method: an SFrame with columns
indicating the query label (in this case the query data is the same
as the reference data), reference label, distance between the two
points, and the rank of the neighbor.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame or SGraph
The type of the output object depends on the 'output_type'
parameter. See the parameter description for more detail.
Notes
-----
- If both ``k`` and ``radius`` are set to ``None``, each data point is
matched to the entire dataset. If the reference dataset has
:math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an
SGraph with :math:`n^2` edges).
- For models created with the 'lsh' method, the output similarity graph
may have fewer vertices than there are data points in the original
reference set. Because LSH is an approximate method, a query point
may have fewer than 'k' neighbors. If LSH returns no neighbors at all
for a query and self-edges are excluded, the query point is omitted
from the results.
Examples
--------
First construct an SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
... 'x2': [0.69, 0.58, 0.36]})
...
>>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')
Unlike the ``query`` method, there is no need for a second dataset with
``similarity_graph``.
>>> g = model.similarity_graph(k=1) # an SGraph
>>> g.edges
+----------+----------+----------------+------+
| __src_id | __dst_id | distance | rank |
+----------+----------+----------------+------+
| 0 | 1 | 0.376430604494 | 1 |
| 2 | 1 | 0.55542776308 | 1 |
| 1 | 0 | 0.376430604494 | 1 |
+----------+----------+----------------+------+
"""
## Validate inputs.
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'k': k,
'radius': radius,
'include_self_edges': include_self_edges}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts)
knn = result['neighbors']
if output_type == "SFrame":
return knn
else:
sg = _SGraph(edges=knn, src_field='query_label',
dst_field='reference_label')
return sg | [
"def",
"similarity_graph",
"(",
"self",
",",
"k",
"=",
"5",
",",
"radius",
"=",
"None",
",",
"include_self_edges",
"=",
"False",
",",
"output_type",
"=",
"'SGraph'",
",",
"verbose",
"=",
"True",
")",
":",
"## Validate inputs.",
"if",
"k",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"k",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"Input 'k' must be an integer.\"",
")",
"if",
"k",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'k' must be larger than 0.\"",
")",
"if",
"radius",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"radius",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Input 'radius' must be an integer or float.\"",
")",
"if",
"radius",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input 'radius' must be non-negative.\"",
")",
"## Set k and radius to special values to indicate 'None'",
"if",
"k",
"is",
"None",
":",
"k",
"=",
"-",
"1",
"if",
"radius",
"is",
"None",
":",
"radius",
"=",
"-",
"1.0",
"opts",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'model_name'",
":",
"self",
".",
"__name__",
",",
"'k'",
":",
"k",
",",
"'radius'",
":",
"radius",
",",
"'include_self_edges'",
":",
"include_self_edges",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"result",
"=",
"_turicreate",
".",
"extensions",
".",
"_nearest_neighbors",
".",
"similarity_graph",
"(",
"opts",
")",
"knn",
"=",
"result",
"[",
"'neighbors'",
"]",
"if",
"output_type",
"==",
"\"SFrame\"",
":",
"return",
"knn",
"else",
":",
"sg",
"=",
"_SGraph",
"(",
"edges",
"=",
"knn",
",",
"src_field",
"=",
"'query_label'",
",",
"dst_field",
"=",
"'reference_label'",
")",
"return",
"sg"
] | Construct the similarity graph on the reference dataset, which is
already stored in the model. This is conceptually very similar to
running `query` with the reference set, but this method is optimized
for the purpose, syntactically simpler, and automatically removes
self-edges.
Parameters
----------
k : int, optional
Maximum number of neighbors to return for each point in the
dataset. Setting this to ``None`` deactivates the constraint, so
that all neighbors are returned within ``radius`` of a given point.
radius : float, optional
For a given point, only neighbors within this distance are
returned. The default is ``None``, in which case the ``k`` nearest
neighbors are returned for each query point, regardless of
distance.
include_self_edges : bool, optional
For most distance functions, each point in the model's reference
dataset is its own nearest neighbor. If this parameter is set to
False, this result is ignored, and the nearest neighbors are
returned *excluding* the point itself.
output_type : {'SGraph', 'SFrame'}, optional
By default, the results are returned in the form of an SGraph,
where each point in the reference dataset is a vertex and an edge A
-> B indicates that vertex B is a nearest neighbor of vertex A. If
'output_type' is set to 'SFrame', the output is in the same form as
the results of the 'query' method: an SFrame with columns
indicating the query label (in this case the query data is the same
as the reference data), reference label, distance between the two
points, and the rank of the neighbor.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame or SGraph
The type of the output object depends on the 'output_type'
parameter. See the parameter description for more detail.
Notes
-----
- If both ``k`` and ``radius`` are set to ``None``, each data point is
matched to the entire dataset. If the reference dataset has
:math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an
SGraph with :math:`n^2` edges).
- For models created with the 'lsh' method, the output similarity graph
may have fewer vertices than there are data points in the original
reference set. Because LSH is an approximate method, a query point
may have fewer than 'k' neighbors. If LSH returns no neighbors at all
for a query and self-edges are excluded, the query point is omitted
from the results.
Examples
--------
First construct an SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
... 'x2': [0.69, 0.58, 0.36]})
...
>>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')
Unlike the ``query`` method, there is no need for a second dataset with
``similarity_graph``.
>>> g = model.similarity_graph(k=1) # an SGraph
>>> g.edges
+----------+----------+----------------+------+
| __src_id | __dst_id | distance | rank |
+----------+----------+----------------+------+
| 0 | 1 | 0.376430604494 | 1 |
| 2 | 1 | 0.55542776308 | 1 |
| 1 | 0 | 0.376430604494 | 1 |
+----------+----------+----------------+------+ | [
"Construct",
"the",
"similarity",
"graph",
"on",
"the",
"reference",
"dataset",
"which",
"is",
"already",
"stored",
"in",
"the",
"model",
".",
"This",
"is",
"conceptually",
"very",
"similar",
"to",
"running",
"query",
"with",
"the",
"reference",
"set",
"but",
"this",
"method",
"is",
"optimized",
"for",
"the",
"purpose",
"syntactically",
"simpler",
"and",
"automatically",
"removes",
"self",
"-",
"edges",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L937-L1060 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/activity_classifier/util.py | random_split_by_session | def random_split_by_session(dataset, session_id, fraction=0.9, seed=None):
"""
Randomly split an SFrame into two SFrames based on the `session_id` such
that one split contains data for a `fraction` of the sessions while the
second split contains all data for the rest of the sessions.
Parameters
----------
dataset : SFrame
Dataset to split. It must contain a column of session ids.
session_id : string, optional
The name of the column in `dataset` that corresponds to the
a unique identifier for each session.
fraction : float, optional
Fraction of the sessions to fetch for the first returned SFrame. Must
be between 0 and 1. Once the sessions are split, all data from a single
session is in the same SFrame.
seed : int, optional
Seed for the random number generator used to split.
Examples
--------
.. sourcecode:: python
# Split the data so that train has 90% of the users.
>>> train, valid = tc.activity_classifier.util.random_split_by_session(
... dataset, session_id='session_id', fraction=0.9)
# For example: If dataset has 2055 sessions
>>> len(dataset['session_id'].unique())
2055
# The training set now has 90% of the sessions
>>> len(train['session_id'].unique())
1850
# The validation set has the remaining 10% of the sessions
>>> len(valid['session_id'].unique())
205
"""
from random import Random
_raise_error_if_not_of_type(dataset, _SFrame, 'dataset')
_raise_error_if_not_of_type(session_id, str, 'session_id')
_raise_error_if_not_of_type(fraction, float, 'fraction')
_raise_error_if_not_of_type(seed, [int, type(None)], 'seed')
_numeric_param_check_range('fraction', fraction, 0, 1)
if session_id not in dataset.column_names():
raise _ToolkitError(
'Input "dataset" must contain a column called %s.' % session_id)
if seed is None:
# Include the nanosecond component as well.
import time
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# The cython bindings require this to be an int, so cast if we can.
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
random = Random()
# Create a random binary filter (boolean SArray), using the same probability across all lines
# that belong to the same session. In expectancy - the desired fraction of the sessions will
# go to the training set.
# Since boolean filters preserve order - there is no need to re-sort the lines within each session.
# The boolean filter is a pseudorandom function of the session_id and the
# global seed above, allowing the train-test split to vary across runs using
# the same dataset.
def random_session_pick(session_id_hash):
random.seed(session_id_hash)
return random.uniform(0, 1) < fraction
chosen_filter = dataset[session_id].hash(seed).apply(random_session_pick)
train = dataset[chosen_filter]
valid = dataset[1 - chosen_filter]
return train, valid | python | def random_split_by_session(dataset, session_id, fraction=0.9, seed=None):
"""
Randomly split an SFrame into two SFrames based on the `session_id` such
that one split contains data for a `fraction` of the sessions while the
second split contains all data for the rest of the sessions.
Parameters
----------
dataset : SFrame
Dataset to split. It must contain a column of session ids.
session_id : string, optional
The name of the column in `dataset` that corresponds to the
a unique identifier for each session.
fraction : float, optional
Fraction of the sessions to fetch for the first returned SFrame. Must
be between 0 and 1. Once the sessions are split, all data from a single
session is in the same SFrame.
seed : int, optional
Seed for the random number generator used to split.
Examples
--------
.. sourcecode:: python
# Split the data so that train has 90% of the users.
>>> train, valid = tc.activity_classifier.util.random_split_by_session(
... dataset, session_id='session_id', fraction=0.9)
# For example: If dataset has 2055 sessions
>>> len(dataset['session_id'].unique())
2055
# The training set now has 90% of the sessions
>>> len(train['session_id'].unique())
1850
# The validation set has the remaining 10% of the sessions
>>> len(valid['session_id'].unique())
205
"""
from random import Random
_raise_error_if_not_of_type(dataset, _SFrame, 'dataset')
_raise_error_if_not_of_type(session_id, str, 'session_id')
_raise_error_if_not_of_type(fraction, float, 'fraction')
_raise_error_if_not_of_type(seed, [int, type(None)], 'seed')
_numeric_param_check_range('fraction', fraction, 0, 1)
if session_id not in dataset.column_names():
raise _ToolkitError(
'Input "dataset" must contain a column called %s.' % session_id)
if seed is None:
# Include the nanosecond component as well.
import time
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# The cython bindings require this to be an int, so cast if we can.
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
random = Random()
# Create a random binary filter (boolean SArray), using the same probability across all lines
# that belong to the same session. In expectancy - the desired fraction of the sessions will
# go to the training set.
# Since boolean filters preserve order - there is no need to re-sort the lines within each session.
# The boolean filter is a pseudorandom function of the session_id and the
# global seed above, allowing the train-test split to vary across runs using
# the same dataset.
def random_session_pick(session_id_hash):
random.seed(session_id_hash)
return random.uniform(0, 1) < fraction
chosen_filter = dataset[session_id].hash(seed).apply(random_session_pick)
train = dataset[chosen_filter]
valid = dataset[1 - chosen_filter]
return train, valid | [
"def",
"random_split_by_session",
"(",
"dataset",
",",
"session_id",
",",
"fraction",
"=",
"0.9",
",",
"seed",
"=",
"None",
")",
":",
"from",
"random",
"import",
"Random",
"_raise_error_if_not_of_type",
"(",
"dataset",
",",
"_SFrame",
",",
"'dataset'",
")",
"_raise_error_if_not_of_type",
"(",
"session_id",
",",
"str",
",",
"'session_id'",
")",
"_raise_error_if_not_of_type",
"(",
"fraction",
",",
"float",
",",
"'fraction'",
")",
"_raise_error_if_not_of_type",
"(",
"seed",
",",
"[",
"int",
",",
"type",
"(",
"None",
")",
"]",
",",
"'seed'",
")",
"_numeric_param_check_range",
"(",
"'fraction'",
",",
"fraction",
",",
"0",
",",
"1",
")",
"if",
"session_id",
"not",
"in",
"dataset",
".",
"column_names",
"(",
")",
":",
"raise",
"_ToolkitError",
"(",
"'Input \"dataset\" must contain a column called %s.'",
"%",
"session_id",
")",
"if",
"seed",
"is",
"None",
":",
"# Include the nanosecond component as well.",
"import",
"time",
"seed",
"=",
"abs",
"(",
"hash",
"(",
"\"%0.20f\"",
"%",
"time",
".",
"time",
"(",
")",
")",
")",
"%",
"(",
"2",
"**",
"31",
")",
"# The cython bindings require this to be an int, so cast if we can.",
"try",
":",
"seed",
"=",
"int",
"(",
"seed",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'The \\'seed\\' parameter must be of type int.'",
")",
"random",
"=",
"Random",
"(",
")",
"# Create a random binary filter (boolean SArray), using the same probability across all lines",
"# that belong to the same session. In expectancy - the desired fraction of the sessions will",
"# go to the training set.",
"# Since boolean filters preserve order - there is no need to re-sort the lines within each session.",
"# The boolean filter is a pseudorandom function of the session_id and the",
"# global seed above, allowing the train-test split to vary across runs using",
"# the same dataset.",
"def",
"random_session_pick",
"(",
"session_id_hash",
")",
":",
"random",
".",
"seed",
"(",
"session_id_hash",
")",
"return",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"<",
"fraction",
"chosen_filter",
"=",
"dataset",
"[",
"session_id",
"]",
".",
"hash",
"(",
"seed",
")",
".",
"apply",
"(",
"random_session_pick",
")",
"train",
"=",
"dataset",
"[",
"chosen_filter",
"]",
"valid",
"=",
"dataset",
"[",
"1",
"-",
"chosen_filter",
"]",
"return",
"train",
",",
"valid"
] | Randomly split an SFrame into two SFrames based on the `session_id` such
that one split contains data for a `fraction` of the sessions while the
second split contains all data for the rest of the sessions.
Parameters
----------
dataset : SFrame
Dataset to split. It must contain a column of session ids.
session_id : string, optional
The name of the column in `dataset` that corresponds to the
a unique identifier for each session.
fraction : float, optional
Fraction of the sessions to fetch for the first returned SFrame. Must
be between 0 and 1. Once the sessions are split, all data from a single
session is in the same SFrame.
seed : int, optional
Seed for the random number generator used to split.
Examples
--------
.. sourcecode:: python
# Split the data so that train has 90% of the users.
>>> train, valid = tc.activity_classifier.util.random_split_by_session(
... dataset, session_id='session_id', fraction=0.9)
# For example: If dataset has 2055 sessions
>>> len(dataset['session_id'].unique())
2055
# The training set now has 90% of the sessions
>>> len(train['session_id'].unique())
1850
# The validation set has the remaining 10% of the sessions
>>> len(valid['session_id'].unique())
205 | [
"Randomly",
"split",
"an",
"SFrame",
"into",
"two",
"SFrames",
"based",
"on",
"the",
"session_id",
"such",
"that",
"one",
"split",
"contains",
"data",
"for",
"a",
"fraction",
"of",
"the",
"sessions",
"while",
"the",
"second",
"split",
"contains",
"all",
"data",
"for",
"the",
"rest",
"of",
"the",
"sessions",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/activity_classifier/util.py#L20-L104 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | read_msbuild_xml | def read_msbuild_xml(path, values={}):
"""Reads the MS Build XML file at the path and returns its contents.
Keyword arguments:
values -- The map to append the contents to (default {})
"""
# Attempt to read the file contents
try:
document = parse(path)
except Exception as e:
logging.exception('Could not read MS Build XML file at %s', path)
return values
# Convert the XML to JSON format
logging.info('Processing MS Build XML file at %s', path)
# Get the rule node
rule = document.getElementsByTagName('Rule')[0]
rule_name = rule.attributes['Name'].value
logging.info('Found rules for %s', rule_name)
# Proprocess Argument values
__preprocess_arguments(rule)
# Get all the values
converted_values = []
__convert(rule, 'EnumProperty', converted_values, __convert_enum)
__convert(rule, 'BoolProperty', converted_values, __convert_bool)
__convert(rule, 'StringListProperty', converted_values,
__convert_string_list)
__convert(rule, 'StringProperty', converted_values, __convert_string)
__convert(rule, 'IntProperty', converted_values, __convert_string)
values[rule_name] = converted_values
return values | python | def read_msbuild_xml(path, values={}):
"""Reads the MS Build XML file at the path and returns its contents.
Keyword arguments:
values -- The map to append the contents to (default {})
"""
# Attempt to read the file contents
try:
document = parse(path)
except Exception as e:
logging.exception('Could not read MS Build XML file at %s', path)
return values
# Convert the XML to JSON format
logging.info('Processing MS Build XML file at %s', path)
# Get the rule node
rule = document.getElementsByTagName('Rule')[0]
rule_name = rule.attributes['Name'].value
logging.info('Found rules for %s', rule_name)
# Proprocess Argument values
__preprocess_arguments(rule)
# Get all the values
converted_values = []
__convert(rule, 'EnumProperty', converted_values, __convert_enum)
__convert(rule, 'BoolProperty', converted_values, __convert_bool)
__convert(rule, 'StringListProperty', converted_values,
__convert_string_list)
__convert(rule, 'StringProperty', converted_values, __convert_string)
__convert(rule, 'IntProperty', converted_values, __convert_string)
values[rule_name] = converted_values
return values | [
"def",
"read_msbuild_xml",
"(",
"path",
",",
"values",
"=",
"{",
"}",
")",
":",
"# Attempt to read the file contents",
"try",
":",
"document",
"=",
"parse",
"(",
"path",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"'Could not read MS Build XML file at %s'",
",",
"path",
")",
"return",
"values",
"# Convert the XML to JSON format",
"logging",
".",
"info",
"(",
"'Processing MS Build XML file at %s'",
",",
"path",
")",
"# Get the rule node",
"rule",
"=",
"document",
".",
"getElementsByTagName",
"(",
"'Rule'",
")",
"[",
"0",
"]",
"rule_name",
"=",
"rule",
".",
"attributes",
"[",
"'Name'",
"]",
".",
"value",
"logging",
".",
"info",
"(",
"'Found rules for %s'",
",",
"rule_name",
")",
"# Proprocess Argument values",
"__preprocess_arguments",
"(",
"rule",
")",
"# Get all the values",
"converted_values",
"=",
"[",
"]",
"__convert",
"(",
"rule",
",",
"'EnumProperty'",
",",
"converted_values",
",",
"__convert_enum",
")",
"__convert",
"(",
"rule",
",",
"'BoolProperty'",
",",
"converted_values",
",",
"__convert_bool",
")",
"__convert",
"(",
"rule",
",",
"'StringListProperty'",
",",
"converted_values",
",",
"__convert_string_list",
")",
"__convert",
"(",
"rule",
",",
"'StringProperty'",
",",
"converted_values",
",",
"__convert_string",
")",
"__convert",
"(",
"rule",
",",
"'IntProperty'",
",",
"converted_values",
",",
"__convert_string",
")",
"values",
"[",
"rule_name",
"]",
"=",
"converted_values",
"return",
"values"
] | Reads the MS Build XML file at the path and returns its contents.
Keyword arguments:
values -- The map to append the contents to (default {}) | [
"Reads",
"the",
"MS",
"Build",
"XML",
"file",
"at",
"the",
"path",
"and",
"returns",
"its",
"contents",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L38-L76 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | read_msbuild_json | def read_msbuild_json(path, values=[]):
"""Reads the MS Build JSON file at the path and returns its contents.
Keyword arguments:
values -- The list to append the contents to (default [])
"""
if not os.path.exists(path):
logging.info('Could not find MS Build JSON file at %s', path)
return values
try:
values.extend(__read_json_file(path))
except Exception as e:
logging.exception('Could not read MS Build JSON file at %s', path)
return values
logging.info('Processing MS Build JSON file at %s', path)
return values | python | def read_msbuild_json(path, values=[]):
"""Reads the MS Build JSON file at the path and returns its contents.
Keyword arguments:
values -- The list to append the contents to (default [])
"""
if not os.path.exists(path):
logging.info('Could not find MS Build JSON file at %s', path)
return values
try:
values.extend(__read_json_file(path))
except Exception as e:
logging.exception('Could not read MS Build JSON file at %s', path)
return values
logging.info('Processing MS Build JSON file at %s', path)
return values | [
"def",
"read_msbuild_json",
"(",
"path",
",",
"values",
"=",
"[",
"]",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"logging",
".",
"info",
"(",
"'Could not find MS Build JSON file at %s'",
",",
"path",
")",
"return",
"values",
"try",
":",
"values",
".",
"extend",
"(",
"__read_json_file",
"(",
"path",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"'Could not read MS Build JSON file at %s'",
",",
"path",
")",
"return",
"values",
"logging",
".",
"info",
"(",
"'Processing MS Build JSON file at %s'",
",",
"path",
")",
"return",
"values"
] | Reads the MS Build JSON file at the path and returns its contents.
Keyword arguments:
values -- The list to append the contents to (default []) | [
"Reads",
"the",
"MS",
"Build",
"JSON",
"file",
"at",
"the",
"path",
"and",
"returns",
"its",
"contents",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L79-L97 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | main | def main():
"""Script entrypoint."""
# Parse the arguments
parser = argparse.ArgumentParser(
description='Convert MSBuild XML to JSON format')
parser.add_argument(
'-t', '--toolchain', help='The name of the toolchain', required=True)
parser.add_argument(
'-o', '--output', help='The output directory', default='')
parser.add_argument(
'-r',
'--overwrite',
help='Whether previously output should be overwritten',
dest='overwrite',
action='store_true')
parser.set_defaults(overwrite=False)
parser.add_argument(
'-d',
'--debug',
help="Debug tool output",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING)
parser.add_argument(
'-v',
'--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO)
parser.add_argument('input', help='The input files', nargs='+')
args = parser.parse_args()
toolchain = args.toolchain
logging.basicConfig(level=args.loglevel)
logging.info('Creating %s toolchain files', toolchain)
values = {}
# Iterate through the inputs
for input in args.input:
input = __get_path(input)
read_msbuild_xml(input, values)
# Determine if the output directory needs to be created
output_dir = __get_path(args.output)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
logging.info('Created output directory %s', output_dir)
for key, value in values.items():
output_path = __output_path(toolchain, key, output_dir)
if os.path.exists(output_path) and not args.overwrite:
logging.info('Comparing previous output to current')
__merge_json_values(value, read_msbuild_json(output_path))
else:
logging.info('Original output will be overwritten')
logging.info('Writing MS Build JSON file at %s', output_path)
__write_json_file(output_path, value) | python | def main():
"""Script entrypoint."""
# Parse the arguments
parser = argparse.ArgumentParser(
description='Convert MSBuild XML to JSON format')
parser.add_argument(
'-t', '--toolchain', help='The name of the toolchain', required=True)
parser.add_argument(
'-o', '--output', help='The output directory', default='')
parser.add_argument(
'-r',
'--overwrite',
help='Whether previously output should be overwritten',
dest='overwrite',
action='store_true')
parser.set_defaults(overwrite=False)
parser.add_argument(
'-d',
'--debug',
help="Debug tool output",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING)
parser.add_argument(
'-v',
'--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO)
parser.add_argument('input', help='The input files', nargs='+')
args = parser.parse_args()
toolchain = args.toolchain
logging.basicConfig(level=args.loglevel)
logging.info('Creating %s toolchain files', toolchain)
values = {}
# Iterate through the inputs
for input in args.input:
input = __get_path(input)
read_msbuild_xml(input, values)
# Determine if the output directory needs to be created
output_dir = __get_path(args.output)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
logging.info('Created output directory %s', output_dir)
for key, value in values.items():
output_path = __output_path(toolchain, key, output_dir)
if os.path.exists(output_path) and not args.overwrite:
logging.info('Comparing previous output to current')
__merge_json_values(value, read_msbuild_json(output_path))
else:
logging.info('Original output will be overwritten')
logging.info('Writing MS Build JSON file at %s', output_path)
__write_json_file(output_path, value) | [
"def",
"main",
"(",
")",
":",
"# Parse the arguments",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Convert MSBuild XML to JSON format'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--toolchain'",
",",
"help",
"=",
"'The name of the toolchain'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output'",
",",
"help",
"=",
"'The output directory'",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--overwrite'",
",",
"help",
"=",
"'Whether previously output should be overwritten'",
",",
"dest",
"=",
"'overwrite'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"set_defaults",
"(",
"overwrite",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"help",
"=",
"\"Debug tool output\"",
",",
"action",
"=",
"\"store_const\"",
",",
"dest",
"=",
"\"loglevel\"",
",",
"const",
"=",
"logging",
".",
"DEBUG",
",",
"default",
"=",
"logging",
".",
"WARNING",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"help",
"=",
"\"Verbose output\"",
",",
"action",
"=",
"\"store_const\"",
",",
"dest",
"=",
"\"loglevel\"",
",",
"const",
"=",
"logging",
".",
"INFO",
")",
"parser",
".",
"add_argument",
"(",
"'input'",
",",
"help",
"=",
"'The input files'",
",",
"nargs",
"=",
"'+'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"toolchain",
"=",
"args",
".",
"toolchain",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"args",
".",
"loglevel",
")",
"logging",
".",
"info",
"(",
"'Creating %s toolchain files'",
",",
"toolchain",
")",
"values",
"=",
"{",
"}",
"# Iterate through the inputs",
"for",
"input",
"in",
"args",
".",
"input",
":",
"input",
"=",
"__get_path",
"(",
"input",
")",
"read_msbuild_xml",
"(",
"input",
",",
"values",
")",
"# Determine if the output directory needs to be created",
"output_dir",
"=",
"__get_path",
"(",
"args",
".",
"output",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"output_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"output_dir",
")",
"logging",
".",
"info",
"(",
"'Created output directory %s'",
",",
"output_dir",
")",
"for",
"key",
",",
"value",
"in",
"values",
".",
"items",
"(",
")",
":",
"output_path",
"=",
"__output_path",
"(",
"toolchain",
",",
"key",
",",
"output_dir",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_path",
")",
"and",
"not",
"args",
".",
"overwrite",
":",
"logging",
".",
"info",
"(",
"'Comparing previous output to current'",
")",
"__merge_json_values",
"(",
"value",
",",
"read_msbuild_json",
"(",
"output_path",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Original output will be overwritten'",
")",
"logging",
".",
"info",
"(",
"'Writing MS Build JSON file at %s'",
",",
"output_path",
")",
"__write_json_file",
"(",
"output_path",
",",
"value",
")"
] | Script entrypoint. | [
"Script",
"entrypoint",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L100-L168 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __merge_json_values | def __merge_json_values(current, previous):
"""Merges the values between the current and previous run of the script."""
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value) | python | def __merge_json_values(current, previous):
"""Merges the values between the current and previous run of the script."""
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value) | [
"def",
"__merge_json_values",
"(",
"current",
",",
"previous",
")",
":",
"for",
"value",
"in",
"current",
":",
"name",
"=",
"value",
"[",
"'name'",
"]",
"# Find the previous value",
"previous_value",
"=",
"__find_and_remove_value",
"(",
"previous",
",",
"value",
")",
"if",
"previous_value",
"is",
"not",
"None",
":",
"flags",
"=",
"value",
"[",
"'flags'",
"]",
"previous_flags",
"=",
"previous_value",
"[",
"'flags'",
"]",
"if",
"flags",
"!=",
"previous_flags",
":",
"logging",
".",
"warning",
"(",
"'Flags for %s are different. Using previous value.'",
",",
"name",
")",
"value",
"[",
"'flags'",
"]",
"=",
"previous_flags",
"else",
":",
"logging",
".",
"warning",
"(",
"'Value %s is a new value'",
",",
"name",
")",
"for",
"value",
"in",
"previous",
":",
"name",
"=",
"value",
"[",
"'name'",
"]",
"logging",
".",
"warning",
"(",
"'Value %s not present in current run. Appending value.'",
",",
"name",
")",
"current",
".",
"append",
"(",
"value",
")"
] | Merges the values between the current and previous run of the script. | [
"Merges",
"the",
"values",
"between",
"the",
"current",
"and",
"previous",
"run",
"of",
"the",
"script",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L173-L198 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __find_and_remove_value | def __find_and_remove_value(list, compare):
"""Finds the value in the list that corresponds with the value of compare."""
# next throws if there are no matches
try:
found = next(value for value in list
if value['name'] == compare['name'] and value['switch'] ==
compare['switch'])
except:
return None
list.remove(found)
def __find_and_remove_value(list, compare):
  """Finds the value in the list that corresponds with the value of compare.

  An entry matches when both its 'name' and 'switch' equal those of
  `compare`.  On a match the entry is removed from `list` (mutated in
  place) and returned; otherwise `list` is left untouched and None is
  returned.

  Args:
    list: list of value dictionaries to search.  (The name shadows the
      builtin; kept for interface compatibility with existing callers.)
    compare: dictionary whose 'name' and 'switch' keys are matched.

  Returns:
    The matching dictionary, or None when no entry matches.
  """
  # next() raises StopIteration when nothing matches; a malformed entry
  # missing 'name'/'switch' raises KeyError.  Catch only those instead of
  # the previous bare `except:`, which also swallowed KeyboardInterrupt
  # and SystemExit.
  try:
    found = next(value for value in list
                 if value['name'] == compare['name'] and value['switch'] ==
                 compare['switch'])
  except (StopIteration, KeyError):
    return None
  list.remove(found)
  return found
"def",
"__find_and_remove_value",
"(",
"list",
",",
"compare",
")",
":",
"# next throws if there are no matches",
"try",
":",
"found",
"=",
"next",
"(",
"value",
"for",
"value",
"in",
"list",
"if",
"value",
"[",
"'name'",
"]",
"==",
"compare",
"[",
"'name'",
"]",
"and",
"value",
"[",
"'switch'",
"]",
"==",
"compare",
"[",
"'switch'",
"]",
")",
"except",
":",
"return",
"None",
"list",
".",
"remove",
"(",
"found",
")",
"return",
"found"
] | Finds the value in the list that corresponds with the value of compare. | [
"Finds",
"the",
"value",
"in",
"the",
"list",
"that",
"corresponds",
"with",
"the",
"value",
"of",
"compare",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L201-L213 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __convert | def __convert(root, tag, values, func):
"""Converts the tag type found in the root and converts them using the func
and appends them to the values.
"""
elements = root.getElementsByTagName(tag)
for element in elements:
converted = func(element)
# Append to the list
def __convert(root, tag, values, func):
  """Converts every `tag` element found under `root` using `func` and
  collects the converted results into `values`.
  """
  for element in root.getElementsByTagName(tag):
    # func may return a single dict or a list of dicts; __append_list
    # flattens either case into `values`.
    __append_list(values, func(element))
"def",
"__convert",
"(",
"root",
",",
"tag",
",",
"values",
",",
"func",
")",
":",
"elements",
"=",
"root",
".",
"getElementsByTagName",
"(",
"tag",
")",
"for",
"element",
"in",
"elements",
":",
"converted",
"=",
"func",
"(",
"element",
")",
"# Append to the list",
"__append_list",
"(",
"values",
",",
"converted",
")"
] | Converts the tag type found in the root and converts them using the func
and appends them to the values. | [
"Converts",
"the",
"tag",
"type",
"found",
"in",
"the",
"root",
"and",
"converts",
"them",
"using",
"the",
"func",
"and",
"appends",
"them",
"to",
"the",
"values",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L218-L228 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __convert_enum | def __convert_enum(node):
"""Converts an EnumProperty node to JSON format."""
name = __get_attribute(node, 'Name')
logging.debug('Found EnumProperty named %s', name)
converted_values = []
for value in node.getElementsByTagName('EnumValue'):
converted = __convert_node(value)
converted['value'] = converted['name']
converted['name'] = name
# Modify flags when there is an argument child
__with_argument(value, converted)
converted_values.append(converted)
def __convert_enum(node):
  """Converts an EnumProperty node to JSON format.

  Each EnumValue child becomes one JSON entry whose 'name' is the enum's
  name and whose 'value' is the child's own name.
  """
  enum_name = __get_attribute(node, 'Name')
  logging.debug('Found EnumProperty named %s', enum_name)

  results = []
  for enum_value in node.getElementsByTagName('EnumValue'):
    entry = __convert_node(enum_value)
    # The child's name becomes the JSON 'value'; the enum owns the 'name'.
    entry['value'] = entry['name']
    entry['name'] = enum_name
    # An Argument child changes the flag handling.
    __with_argument(enum_value, entry)
    results.append(entry)
  return results
"def",
"__convert_enum",
"(",
"node",
")",
":",
"name",
"=",
"__get_attribute",
"(",
"node",
",",
"'Name'",
")",
"logging",
".",
"debug",
"(",
"'Found EnumProperty named %s'",
",",
"name",
")",
"converted_values",
"=",
"[",
"]",
"for",
"value",
"in",
"node",
".",
"getElementsByTagName",
"(",
"'EnumValue'",
")",
":",
"converted",
"=",
"__convert_node",
"(",
"value",
")",
"converted",
"[",
"'value'",
"]",
"=",
"converted",
"[",
"'name'",
"]",
"converted",
"[",
"'name'",
"]",
"=",
"name",
"# Modify flags when there is an argument child",
"__with_argument",
"(",
"value",
",",
"converted",
")",
"converted_values",
".",
"append",
"(",
"converted",
")",
"return",
"converted_values"
] | Converts an EnumProperty node to JSON format. | [
"Converts",
"an",
"EnumProperty",
"node",
"to",
"JSON",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L231-L249 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __convert_bool | def __convert_bool(node):
"""Converts an BoolProperty node to JSON format."""
converted = __convert_node(node, default_value='true')
# Check for a switch for reversing the value
reverse_switch = __get_attribute(node, 'ReverseSwitch')
if reverse_switch:
converted_reverse = copy.deepcopy(converted)
converted_reverse['switch'] = reverse_switch
converted_reverse['value'] = 'false'
return [converted_reverse, converted]
# Modify flags when there is an argument child
__with_argument(node, converted)
def __convert_bool(node):
  """Converts a BoolProperty node to JSON format.

  A ReverseSwitch attribute yields a second, negated entry alongside the
  positive one.
  """
  entry = __convert_node(node, default_value='true')

  reverse = __get_attribute(node, 'ReverseSwitch')
  if reverse:
    # Duplicate the entry for the reversed form before any flag tweaks.
    negated = copy.deepcopy(entry)
    negated['switch'] = reverse
    negated['value'] = 'false'
    return [negated, entry]

  # An Argument child changes the flag handling.
  __with_argument(node, entry)
  return __check_for_flag(entry)
"def",
"__convert_bool",
"(",
"node",
")",
":",
"converted",
"=",
"__convert_node",
"(",
"node",
",",
"default_value",
"=",
"'true'",
")",
"# Check for a switch for reversing the value",
"reverse_switch",
"=",
"__get_attribute",
"(",
"node",
",",
"'ReverseSwitch'",
")",
"if",
"reverse_switch",
":",
"converted_reverse",
"=",
"copy",
".",
"deepcopy",
"(",
"converted",
")",
"converted_reverse",
"[",
"'switch'",
"]",
"=",
"reverse_switch",
"converted_reverse",
"[",
"'value'",
"]",
"=",
"'false'",
"return",
"[",
"converted_reverse",
",",
"converted",
"]",
"# Modify flags when there is an argument child",
"__with_argument",
"(",
"node",
",",
"converted",
")",
"return",
"__check_for_flag",
"(",
"converted",
")"
] | Converts an BoolProperty node to JSON format. | [
"Converts",
"an",
"BoolProperty",
"node",
"to",
"JSON",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L252-L270 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __convert_string_list | def __convert_string_list(node):
"""Converts a StringListProperty node to JSON format."""
converted = __convert_node(node)
# Determine flags for the string list
flags = vsflags(VSFlags.UserValue)
# Check for a separator to determine if it is semicolon appendable
# If not present assume the value should be ;
separator = __get_attribute(node, 'Separator', default_value=';')
if separator == ';':
flags = vsflags(flags, VSFlags.SemicolonAppendable)
converted['flags'] = flags
def __convert_string_list(node):
  """Converts a StringListProperty node to JSON format.

  A ';' separator (also the default when the attribute is absent) marks
  the list as semicolon appendable.
  """
  entry = __convert_node(node)

  flag_list = vsflags(VSFlags.UserValue)
  separator = __get_attribute(node, 'Separator', default_value=';')
  if separator == ';':
    flag_list = vsflags(flag_list, VSFlags.SemicolonAppendable)
  entry['flags'] = flag_list
  return __check_for_flag(entry)
"def",
"__convert_string_list",
"(",
"node",
")",
":",
"converted",
"=",
"__convert_node",
"(",
"node",
")",
"# Determine flags for the string list",
"flags",
"=",
"vsflags",
"(",
"VSFlags",
".",
"UserValue",
")",
"# Check for a separator to determine if it is semicolon appendable",
"# If not present assume the value should be ;",
"separator",
"=",
"__get_attribute",
"(",
"node",
",",
"'Separator'",
",",
"default_value",
"=",
"';'",
")",
"if",
"separator",
"==",
"';'",
":",
"flags",
"=",
"vsflags",
"(",
"flags",
",",
"VSFlags",
".",
"SemicolonAppendable",
")",
"converted",
"[",
"'flags'",
"]",
"=",
"flags",
"return",
"__check_for_flag",
"(",
"converted",
")"
] | Converts a StringListProperty node to JSON format. | [
"Converts",
"a",
"StringListProperty",
"node",
"to",
"JSON",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L273-L289 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __convert_string | def __convert_string(node):
"""Converts a StringProperty node to JSON format."""
converted = __convert_node(node, default_flags=vsflags(VSFlags.UserValue))
def __convert_string(node):
  """Converts a StringProperty node to JSON format.

  String properties always carry a user supplied value.
  """
  entry = __convert_node(node, default_flags=vsflags(VSFlags.UserValue))
  return __check_for_flag(entry)
"def",
"__convert_string",
"(",
"node",
")",
":",
"converted",
"=",
"__convert_node",
"(",
"node",
",",
"default_flags",
"=",
"vsflags",
"(",
"VSFlags",
".",
"UserValue",
")",
")",
"return",
"__check_for_flag",
"(",
"converted",
")"
] | Converts a StringProperty node to JSON format. | [
"Converts",
"a",
"StringProperty",
"node",
"to",
"JSON",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L292-L296 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __convert_node | def __convert_node(node, default_value='', default_flags=vsflags()):
"""Converts a XML node to a JSON equivalent."""
name = __get_attribute(node, 'Name')
logging.debug('Found %s named %s', node.tagName, name)
converted = {}
converted['name'] = name
converted['switch'] = __get_attribute(node, 'Switch')
converted['comment'] = __get_attribute(node, 'DisplayName')
converted['value'] = default_value
# Check for the Flags attribute in case it was created during preprocessing
flags = __get_attribute(node, 'Flags')
if flags:
flags = flags.split(',')
else:
flags = default_flags
converted['flags'] = flags
def __convert_node(node, default_value='', default_flags=None):
  """Converts a XML node to a JSON equivalent.

  Args:
    node: the XML element to convert.
    default_value: value stored under 'value' in the result.
    default_flags: flags used when the node carries no 'Flags' attribute.
      When None (the default) an empty vsflags() list is created per call.

  Returns:
    Dictionary with 'name', 'switch', 'comment', 'value' and 'flags' keys.
  """
  # NOTE: the previous signature used `default_flags=vsflags()`, a mutable
  # list evaluated once at import time and shared across every call (and
  # stored into each result).  A None sentinel with a per-call vsflags()
  # avoids that cross-call aliasing while keeping behavior identical.
  if default_flags is None:
    default_flags = vsflags()
  name = __get_attribute(node, 'Name')
  logging.debug('Found %s named %s', node.tagName, name)
  converted = {}
  converted['name'] = name
  converted['switch'] = __get_attribute(node, 'Switch')
  converted['comment'] = __get_attribute(node, 'DisplayName')
  converted['value'] = default_value
  # Check for the Flags attribute in case it was created during preprocessing
  flags = __get_attribute(node, 'Flags')
  if flags:
    flags = flags.split(',')
  else:
    flags = default_flags
  converted['flags'] = flags
  return converted
"def",
"__convert_node",
"(",
"node",
",",
"default_value",
"=",
"''",
",",
"default_flags",
"=",
"vsflags",
"(",
")",
")",
":",
"name",
"=",
"__get_attribute",
"(",
"node",
",",
"'Name'",
")",
"logging",
".",
"debug",
"(",
"'Found %s named %s'",
",",
"node",
".",
"tagName",
",",
"name",
")",
"converted",
"=",
"{",
"}",
"converted",
"[",
"'name'",
"]",
"=",
"name",
"converted",
"[",
"'switch'",
"]",
"=",
"__get_attribute",
"(",
"node",
",",
"'Switch'",
")",
"converted",
"[",
"'comment'",
"]",
"=",
"__get_attribute",
"(",
"node",
",",
"'DisplayName'",
")",
"converted",
"[",
"'value'",
"]",
"=",
"default_value",
"# Check for the Flags attribute in case it was created during preprocessing",
"flags",
"=",
"__get_attribute",
"(",
"node",
",",
"'Flags'",
")",
"if",
"flags",
":",
"flags",
"=",
"flags",
".",
"split",
"(",
"','",
")",
"else",
":",
"flags",
"=",
"default_flags",
"converted",
"[",
"'flags'",
"]",
"=",
"flags",
"return",
"converted"
] | Converts a XML node to a JSON equivalent. | [
"Converts",
"a",
"XML",
"node",
"to",
"a",
"JSON",
"equivalent",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L299-L320 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __with_argument | def __with_argument(node, value):
"""Modifies the flags in value if the node contains an Argument."""
arguments = node.getElementsByTagName('Argument')
if arguments:
logging.debug('Found argument within %s', value['name'])
def __with_argument(node, value):
  """Modifies the flags in value if the node contains an Argument."""
  # Any Argument child means the user's value is ignored and processing
  # continues with the argument's own property.
  if node.getElementsByTagName('Argument'):
    logging.debug('Found argument within %s', value['name'])
    value['flags'] = vsflags(VSFlags.UserValueIgnored, VSFlags.Continue)
"def",
"__with_argument",
"(",
"node",
",",
"value",
")",
":",
"arguments",
"=",
"node",
".",
"getElementsByTagName",
"(",
"'Argument'",
")",
"if",
"arguments",
":",
"logging",
".",
"debug",
"(",
"'Found argument within %s'",
",",
"value",
"[",
"'name'",
"]",
")",
"value",
"[",
"'flags'",
"]",
"=",
"vsflags",
"(",
"VSFlags",
".",
"UserValueIgnored",
",",
"VSFlags",
".",
"Continue",
")"
] | Modifies the flags in value if the node contains an Argument. | [
"Modifies",
"the",
"flags",
"in",
"value",
"if",
"the",
"node",
"contains",
"an",
"Argument",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L336-L342 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __preprocess_arguments | def __preprocess_arguments(root):
"""Preprocesses occurrences of Argument within the root.
Argument XML values reference other values within the document by name. The
referenced value does not contain a switch. This function will add the
switch associated with the argument.
"""
# Set the flags to require a value
flags = ','.join(vsflags(VSFlags.UserValueRequired))
# Search through the arguments
arguments = root.getElementsByTagName('Argument')
for argument in arguments:
reference = __get_attribute(argument, 'Property')
found = None
# Look for the argument within the root's children
for child in root.childNodes:
# Ignore Text nodes
if isinstance(child, Element):
name = __get_attribute(child, 'Name')
if name == reference:
found = child
break
if found is not None:
logging.info('Found property named %s', reference)
# Get the associated switch
switch = __get_attribute(argument.parentNode, 'Switch')
# See if there is already a switch associated with the element.
if __get_attribute(found, 'Switch'):
logging.debug('Copying node %s', reference)
clone = found.cloneNode(True)
root.insertBefore(clone, found)
found = clone
found.setAttribute('Switch', switch)
found.setAttribute('Flags', flags)
else:
def __preprocess_arguments(root):
  """Preprocesses occurrences of Argument within the root.

  Argument XML values reference other values within the document by name.
  The referenced value does not contain a switch.  This function will add
  the switch associated with the argument.
  """
  # Arguments always require a user supplied value.
  required_flags = ','.join(vsflags(VSFlags.UserValueRequired))

  for argument in root.getElementsByTagName('Argument'):
    target_name = __get_attribute(argument, 'Property')

    # Locate the referenced property among root's direct children,
    # skipping text nodes.
    target = None
    for candidate in root.childNodes:
      if (isinstance(candidate, Element) and
          __get_attribute(candidate, 'Name') == target_name):
        target = candidate
        break

    if target is None:
      logging.warning('Could not find property named %s', target_name)
      continue

    logging.info('Found property named %s', target_name)
    # The switch comes from the argument's enclosing element.
    switch = __get_attribute(argument.parentNode, 'Switch')
    # If the property already carries a switch, clone it so the original
    # stays intact and the copy receives the argument's switch.
    if __get_attribute(target, 'Switch'):
      logging.debug('Copying node %s', target_name)
      duplicate = target.cloneNode(True)
      root.insertBefore(duplicate, target)
      target = duplicate
    target.setAttribute('Switch', switch)
    target.setAttribute('Flags', required_flags)
"def",
"__preprocess_arguments",
"(",
"root",
")",
":",
"# Set the flags to require a value",
"flags",
"=",
"','",
".",
"join",
"(",
"vsflags",
"(",
"VSFlags",
".",
"UserValueRequired",
")",
")",
"# Search through the arguments",
"arguments",
"=",
"root",
".",
"getElementsByTagName",
"(",
"'Argument'",
")",
"for",
"argument",
"in",
"arguments",
":",
"reference",
"=",
"__get_attribute",
"(",
"argument",
",",
"'Property'",
")",
"found",
"=",
"None",
"# Look for the argument within the root's children",
"for",
"child",
"in",
"root",
".",
"childNodes",
":",
"# Ignore Text nodes",
"if",
"isinstance",
"(",
"child",
",",
"Element",
")",
":",
"name",
"=",
"__get_attribute",
"(",
"child",
",",
"'Name'",
")",
"if",
"name",
"==",
"reference",
":",
"found",
"=",
"child",
"break",
"if",
"found",
"is",
"not",
"None",
":",
"logging",
".",
"info",
"(",
"'Found property named %s'",
",",
"reference",
")",
"# Get the associated switch",
"switch",
"=",
"__get_attribute",
"(",
"argument",
".",
"parentNode",
",",
"'Switch'",
")",
"# See if there is already a switch associated with the element.",
"if",
"__get_attribute",
"(",
"found",
",",
"'Switch'",
")",
":",
"logging",
".",
"debug",
"(",
"'Copying node %s'",
",",
"reference",
")",
"clone",
"=",
"found",
".",
"cloneNode",
"(",
"True",
")",
"root",
".",
"insertBefore",
"(",
"clone",
",",
"found",
")",
"found",
"=",
"clone",
"found",
".",
"setAttribute",
"(",
"'Switch'",
",",
"switch",
")",
"found",
".",
"setAttribute",
"(",
"'Flags'",
",",
"flags",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"'Could not find property named %s'",
",",
"reference",
")"
] | Preprocesses occurrences of Argument within the root.
Argument XML values reference other values within the document by name. The
referenced value does not contain a switch. This function will add the
switch associated with the argument. | [
"Preprocesses",
"occurrences",
"of",
"Argument",
"within",
"the",
"root",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L345-L387 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __get_attribute | def __get_attribute(node, name, default_value=''):
"""Retrieves the attribute of the given name from the node.
If not present then the default_value is used.
"""
if node.hasAttribute(name):
return node.attributes[name].value.strip()
else:
def __get_attribute(node, name, default_value=''):
  """Retrieves the attribute of the given name from the node.

  If not present then the default_value is used.  Present values are
  whitespace-stripped.
  """
  if not node.hasAttribute(name):
    return default_value
  return node.attributes[name].value.strip()
"def",
"__get_attribute",
"(",
"node",
",",
"name",
",",
"default_value",
"=",
"''",
")",
":",
"if",
"node",
".",
"hasAttribute",
"(",
"name",
")",
":",
"return",
"node",
".",
"attributes",
"[",
"name",
"]",
".",
"value",
".",
"strip",
"(",
")",
"else",
":",
"return",
"default_value"
] | Retrieves the attribute of the given name from the node.
If not present then the default_value is used. | [
"Retrieves",
"the",
"attribute",
"of",
"the",
"given",
"name",
"from",
"the",
"node",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L390-L398 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __get_path | def __get_path(path):
"""Gets the path to the file."""
if not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
def __get_path(path):
  """Gets the normalized, absolute path to the file.

  Relative paths are resolved against the current working directory.
  """
  if os.path.isabs(path):
    return os.path.normpath(path)
  return os.path.normpath(os.path.join(os.getcwd(), path))
"def",
"__get_path",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"path",
")",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"path",
")"
] | Gets the path to the file. | [
"Gets",
"the",
"path",
"to",
"the",
"file",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L403-L408 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __output_path | def __output_path(toolchain, rule, output_dir):
"""Gets the output path for a file given the toolchain, rule and output_dir"""
filename = '%s_%s.json' % (toolchain, rule)
def __output_path(toolchain, rule, output_dir):
  """Gets the output path for a file given the toolchain, rule and output_dir"""
  # Output files are named "<toolchain>_<rule>.json" inside output_dir.
  return os.path.join(output_dir, '%s_%s.json' % (toolchain, rule))
"def",
"__output_path",
"(",
"toolchain",
",",
"rule",
",",
"output_dir",
")",
":",
"filename",
"=",
"'%s_%s.json'",
"%",
"(",
"toolchain",
",",
"rule",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"filename",
")"
] | Gets the output path for a file given the toolchain, rule and output_dir | [
"Gets",
"the",
"output",
"path",
"for",
"a",
"file",
"given",
"the",
"toolchain",
"rule",
"and",
"output_dir"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L411-L414 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __write_json_file | def __write_json_file(path, values):
"""Writes a JSON file at the path with the values provided."""
# Sort the keys to ensure ordering
sort_order = ['name', 'switch', 'comment', 'value', 'flags']
sorted_values = [
OrderedDict(
sorted(
value.items(), key=lambda value: sort_order.index(value[0])))
for value in values
]
with open(path, 'w') as f:
def __write_json_file(path, values):
  """Writes a JSON file at the path with the values provided.

  Keys are emitted in a fixed, human-friendly order.
  """
  key_order = ['name', 'switch', 'comment', 'value', 'flags']
  ordered = []
  for value in values:
    pairs = sorted(value.items(), key=lambda pair: key_order.index(pair[0]))
    ordered.append(OrderedDict(pairs))
  with open(path, 'w') as out:
    json.dump(ordered, out, indent=2, separators=(',', ': '))
"def",
"__write_json_file",
"(",
"path",
",",
"values",
")",
":",
"# Sort the keys to ensure ordering",
"sort_order",
"=",
"[",
"'name'",
",",
"'switch'",
",",
"'comment'",
",",
"'value'",
",",
"'flags'",
"]",
"sorted_values",
"=",
"[",
"OrderedDict",
"(",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"value",
":",
"sort_order",
".",
"index",
"(",
"value",
"[",
"0",
"]",
")",
")",
")",
"for",
"value",
"in",
"values",
"]",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"sorted_values",
",",
"f",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")"
] | Writes a JSON file at the path with the values provided. | [
"Writes",
"a",
"JSON",
"file",
"at",
"the",
"path",
"with",
"the",
"values",
"provided",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L425-L437 | train |
apple/turicreate | deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py | __append_list | def __append_list(append_to, value):
"""Appends the value to the list."""
if value is not None:
if isinstance(value, list):
append_to.extend(value)
else:
def __append_list(append_to, value):
  """Appends the value to the list.

  Lists are flattened (extend); None is ignored; anything else is
  appended as a single element.
  """
  if value is None:
    return
  if isinstance(value, list):
    append_to.extend(value)
  else:
    append_to.append(value)
"def",
"__append_list",
"(",
"append_to",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"append_to",
".",
"extend",
"(",
"value",
")",
"else",
":",
"append_to",
".",
"append",
"(",
"value",
")"
] | Appends the value to the list. | [
"Appends",
"the",
"value",
"to",
"the",
"list",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L442-L448 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/__init__.py | decompile_func | def decompile_func(func):
'''
Decompile a function into ast.FunctionDef node.
:param func: python function (can not be a built-in)
:return: ast.FunctionDef instance.
'''
code = func.__code__
# For python 3
# defaults = func.func_defaults if sys.version_info.major < 3 else func.__defaults__
# if defaults:
# default_names = code.co_varnames[:code.co_argcount][-len(defaults):]
# else:
# default_names = []
# defaults = [_ast.Name(id='%s_default' % name, ctx=_ast.Load() , lineno=0, col_offset=0) for name in default_names]
ast_node = make_function(code, defaults=[], lineno=code.co_firstlineno)
def decompile_func(func):
    '''
    Decompile a function into ast.FunctionDef node.

    :param func: python function (can not be a built-in)
    :return: ast.FunctionDef instance.
    '''
    code_obj = func.__code__
    # Defaults are intentionally not reconstructed here (make_function is
    # given an empty list); the function's first line anchors the AST.
    return make_function(code_obj, defaults=[], lineno=code_obj.co_firstlineno)
"def",
"decompile_func",
"(",
"func",
")",
":",
"code",
"=",
"func",
".",
"__code__",
"# For python 3",
"# defaults = func.func_defaults if sys.version_info.major < 3 else func.__defaults__",
"# if defaults:",
"# default_names = code.co_varnames[:code.co_argcount][-len(defaults):]",
"# else:",
"# default_names = []",
"# defaults = [_ast.Name(id='%s_default' % name, ctx=_ast.Load() , lineno=0, col_offset=0) for name in default_names]",
"ast_node",
"=",
"make_function",
"(",
"code",
",",
"defaults",
"=",
"[",
"]",
",",
"lineno",
"=",
"code",
".",
"co_firstlineno",
")",
"return",
"ast_node"
] | Decompile a function into ast.FunctionDef node.
:param func: python function (can not be a built-in)
:return: ast.FunctionDef instance. | [
"Decompile",
"a",
"function",
"into",
"ast",
".",
"FunctionDef",
"node",
".",
":",
"param",
"func",
":",
"python",
"function",
"(",
"can",
"not",
"be",
"a",
"built",
"-",
"in",
")",
":",
"return",
":",
"ast",
".",
"FunctionDef",
"instance",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/__init__.py#L25-L44 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/__init__.py | compile_func | def compile_func(ast_node, filename, globals, **defaults):
'''
Compile a function from an ast.FunctionDef instance.
:param ast_node: ast.FunctionDef instance
:param filename: path where function source can be found.
:param globals: will be used as func_globals
:return: A python function object
'''
function_name = ast_node.name
module = _ast.Module(body=[ast_node])
ctx = {'%s_default' % key : arg for key, arg in defaults.items()}
code = compile(module, filename, 'exec')
eval(code, globals, ctx)
function = ctx[function_name]
def compile_func(ast_node, filename, globals, **defaults):
    '''
    Compile a function from an ast.FunctionDef instance.

    :param ast_node: ast.FunctionDef instance
    :param filename: path where function source can be found.
    :param globals: will be used as func_globals
    :param defaults: extra values exposed to the compiled body as
        ``<name>_default`` locals.
    :return: A python function object
    '''
    function_name = ast_node.name
    # Python 3.8+ made 'type_ignores' a required field of ast.Module;
    # without it compile() raises TypeError.  Older versions reject the
    # keyword, so gate on the interpreter version.
    if sys.version_info >= (3, 8):
        module = _ast.Module(body=[ast_node], type_ignores=[])
    else:
        module = _ast.Module(body=[ast_node])
    ctx = {'%s_default' % key: arg for key, arg in defaults.items()}
    code = compile(module, filename, 'exec')
    # eval() happily executes 'exec'-mode code objects; the function
    # definition lands in ctx.
    eval(code, globals, ctx)
    function = ctx[function_name]
    return function
"def",
"compile_func",
"(",
"ast_node",
",",
"filename",
",",
"globals",
",",
"*",
"*",
"defaults",
")",
":",
"function_name",
"=",
"ast_node",
".",
"name",
"module",
"=",
"_ast",
".",
"Module",
"(",
"body",
"=",
"[",
"ast_node",
"]",
")",
"ctx",
"=",
"{",
"'%s_default'",
"%",
"key",
":",
"arg",
"for",
"key",
",",
"arg",
"in",
"defaults",
".",
"items",
"(",
")",
"}",
"code",
"=",
"compile",
"(",
"module",
",",
"filename",
",",
"'exec'",
")",
"eval",
"(",
"code",
",",
"globals",
",",
"ctx",
")",
"function",
"=",
"ctx",
"[",
"function_name",
"]",
"return",
"function"
] | Compile a function from an ast.FunctionDef instance.
:param ast_node: ast.FunctionDef instance
:param filename: path where function source can be found.
:param globals: will be used as func_globals
:return: A python function object | [
"Compile",
"a",
"function",
"from",
"an",
"ast",
".",
"FunctionDef",
"instance",
".",
":",
"param",
"ast_node",
":",
"ast",
".",
"FunctionDef",
"instance",
":",
"param",
"filename",
":",
"path",
"where",
"function",
"source",
"can",
"be",
"found",
".",
":",
"param",
"globals",
":",
"will",
"be",
"used",
"as",
"func_globals",
":",
"return",
":",
"A",
"python",
"function",
"object"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/__init__.py#L46-L68 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/__init__.py | decompile_pyc | def decompile_pyc(bin_pyc, output=sys.stdout):
'''
decompile apython pyc or pyo binary file.
:param bin_pyc: input file objects
:param output: output file objects
'''
from turicreate.meta.asttools import python_source
bin = bin_pyc.read()
code = marshal.loads(bin[8:])
mod_ast = make_module(code)
python_source(mod_ast, file=output) | python | def decompile_pyc(bin_pyc, output=sys.stdout):
'''
decompile apython pyc or pyo binary file.
:param bin_pyc: input file objects
:param output: output file objects
'''
from turicreate.meta.asttools import python_source
bin = bin_pyc.read()
code = marshal.loads(bin[8:])
mod_ast = make_module(code)
python_source(mod_ast, file=output) | [
"def",
"decompile_pyc",
"(",
"bin_pyc",
",",
"output",
"=",
"sys",
".",
"stdout",
")",
":",
"from",
"turicreate",
".",
"meta",
".",
"asttools",
"import",
"python_source",
"bin",
"=",
"bin_pyc",
".",
"read",
"(",
")",
"code",
"=",
"marshal",
".",
"loads",
"(",
"bin",
"[",
"8",
":",
"]",
")",
"mod_ast",
"=",
"make_module",
"(",
"code",
")",
"python_source",
"(",
"mod_ast",
",",
"file",
"=",
"output",
")"
] | decompile apython pyc or pyo binary file.
:param bin_pyc: input file objects
:param output: output file objects | [
"decompile",
"apython",
"pyc",
"or",
"pyo",
"binary",
"file",
".",
":",
"param",
"bin_pyc",
":",
"input",
"file",
"objects",
":",
"param",
"output",
":",
"output",
"file",
"objects"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/__init__.py#L89-L105 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py | MacroCollection.ParseInput | def ParseInput(self, a_file):
"""Consumes input extracting definitions.
Args:
a_file: The file like stream to parse.
Raises:
PDDMError if there are any issues.
"""
input_lines = a_file.read().splitlines()
self.ParseLines(input_lines) | python | def ParseInput(self, a_file):
"""Consumes input extracting definitions.
Args:
a_file: The file like stream to parse.
Raises:
PDDMError if there are any issues.
"""
input_lines = a_file.read().splitlines()
self.ParseLines(input_lines) | [
"def",
"ParseInput",
"(",
"self",
",",
"a_file",
")",
":",
"input_lines",
"=",
"a_file",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"self",
".",
"ParseLines",
"(",
"input_lines",
")"
] | Consumes input extracting definitions.
Args:
a_file: The file like stream to parse.
Raises:
PDDMError if there are any issues. | [
"Consumes",
"input",
"extracting",
"definitions",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L182-L192 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py | MacroCollection.ParseLines | def ParseLines(self, input_lines):
"""Parses list of lines.
Args:
input_lines: A list of strings of input to parse (no newlines on the
strings).
Raises:
PDDMError if there are any issues.
"""
current_macro = None
for line in input_lines:
if line.startswith('PDDM-'):
directive = line.split(' ', 1)[0]
if directive == 'PDDM-DEFINE':
name, args = self._ParseDefineLine(line)
if self._macros.get(name):
raise PDDMError('Attempt to redefine macro: "%s"' % line)
current_macro = self.MacroDefinition(name, args)
self._macros[name] = current_macro
continue
if directive == 'PDDM-DEFINE-END':
if not current_macro:
raise PDDMError('Got DEFINE-END directive without an active macro:'
' "%s"' % line)
current_macro = None
continue
raise PDDMError('Hit a line with an unknown directive: "%s"' % line)
if current_macro:
current_macro.AppendLine(line)
continue
# Allow blank lines between macro definitions.
if line.strip() == '':
continue
raise PDDMError('Hit a line that wasn\'t a directive and no open macro'
' definition: "%s"' % line) | python | def ParseLines(self, input_lines):
"""Parses list of lines.
Args:
input_lines: A list of strings of input to parse (no newlines on the
strings).
Raises:
PDDMError if there are any issues.
"""
current_macro = None
for line in input_lines:
if line.startswith('PDDM-'):
directive = line.split(' ', 1)[0]
if directive == 'PDDM-DEFINE':
name, args = self._ParseDefineLine(line)
if self._macros.get(name):
raise PDDMError('Attempt to redefine macro: "%s"' % line)
current_macro = self.MacroDefinition(name, args)
self._macros[name] = current_macro
continue
if directive == 'PDDM-DEFINE-END':
if not current_macro:
raise PDDMError('Got DEFINE-END directive without an active macro:'
' "%s"' % line)
current_macro = None
continue
raise PDDMError('Hit a line with an unknown directive: "%s"' % line)
if current_macro:
current_macro.AppendLine(line)
continue
# Allow blank lines between macro definitions.
if line.strip() == '':
continue
raise PDDMError('Hit a line that wasn\'t a directive and no open macro'
' definition: "%s"' % line) | [
"def",
"ParseLines",
"(",
"self",
",",
"input_lines",
")",
":",
"current_macro",
"=",
"None",
"for",
"line",
"in",
"input_lines",
":",
"if",
"line",
".",
"startswith",
"(",
"'PDDM-'",
")",
":",
"directive",
"=",
"line",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"0",
"]",
"if",
"directive",
"==",
"'PDDM-DEFINE'",
":",
"name",
",",
"args",
"=",
"self",
".",
"_ParseDefineLine",
"(",
"line",
")",
"if",
"self",
".",
"_macros",
".",
"get",
"(",
"name",
")",
":",
"raise",
"PDDMError",
"(",
"'Attempt to redefine macro: \"%s\"'",
"%",
"line",
")",
"current_macro",
"=",
"self",
".",
"MacroDefinition",
"(",
"name",
",",
"args",
")",
"self",
".",
"_macros",
"[",
"name",
"]",
"=",
"current_macro",
"continue",
"if",
"directive",
"==",
"'PDDM-DEFINE-END'",
":",
"if",
"not",
"current_macro",
":",
"raise",
"PDDMError",
"(",
"'Got DEFINE-END directive without an active macro:'",
"' \"%s\"'",
"%",
"line",
")",
"current_macro",
"=",
"None",
"continue",
"raise",
"PDDMError",
"(",
"'Hit a line with an unknown directive: \"%s\"'",
"%",
"line",
")",
"if",
"current_macro",
":",
"current_macro",
".",
"AppendLine",
"(",
"line",
")",
"continue",
"# Allow blank lines between macro definitions.",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"''",
":",
"continue",
"raise",
"PDDMError",
"(",
"'Hit a line that wasn\\'t a directive and no open macro'",
"' definition: \"%s\"'",
"%",
"line",
")"
] | Parses list of lines.
Args:
input_lines: A list of strings of input to parse (no newlines on the
strings).
Raises:
PDDMError if there are any issues. | [
"Parses",
"list",
"of",
"lines",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L194-L232 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py | MacroCollection.Expand | def Expand(self, macro_ref_str):
"""Expands the macro reference.
Args:
macro_ref_str: String of a macro reference (i.e. foo(a, b)).
Returns:
The text from the expansion.
Raises:
PDDMError if there are any issues.
"""
match = _MACRO_RE.match(macro_ref_str)
if match is None or match.group(0) != macro_ref_str:
raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str)
if match.group('name') not in self._macros:
raise PDDMError('No macro named "%s".' % match.group('name'))
return self._Expand(match, [], macro_ref_str) | python | def Expand(self, macro_ref_str):
"""Expands the macro reference.
Args:
macro_ref_str: String of a macro reference (i.e. foo(a, b)).
Returns:
The text from the expansion.
Raises:
PDDMError if there are any issues.
"""
match = _MACRO_RE.match(macro_ref_str)
if match is None or match.group(0) != macro_ref_str:
raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str)
if match.group('name') not in self._macros:
raise PDDMError('No macro named "%s".' % match.group('name'))
return self._Expand(match, [], macro_ref_str) | [
"def",
"Expand",
"(",
"self",
",",
"macro_ref_str",
")",
":",
"match",
"=",
"_MACRO_RE",
".",
"match",
"(",
"macro_ref_str",
")",
"if",
"match",
"is",
"None",
"or",
"match",
".",
"group",
"(",
"0",
")",
"!=",
"macro_ref_str",
":",
"raise",
"PDDMError",
"(",
"'Failed to parse macro reference: \"%s\"'",
"%",
"macro_ref_str",
")",
"if",
"match",
".",
"group",
"(",
"'name'",
")",
"not",
"in",
"self",
".",
"_macros",
":",
"raise",
"PDDMError",
"(",
"'No macro named \"%s\".'",
"%",
"match",
".",
"group",
"(",
"'name'",
")",
")",
"return",
"self",
".",
"_Expand",
"(",
"match",
",",
"[",
"]",
",",
"macro_ref_str",
")"
] | Expands the macro reference.
Args:
macro_ref_str: String of a macro reference (i.e. foo(a, b)).
Returns:
The text from the expansion.
Raises:
PDDMError if there are any issues. | [
"Expands",
"the",
"macro",
"reference",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L259-L276 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py | SourceFile.ProcessContent | def ProcessContent(self, strip_expansion=False):
"""Processes the file contents."""
self._ParseFile()
if strip_expansion:
# Without a collection the expansions become blank, removing them.
collection = None
else:
collection = MacroCollection()
for section in self._sections:
section.BindMacroCollection(collection)
result = ''
for section in self._sections:
result += section.text
self._processed_content = result | python | def ProcessContent(self, strip_expansion=False):
"""Processes the file contents."""
self._ParseFile()
if strip_expansion:
# Without a collection the expansions become blank, removing them.
collection = None
else:
collection = MacroCollection()
for section in self._sections:
section.BindMacroCollection(collection)
result = ''
for section in self._sections:
result += section.text
self._processed_content = result | [
"def",
"ProcessContent",
"(",
"self",
",",
"strip_expansion",
"=",
"False",
")",
":",
"self",
".",
"_ParseFile",
"(",
")",
"if",
"strip_expansion",
":",
"# Without a collection the expansions become blank, removing them.",
"collection",
"=",
"None",
"else",
":",
"collection",
"=",
"MacroCollection",
"(",
")",
"for",
"section",
"in",
"self",
".",
"_sections",
":",
"section",
".",
"BindMacroCollection",
"(",
"collection",
")",
"result",
"=",
"''",
"for",
"section",
"in",
"self",
".",
"_sections",
":",
"result",
"+=",
"section",
".",
"text",
"self",
".",
"_processed_content",
"=",
"result"
] | Processes the file contents. | [
"Processes",
"the",
"file",
"contents",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/objectivec/DevTools/pddm.py#L601-L614 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_standard_scaler.py | convert | def convert(model, input_features, output_features):
"""Convert a _imputer model to the protobuf spec.
Parameters
----------
model: Imputer
A trained Imputer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, StandardScaler)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'mean_'))
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'scale_'))
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
spec = _set_transform_interface_params(spec, input_features, output_features)
# Set the parameters
tr_spec = spec.scaler
for x in model.mean_:
tr_spec.shiftValue.append(-x)
for x in model.scale_:
tr_spec.scaleValue.append(1.0 / x)
return _MLModel(spec) | python | def convert(model, input_features, output_features):
"""Convert a _imputer model to the protobuf spec.
Parameters
----------
model: Imputer
A trained Imputer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, StandardScaler)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'mean_'))
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'scale_'))
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
spec = _set_transform_interface_params(spec, input_features, output_features)
# Set the parameters
tr_spec = spec.scaler
for x in model.mean_:
tr_spec.shiftValue.append(-x)
for x in model.scale_:
tr_spec.scaleValue.append(1.0 / x)
return _MLModel(spec) | [
"def",
"convert",
"(",
"model",
",",
"input_features",
",",
"output_features",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"# Test the scikit-learn model",
"_sklearn_util",
".",
"check_expected_type",
"(",
"model",
",",
"StandardScaler",
")",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"lambda",
"m",
":",
"hasattr",
"(",
"m",
",",
"'mean_'",
")",
")",
"_sklearn_util",
".",
"check_fitted",
"(",
"model",
",",
"lambda",
"m",
":",
"hasattr",
"(",
"m",
",",
"'scale_'",
")",
")",
"# Set the interface params.",
"spec",
"=",
"_Model_pb2",
".",
"Model",
"(",
")",
"spec",
".",
"specificationVersion",
"=",
"SPECIFICATION_VERSION",
"spec",
"=",
"_set_transform_interface_params",
"(",
"spec",
",",
"input_features",
",",
"output_features",
")",
"# Set the parameters",
"tr_spec",
"=",
"spec",
".",
"scaler",
"for",
"x",
"in",
"model",
".",
"mean_",
":",
"tr_spec",
".",
"shiftValue",
".",
"append",
"(",
"-",
"x",
")",
"for",
"x",
"in",
"model",
".",
"scale_",
":",
"tr_spec",
".",
"scaleValue",
".",
"append",
"(",
"1.0",
"/",
"x",
")",
"return",
"_MLModel",
"(",
"spec",
")"
] | Convert a _imputer model to the protobuf spec.
Parameters
----------
model: Imputer
A trained Imputer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"_imputer",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_standard_scaler.py#L24-L64 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | reset | def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __all_attributes, __all_features, __implicit_features, __composite_properties
global __subfeature_from_value, __all_top_features, __free_features
global __all_subfeatures
# sets the default value of False for each valid attribute
for attr in VALID_ATTRIBUTES:
setattr(Feature, attr.replace("-", "_"), False)
# A map containing all features. The key is the feature name.
# The value is an instance of Feature class.
__all_features = {}
# All non-subfeatures.
__all_top_features = []
# Maps valus to the corresponding implicit feature
__implicit_features = {}
# A map containing all composite properties. The key is a Property instance,
# and the value is a list of Property instances
__composite_properties = {}
# Maps a value to the corresponding subfeature name.
__subfeature_from_value = {}
# All free features
__free_features = []
__all_subfeatures = [] | python | def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __all_attributes, __all_features, __implicit_features, __composite_properties
global __subfeature_from_value, __all_top_features, __free_features
global __all_subfeatures
# sets the default value of False for each valid attribute
for attr in VALID_ATTRIBUTES:
setattr(Feature, attr.replace("-", "_"), False)
# A map containing all features. The key is the feature name.
# The value is an instance of Feature class.
__all_features = {}
# All non-subfeatures.
__all_top_features = []
# Maps valus to the corresponding implicit feature
__implicit_features = {}
# A map containing all composite properties. The key is a Property instance,
# and the value is a list of Property instances
__composite_properties = {}
# Maps a value to the corresponding subfeature name.
__subfeature_from_value = {}
# All free features
__free_features = []
__all_subfeatures = [] | [
"def",
"reset",
"(",
")",
":",
"global",
"__all_attributes",
",",
"__all_features",
",",
"__implicit_features",
",",
"__composite_properties",
"global",
"__subfeature_from_value",
",",
"__all_top_features",
",",
"__free_features",
"global",
"__all_subfeatures",
"# sets the default value of False for each valid attribute",
"for",
"attr",
"in",
"VALID_ATTRIBUTES",
":",
"setattr",
"(",
"Feature",
",",
"attr",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
",",
"False",
")",
"# A map containing all features. The key is the feature name.",
"# The value is an instance of Feature class.",
"__all_features",
"=",
"{",
"}",
"# All non-subfeatures.",
"__all_top_features",
"=",
"[",
"]",
"# Maps valus to the corresponding implicit feature",
"__implicit_features",
"=",
"{",
"}",
"# A map containing all composite properties. The key is a Property instance,",
"# and the value is a list of Property instances",
"__composite_properties",
"=",
"{",
"}",
"# Maps a value to the corresponding subfeature name.",
"__subfeature_from_value",
"=",
"{",
"}",
"# All free features",
"__free_features",
"=",
"[",
"]",
"__all_subfeatures",
"=",
"[",
"]"
] | Clear the module state. This is mainly for testing purposes. | [
"Clear",
"the",
"module",
"state",
".",
"This",
"is",
"mainly",
"for",
"testing",
"purposes",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L85-L116 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | feature | def feature (name, values, attributes = []):
""" Declares a new feature with the given name, values, and attributes.
name: the feature name
values: a sequence of the allowable values - may be extended later with feature.extend
attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...)
"""
__validate_feature_attributes (name, attributes)
feature = Feature(name, [], attributes)
__all_features[name] = feature
# Temporary measure while we have not fully moved from 'gristed strings'
__all_features["<" + name + ">"] = feature
name = add_grist(name)
if 'subfeature' in attributes:
__all_subfeatures.append(name)
else:
__all_top_features.append(feature)
extend (name, values)
# FIXME: why his is needed.
if 'free' in attributes:
__free_features.append (name)
return feature | python | def feature (name, values, attributes = []):
""" Declares a new feature with the given name, values, and attributes.
name: the feature name
values: a sequence of the allowable values - may be extended later with feature.extend
attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...)
"""
__validate_feature_attributes (name, attributes)
feature = Feature(name, [], attributes)
__all_features[name] = feature
# Temporary measure while we have not fully moved from 'gristed strings'
__all_features["<" + name + ">"] = feature
name = add_grist(name)
if 'subfeature' in attributes:
__all_subfeatures.append(name)
else:
__all_top_features.append(feature)
extend (name, values)
# FIXME: why his is needed.
if 'free' in attributes:
__free_features.append (name)
return feature | [
"def",
"feature",
"(",
"name",
",",
"values",
",",
"attributes",
"=",
"[",
"]",
")",
":",
"__validate_feature_attributes",
"(",
"name",
",",
"attributes",
")",
"feature",
"=",
"Feature",
"(",
"name",
",",
"[",
"]",
",",
"attributes",
")",
"__all_features",
"[",
"name",
"]",
"=",
"feature",
"# Temporary measure while we have not fully moved from 'gristed strings'",
"__all_features",
"[",
"\"<\"",
"+",
"name",
"+",
"\">\"",
"]",
"=",
"feature",
"name",
"=",
"add_grist",
"(",
"name",
")",
"if",
"'subfeature'",
"in",
"attributes",
":",
"__all_subfeatures",
".",
"append",
"(",
"name",
")",
"else",
":",
"__all_top_features",
".",
"append",
"(",
"feature",
")",
"extend",
"(",
"name",
",",
"values",
")",
"# FIXME: why his is needed.",
"if",
"'free'",
"in",
"attributes",
":",
"__free_features",
".",
"append",
"(",
"name",
")",
"return",
"feature"
] | Declares a new feature with the given name, values, and attributes.
name: the feature name
values: a sequence of the allowable values - may be extended later with feature.extend
attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...) | [
"Declares",
"a",
"new",
"feature",
"with",
"the",
"given",
"name",
"values",
"and",
"attributes",
".",
"name",
":",
"the",
"feature",
"name",
"values",
":",
"a",
"sequence",
"of",
"the",
"allowable",
"values",
"-",
"may",
"be",
"extended",
"later",
"with",
"feature",
".",
"extend",
"attributes",
":",
"a",
"sequence",
"of",
"the",
"feature",
"s",
"attributes",
"(",
"e",
".",
"g",
".",
"implicit",
"free",
"propagated",
"...",
")"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L136-L162 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | set_default | def set_default (feature, value):
""" Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign
"""
f = __all_features[feature]
bad_attribute = None
if f.free:
bad_attribute = "free"
elif f.optional:
bad_attribute = "optional"
if bad_attribute:
raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name))
if value not in f.values:
raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values)
f.set_default(value) | python | def set_default (feature, value):
""" Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign
"""
f = __all_features[feature]
bad_attribute = None
if f.free:
bad_attribute = "free"
elif f.optional:
bad_attribute = "optional"
if bad_attribute:
raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name))
if value not in f.values:
raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values)
f.set_default(value) | [
"def",
"set_default",
"(",
"feature",
",",
"value",
")",
":",
"f",
"=",
"__all_features",
"[",
"feature",
"]",
"bad_attribute",
"=",
"None",
"if",
"f",
".",
"free",
":",
"bad_attribute",
"=",
"\"free\"",
"elif",
"f",
".",
"optional",
":",
"bad_attribute",
"=",
"\"optional\"",
"if",
"bad_attribute",
":",
"raise",
"InvalidValue",
"(",
"\"%s property %s cannot have a default\"",
"%",
"(",
"bad_attribute",
",",
"f",
".",
"name",
")",
")",
"if",
"value",
"not",
"in",
"f",
".",
"values",
":",
"raise",
"InvalidValue",
"(",
"\"The specified default value, '%s' is invalid.\\n\"",
"%",
"value",
"+",
"\"allowed values are: %s\"",
"%",
"f",
".",
"values",
")",
"f",
".",
"set_default",
"(",
"value",
")"
] | Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign | [
"Sets",
"the",
"default",
"value",
"of",
"the",
"given",
"feature",
"overriding",
"any",
"previous",
"default",
".",
"feature",
":",
"the",
"name",
"of",
"the",
"feature",
"value",
":",
"the",
"default",
"value",
"to",
"assign"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L165-L184 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | defaults | def defaults(features):
""" Returns the default property values for the given features.
"""
assert is_iterable_typed(features, Feature)
# FIXME: should merge feature and property modules.
from . import property
result = []
for f in features:
if not f.free and not f.optional and f.default:
result.append(property.Property(f, f.default))
return result | python | def defaults(features):
""" Returns the default property values for the given features.
"""
assert is_iterable_typed(features, Feature)
# FIXME: should merge feature and property modules.
from . import property
result = []
for f in features:
if not f.free and not f.optional and f.default:
result.append(property.Property(f, f.default))
return result | [
"def",
"defaults",
"(",
"features",
")",
":",
"assert",
"is_iterable_typed",
"(",
"features",
",",
"Feature",
")",
"# FIXME: should merge feature and property modules.",
"from",
".",
"import",
"property",
"result",
"=",
"[",
"]",
"for",
"f",
"in",
"features",
":",
"if",
"not",
"f",
".",
"free",
"and",
"not",
"f",
".",
"optional",
"and",
"f",
".",
"default",
":",
"result",
".",
"append",
"(",
"property",
".",
"Property",
"(",
"f",
",",
"f",
".",
"default",
")",
")",
"return",
"result"
] | Returns the default property values for the given features. | [
"Returns",
"the",
"default",
"property",
"values",
"for",
"the",
"given",
"features",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L186-L198 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | valid | def valid (names):
""" Returns true iff all elements of names are valid features.
"""
if isinstance(names, str):
names = [names]
assert is_iterable_typed(names, basestring)
return all(name in __all_features for name in names) | python | def valid (names):
""" Returns true iff all elements of names are valid features.
"""
if isinstance(names, str):
names = [names]
assert is_iterable_typed(names, basestring)
return all(name in __all_features for name in names) | [
"def",
"valid",
"(",
"names",
")",
":",
"if",
"isinstance",
"(",
"names",
",",
"str",
")",
":",
"names",
"=",
"[",
"names",
"]",
"assert",
"is_iterable_typed",
"(",
"names",
",",
"basestring",
")",
"return",
"all",
"(",
"name",
"in",
"__all_features",
"for",
"name",
"in",
"names",
")"
] | Returns true iff all elements of names are valid features. | [
"Returns",
"true",
"iff",
"all",
"elements",
"of",
"names",
"are",
"valid",
"features",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L200-L207 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | values | def values (feature):
""" Return the values of the given feature.
"""
assert isinstance(feature, basestring)
validate_feature (feature)
return __all_features[feature].values | python | def values (feature):
""" Return the values of the given feature.
"""
assert isinstance(feature, basestring)
validate_feature (feature)
return __all_features[feature].values | [
"def",
"values",
"(",
"feature",
")",
":",
"assert",
"isinstance",
"(",
"feature",
",",
"basestring",
")",
"validate_feature",
"(",
"feature",
")",
"return",
"__all_features",
"[",
"feature",
"]",
".",
"values"
] | Return the values of the given feature. | [
"Return",
"the",
"values",
"of",
"the",
"given",
"feature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L215-L220 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/feature.py | is_implicit_value | def is_implicit_value (value_string):
""" Returns true iff 'value_string' is a value_string
of an implicit feature.
"""
assert isinstance(value_string, basestring)
if value_string in __implicit_features:
return __implicit_features[value_string]
v = value_string.split('-')
if v[0] not in __implicit_features:
return False
feature = __implicit_features[v[0]]
for subvalue in (v[1:]):
if not __find_implied_subfeature(feature, subvalue, v[0]):
return False
return True | python | def is_implicit_value (value_string):
""" Returns true iff 'value_string' is a value_string
of an implicit feature.
"""
assert isinstance(value_string, basestring)
if value_string in __implicit_features:
return __implicit_features[value_string]
v = value_string.split('-')
if v[0] not in __implicit_features:
return False
feature = __implicit_features[v[0]]
for subvalue in (v[1:]):
if not __find_implied_subfeature(feature, subvalue, v[0]):
return False
return True | [
"def",
"is_implicit_value",
"(",
"value_string",
")",
":",
"assert",
"isinstance",
"(",
"value_string",
",",
"basestring",
")",
"if",
"value_string",
"in",
"__implicit_features",
":",
"return",
"__implicit_features",
"[",
"value_string",
"]",
"v",
"=",
"value_string",
".",
"split",
"(",
"'-'",
")",
"if",
"v",
"[",
"0",
"]",
"not",
"in",
"__implicit_features",
":",
"return",
"False",
"feature",
"=",
"__implicit_features",
"[",
"v",
"[",
"0",
"]",
"]",
"for",
"subvalue",
"in",
"(",
"v",
"[",
"1",
":",
"]",
")",
":",
"if",
"not",
"__find_implied_subfeature",
"(",
"feature",
",",
"subvalue",
",",
"v",
"[",
"0",
"]",
")",
":",
"return",
"False",
"return",
"True"
] | Returns true iff 'value_string' is a value_string
of an implicit feature. | [
"Returns",
"true",
"iff",
"value_string",
"is",
"a",
"value_string",
"of",
"an",
"implicit",
"feature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L222-L241 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.