repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_convolution1d | def convert_convolution1d(builder, layer, input_names, output_names, keras_layer):
"""
Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
# Get the weights from _keras.
# Keras stores convolution weights as list of numpy arrays
weightList = keras_layer.get_weights()
output_shape = list(filter(None, keras_layer.output_shape))[:-1]
# Parameter
filter_length, input_dim, n_filters = weightList[0].shape
stride_width = keras_layer.strides if type(keras_layer.strides) is int \
else keras_layer.strides[0]
# Weights and bias terms
W = _np.expand_dims(weightList[0],axis=0)
b = weightList[1] if has_bias else None
dilations = [1,1]
if (type(keras_layer.dilation_rate) is list) or \
(type(keras_layer.dilation_rate) is tuple):
dilations = [1, keras_layer.dilation_rate[0]]
else:
dilations = [1, keras_layer.dilation_rate]
keras_padding = keras_layer.padding
if keras_padding == 'causal':
builder.add_padding(name = layer + '__causal_pad__',
left = filter_length-1, right=0, top=0, bottom=0, value= 0,
input_name = input_name,
output_name= input_name + '__causal_pad__')
input_name = input_name + '__causal_pad__'
keras_padding = 'valid'
builder.add_convolution(name = layer,
kernel_channels = input_dim,
output_channels = n_filters,
height = 1,
width = filter_length,
stride_height = 1,
stride_width = stride_width,
border_mode = keras_padding,
groups = 1,
W = W,
b = b,
has_bias = has_bias,
is_deconv = False,
output_shape = output_shape,
input_name = input_name,
output_name = output_name,
dilation_factors = dilations) | python | def convert_convolution1d(builder, layer, input_names, output_names, keras_layer):
"""
Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
# Get the weights from _keras.
# Keras stores convolution weights as list of numpy arrays
weightList = keras_layer.get_weights()
output_shape = list(filter(None, keras_layer.output_shape))[:-1]
# Parameter
filter_length, input_dim, n_filters = weightList[0].shape
stride_width = keras_layer.strides if type(keras_layer.strides) is int \
else keras_layer.strides[0]
# Weights and bias terms
W = _np.expand_dims(weightList[0],axis=0)
b = weightList[1] if has_bias else None
dilations = [1,1]
if (type(keras_layer.dilation_rate) is list) or \
(type(keras_layer.dilation_rate) is tuple):
dilations = [1, keras_layer.dilation_rate[0]]
else:
dilations = [1, keras_layer.dilation_rate]
keras_padding = keras_layer.padding
if keras_padding == 'causal':
builder.add_padding(name = layer + '__causal_pad__',
left = filter_length-1, right=0, top=0, bottom=0, value= 0,
input_name = input_name,
output_name= input_name + '__causal_pad__')
input_name = input_name + '__causal_pad__'
keras_padding = 'valid'
builder.add_convolution(name = layer,
kernel_channels = input_dim,
output_channels = n_filters,
height = 1,
width = filter_length,
stride_height = 1,
stride_width = stride_width,
border_mode = keras_padding,
groups = 1,
W = W,
b = b,
has_bias = has_bias,
is_deconv = False,
output_shape = output_shape,
input_name = input_name,
output_name = output_name,
dilation_factors = dilations) | [
"def",
"convert_convolution1d",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"has_bias",
"=",
"keras_layer",
".",
"use_bias",
"# Get the weights from _keras.",
"# Keras stores convolution weights as list of numpy arrays",
"weightList",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"output_shape",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"keras_layer",
".",
"output_shape",
")",
")",
"[",
":",
"-",
"1",
"]",
"# Parameter",
"filter_length",
",",
"input_dim",
",",
"n_filters",
"=",
"weightList",
"[",
"0",
"]",
".",
"shape",
"stride_width",
"=",
"keras_layer",
".",
"strides",
"if",
"type",
"(",
"keras_layer",
".",
"strides",
")",
"is",
"int",
"else",
"keras_layer",
".",
"strides",
"[",
"0",
"]",
"# Weights and bias terms",
"W",
"=",
"_np",
".",
"expand_dims",
"(",
"weightList",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
")",
"b",
"=",
"weightList",
"[",
"1",
"]",
"if",
"has_bias",
"else",
"None",
"dilations",
"=",
"[",
"1",
",",
"1",
"]",
"if",
"(",
"type",
"(",
"keras_layer",
".",
"dilation_rate",
")",
"is",
"list",
")",
"or",
"(",
"type",
"(",
"keras_layer",
".",
"dilation_rate",
")",
"is",
"tuple",
")",
":",
"dilations",
"=",
"[",
"1",
",",
"keras_layer",
".",
"dilation_rate",
"[",
"0",
"]",
"]",
"else",
":",
"dilations",
"=",
"[",
"1",
",",
"keras_layer",
".",
"dilation_rate",
"]",
"keras_padding",
"=",
"keras_layer",
".",
"padding",
"if",
"keras_padding",
"==",
"'causal'",
":",
"builder",
".",
"add_padding",
"(",
"name",
"=",
"layer",
"+",
"'__causal_pad__'",
",",
"left",
"=",
"filter_length",
"-",
"1",
",",
"right",
"=",
"0",
",",
"top",
"=",
"0",
",",
"bottom",
"=",
"0",
",",
"value",
"=",
"0",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"input_name",
"+",
"'__causal_pad__'",
")",
"input_name",
"=",
"input_name",
"+",
"'__causal_pad__'",
"keras_padding",
"=",
"'valid'",
"builder",
".",
"add_convolution",
"(",
"name",
"=",
"layer",
",",
"kernel_channels",
"=",
"input_dim",
",",
"output_channels",
"=",
"n_filters",
",",
"height",
"=",
"1",
",",
"width",
"=",
"filter_length",
",",
"stride_height",
"=",
"1",
",",
"stride_width",
"=",
"stride_width",
",",
"border_mode",
"=",
"keras_padding",
",",
"groups",
"=",
"1",
",",
"W",
"=",
"W",
",",
"b",
"=",
"b",
",",
"has_bias",
"=",
"has_bias",
",",
"is_deconv",
"=",
"False",
",",
"output_shape",
"=",
"output_shape",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
",",
"dilation_factors",
"=",
"dilations",
")"
] | Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"convolution",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L386-L449 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_separable_convolution | def convert_separable_convolution(builder, layer, input_names, output_names, keras_layer):
"""
Convert separable convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
# Get the weights from _keras.
weight_list = keras_layer.get_weights()
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_channels = output_blob_shape[-1]
# D: depth mutliplier
# w[0] is (H,W,Cin,D)
# w[1] is (1,1,Cin * D, Cout)
W0 = weight_list[0]
W1 = weight_list[1]
height, width, input_channels, depth_mult = W0.shape
b = weight_list[2] if has_bias else None
W0 = _np.reshape(W0, (height, width, 1, input_channels * depth_mult))
stride_height, stride_width = keras_layer.strides
# Dilations
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
intermediate_name = output_name + '_intermin_'
builder.add_convolution(name = layer + '_step_1',
kernel_channels = 1,
output_channels = input_channels * depth_mult,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
border_mode = keras_layer.padding,
groups = input_channels,
W = W0,
b = None,
has_bias = False,
is_deconv = False,
output_shape = None,
input_name = input_name,
output_name = intermediate_name,
dilation_factors = dilations)
builder.add_convolution(name = layer + '_step_2',
kernel_channels = input_channels * depth_mult,
output_channels = output_channels,
height = 1,
width = 1,
stride_height = 1,
stride_width = 1,
border_mode = keras_layer.padding,
groups = 1,
W = W1,
b = b,
has_bias = has_bias,
is_deconv = False,
output_shape = None,
input_name = intermediate_name,
output_name = output_name,
dilation_factors = [1,1]) | python | def convert_separable_convolution(builder, layer, input_names, output_names, keras_layer):
"""
Convert separable convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
# Get the weights from _keras.
weight_list = keras_layer.get_weights()
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_channels = output_blob_shape[-1]
# D: depth mutliplier
# w[0] is (H,W,Cin,D)
# w[1] is (1,1,Cin * D, Cout)
W0 = weight_list[0]
W1 = weight_list[1]
height, width, input_channels, depth_mult = W0.shape
b = weight_list[2] if has_bias else None
W0 = _np.reshape(W0, (height, width, 1, input_channels * depth_mult))
stride_height, stride_width = keras_layer.strides
# Dilations
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
intermediate_name = output_name + '_intermin_'
builder.add_convolution(name = layer + '_step_1',
kernel_channels = 1,
output_channels = input_channels * depth_mult,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
border_mode = keras_layer.padding,
groups = input_channels,
W = W0,
b = None,
has_bias = False,
is_deconv = False,
output_shape = None,
input_name = input_name,
output_name = intermediate_name,
dilation_factors = dilations)
builder.add_convolution(name = layer + '_step_2',
kernel_channels = input_channels * depth_mult,
output_channels = output_channels,
height = 1,
width = 1,
stride_height = 1,
stride_width = 1,
border_mode = keras_layer.padding,
groups = 1,
W = W1,
b = b,
has_bias = has_bias,
is_deconv = False,
output_shape = None,
input_name = intermediate_name,
output_name = output_name,
dilation_factors = [1,1]) | [
"def",
"convert_separable_convolution",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"_check_data_format",
"(",
"keras_layer",
")",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"has_bias",
"=",
"keras_layer",
".",
"use_bias",
"# Get the weights from _keras.",
"weight_list",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"output_blob_shape",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"keras_layer",
".",
"output_shape",
")",
")",
"output_channels",
"=",
"output_blob_shape",
"[",
"-",
"1",
"]",
"# D: depth mutliplier",
"# w[0] is (H,W,Cin,D)",
"# w[1] is (1,1,Cin * D, Cout)",
"W0",
"=",
"weight_list",
"[",
"0",
"]",
"W1",
"=",
"weight_list",
"[",
"1",
"]",
"height",
",",
"width",
",",
"input_channels",
",",
"depth_mult",
"=",
"W0",
".",
"shape",
"b",
"=",
"weight_list",
"[",
"2",
"]",
"if",
"has_bias",
"else",
"None",
"W0",
"=",
"_np",
".",
"reshape",
"(",
"W0",
",",
"(",
"height",
",",
"width",
",",
"1",
",",
"input_channels",
"*",
"depth_mult",
")",
")",
"stride_height",
",",
"stride_width",
"=",
"keras_layer",
".",
"strides",
"# Dilations",
"if",
"(",
"type",
"(",
"keras_layer",
".",
"dilation_rate",
")",
"is",
"list",
")",
"or",
"(",
"type",
"(",
"keras_layer",
".",
"dilation_rate",
")",
"is",
"tuple",
")",
":",
"dilations",
"=",
"[",
"keras_layer",
".",
"dilation_rate",
"[",
"0",
"]",
",",
"keras_layer",
".",
"dilation_rate",
"[",
"1",
"]",
"]",
"else",
":",
"dilations",
"=",
"[",
"keras_layer",
".",
"dilation_rate",
",",
"keras_layer",
".",
"dilation_rate",
"]",
"intermediate_name",
"=",
"output_name",
"+",
"'_intermin_'",
"builder",
".",
"add_convolution",
"(",
"name",
"=",
"layer",
"+",
"'_step_1'",
",",
"kernel_channels",
"=",
"1",
",",
"output_channels",
"=",
"input_channels",
"*",
"depth_mult",
",",
"height",
"=",
"height",
",",
"width",
"=",
"width",
",",
"stride_height",
"=",
"stride_height",
",",
"stride_width",
"=",
"stride_width",
",",
"border_mode",
"=",
"keras_layer",
".",
"padding",
",",
"groups",
"=",
"input_channels",
",",
"W",
"=",
"W0",
",",
"b",
"=",
"None",
",",
"has_bias",
"=",
"False",
",",
"is_deconv",
"=",
"False",
",",
"output_shape",
"=",
"None",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"intermediate_name",
",",
"dilation_factors",
"=",
"dilations",
")",
"builder",
".",
"add_convolution",
"(",
"name",
"=",
"layer",
"+",
"'_step_2'",
",",
"kernel_channels",
"=",
"input_channels",
"*",
"depth_mult",
",",
"output_channels",
"=",
"output_channels",
",",
"height",
"=",
"1",
",",
"width",
"=",
"1",
",",
"stride_height",
"=",
"1",
",",
"stride_width",
"=",
"1",
",",
"border_mode",
"=",
"keras_layer",
".",
"padding",
",",
"groups",
"=",
"1",
",",
"W",
"=",
"W1",
",",
"b",
"=",
"b",
",",
"has_bias",
"=",
"has_bias",
",",
"is_deconv",
"=",
"False",
",",
"output_shape",
"=",
"None",
",",
"input_name",
"=",
"intermediate_name",
",",
"output_name",
"=",
"output_name",
",",
"dilation_factors",
"=",
"[",
"1",
",",
"1",
"]",
")"
] | Convert separable convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"separable",
"convolution",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L451-L529 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_batchnorm | def convert_batchnorm(builder, layer, input_names, output_names, keras_layer):
"""
Convert a Batch Normalization layer.
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
axis = keras_layer.axis
nb_channels = keras_layer.input_shape[axis]
# Set parameters
# Parameter arrangement in Keras: gamma, beta, mean, variance
idx = 0
gamma, beta = None, None
if keras_layer.scale:
gamma = keras_layer.get_weights()[idx]
idx += 1
if keras_layer.center:
beta = keras_layer.get_weights()[idx]
idx += 1
mean = keras_layer.get_weights()[idx]
std = keras_layer.get_weights()[idx+1]
gamma = _np.ones(mean.shape) if gamma is None else gamma
beta = _np.zeros(mean.shape) if beta is None else beta
# compute adjusted parameters
variance = std * std
f = 1.0 / _np.sqrt(std + keras_layer.epsilon)
gamma1 = gamma*f
beta1 = beta - gamma*mean*f
mean[:] = 0.0 #mean
variance[:] = 1.0 - .00001 #stddev
builder.add_batchnorm(
name = layer,
channels = nb_channels,
gamma = gamma1,
beta = beta1,
mean = mean,
variance = variance,
input_name = input_name,
output_name = output_name) | python | def convert_batchnorm(builder, layer, input_names, output_names, keras_layer):
"""
Convert a Batch Normalization layer.
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
axis = keras_layer.axis
nb_channels = keras_layer.input_shape[axis]
# Set parameters
# Parameter arrangement in Keras: gamma, beta, mean, variance
idx = 0
gamma, beta = None, None
if keras_layer.scale:
gamma = keras_layer.get_weights()[idx]
idx += 1
if keras_layer.center:
beta = keras_layer.get_weights()[idx]
idx += 1
mean = keras_layer.get_weights()[idx]
std = keras_layer.get_weights()[idx+1]
gamma = _np.ones(mean.shape) if gamma is None else gamma
beta = _np.zeros(mean.shape) if beta is None else beta
# compute adjusted parameters
variance = std * std
f = 1.0 / _np.sqrt(std + keras_layer.epsilon)
gamma1 = gamma*f
beta1 = beta - gamma*mean*f
mean[:] = 0.0 #mean
variance[:] = 1.0 - .00001 #stddev
builder.add_batchnorm(
name = layer,
channels = nb_channels,
gamma = gamma1,
beta = beta1,
mean = mean,
variance = variance,
input_name = input_name,
output_name = output_name) | [
"def",
"convert_batchnorm",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"axis",
"=",
"keras_layer",
".",
"axis",
"nb_channels",
"=",
"keras_layer",
".",
"input_shape",
"[",
"axis",
"]",
"# Set parameters",
"# Parameter arrangement in Keras: gamma, beta, mean, variance",
"idx",
"=",
"0",
"gamma",
",",
"beta",
"=",
"None",
",",
"None",
"if",
"keras_layer",
".",
"scale",
":",
"gamma",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"]",
"idx",
"+=",
"1",
"if",
"keras_layer",
".",
"center",
":",
"beta",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"]",
"idx",
"+=",
"1",
"mean",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"]",
"std",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"idx",
"+",
"1",
"]",
"gamma",
"=",
"_np",
".",
"ones",
"(",
"mean",
".",
"shape",
")",
"if",
"gamma",
"is",
"None",
"else",
"gamma",
"beta",
"=",
"_np",
".",
"zeros",
"(",
"mean",
".",
"shape",
")",
"if",
"beta",
"is",
"None",
"else",
"beta",
"# compute adjusted parameters",
"variance",
"=",
"std",
"*",
"std",
"f",
"=",
"1.0",
"/",
"_np",
".",
"sqrt",
"(",
"std",
"+",
"keras_layer",
".",
"epsilon",
")",
"gamma1",
"=",
"gamma",
"*",
"f",
"beta1",
"=",
"beta",
"-",
"gamma",
"*",
"mean",
"*",
"f",
"mean",
"[",
":",
"]",
"=",
"0.0",
"#mean",
"variance",
"[",
":",
"]",
"=",
"1.0",
"-",
".00001",
"#stddev",
"builder",
".",
"add_batchnorm",
"(",
"name",
"=",
"layer",
",",
"channels",
"=",
"nb_channels",
",",
"gamma",
"=",
"gamma1",
",",
"beta",
"=",
"beta1",
",",
"mean",
"=",
"mean",
",",
"variance",
"=",
"variance",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | Convert a Batch Normalization layer.
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"a",
"Batch",
"Normalization",
"layer",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L531-L581 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_flatten | def convert_flatten(builder, layer, input_names, output_names, keras_layer):
"""
Convert a flatten layer from keras to coreml.
----------
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
# blob_order == 0 if the input blob needs not be rearranged
# blob_order == 1 if the input blob needs to be rearranged
blob_order = 0
# using keras_layer.input.shape have a "?" (Dimension[None] at the front),
# making a 3D tensor with unknown batch size 4D
try:
in_shape = keras_layer.input_shape
if len(in_shape) == 4:
blob_order = 1
if len(in_shape) == 3 and in_shape[0] is None:
# handling Keras rank-3 tensor (Batch, Sequence, Channels)
permute_output_name = output_name + '__permute__'
builder.add_permute(name=layer+'__permute__', dim=(2,1,0,3),
input_name=input_name, output_name=permute_output_name)
builder.add_flatten(name=layer, mode=1,
input_name=permute_output_name, output_name=output_name)
else:
builder.add_flatten(name=layer, mode=blob_order, input_name=input_name,
output_name=output_name)
except:
builder.add_flatten(name=layer, mode=1, input_name=input_name, output_name=output_name) | python | def convert_flatten(builder, layer, input_names, output_names, keras_layer):
"""
Convert a flatten layer from keras to coreml.
----------
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
# blob_order == 0 if the input blob needs not be rearranged
# blob_order == 1 if the input blob needs to be rearranged
blob_order = 0
# using keras_layer.input.shape have a "?" (Dimension[None] at the front),
# making a 3D tensor with unknown batch size 4D
try:
in_shape = keras_layer.input_shape
if len(in_shape) == 4:
blob_order = 1
if len(in_shape) == 3 and in_shape[0] is None:
# handling Keras rank-3 tensor (Batch, Sequence, Channels)
permute_output_name = output_name + '__permute__'
builder.add_permute(name=layer+'__permute__', dim=(2,1,0,3),
input_name=input_name, output_name=permute_output_name)
builder.add_flatten(name=layer, mode=1,
input_name=permute_output_name, output_name=output_name)
else:
builder.add_flatten(name=layer, mode=blob_order, input_name=input_name,
output_name=output_name)
except:
builder.add_flatten(name=layer, mode=1, input_name=input_name, output_name=output_name) | [
"def",
"convert_flatten",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"# blob_order == 0 if the input blob needs not be rearranged",
"# blob_order == 1 if the input blob needs to be rearranged",
"blob_order",
"=",
"0",
"# using keras_layer.input.shape have a \"?\" (Dimension[None] at the front),",
"# making a 3D tensor with unknown batch size 4D",
"try",
":",
"in_shape",
"=",
"keras_layer",
".",
"input_shape",
"if",
"len",
"(",
"in_shape",
")",
"==",
"4",
":",
"blob_order",
"=",
"1",
"if",
"len",
"(",
"in_shape",
")",
"==",
"3",
"and",
"in_shape",
"[",
"0",
"]",
"is",
"None",
":",
"# handling Keras rank-3 tensor (Batch, Sequence, Channels)",
"permute_output_name",
"=",
"output_name",
"+",
"'__permute__'",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"layer",
"+",
"'__permute__'",
",",
"dim",
"=",
"(",
"2",
",",
"1",
",",
"0",
",",
"3",
")",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"permute_output_name",
")",
"builder",
".",
"add_flatten",
"(",
"name",
"=",
"layer",
",",
"mode",
"=",
"1",
",",
"input_name",
"=",
"permute_output_name",
",",
"output_name",
"=",
"output_name",
")",
"else",
":",
"builder",
".",
"add_flatten",
"(",
"name",
"=",
"layer",
",",
"mode",
"=",
"blob_order",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")",
"except",
":",
"builder",
".",
"add_flatten",
"(",
"name",
"=",
"layer",
",",
"mode",
"=",
"1",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | Convert a flatten layer from keras to coreml.
----------
Parameters
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"a",
"flatten",
"layer",
"from",
"keras",
"to",
"coreml",
".",
"----------",
"Parameters",
"keras_layer",
":",
"layer",
"A",
"keras",
"layer",
"object",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L584-L619 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_merge | def convert_merge(builder, layer, input_names, output_names, keras_layer):
"""
Convert concat layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
output_name = output_names[0]
mode = _get_elementwise_name_from_keras_layer(keras_layer)
builder.add_elementwise(name = layer, input_names = input_names,
output_name = output_name, mode = mode) | python | def convert_merge(builder, layer, input_names, output_names, keras_layer):
"""
Convert concat layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
output_name = output_names[0]
mode = _get_elementwise_name_from_keras_layer(keras_layer)
builder.add_elementwise(name = layer, input_names = input_names,
output_name = output_name, mode = mode) | [
"def",
"convert_merge",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"output_name",
"=",
"output_names",
"[",
"0",
"]",
"mode",
"=",
"_get_elementwise_name_from_keras_layer",
"(",
"keras_layer",
")",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"layer",
",",
"input_names",
"=",
"input_names",
",",
"output_name",
"=",
"output_name",
",",
"mode",
"=",
"mode",
")"
] | Convert concat layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"concat",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L621-L638 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_pooling | def convert_pooling(builder, layer, input_names, output_names, keras_layer):
"""
Convert pooling layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
# Pooling layer type
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D):
layer_type_str = 'MAX'
elif isinstance(keras_layer, _keras.layers.convolutional.AveragePooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
layer_type_str = 'AVERAGE'
else:
raise TypeError("Pooling type %s not supported" % keras_layer)
# if it's global, set the global flag
if isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D):
# 2D global pooling
global_pooling = True
height, width = (0, 0)
stride_height, stride_width = (0,0)
padding_type = 'VALID'
elif isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
# 1D global pooling: 1D global pooling seems problematic in the backend,
# use this work-around
global_pooling = False
_, width, channels = keras_layer.input_shape
height = 1
stride_height, stride_width = height, width
padding_type = 'VALID'
else:
global_pooling = False
# Set pool sizes and strides
# 1D cases:
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
pool_size = keras_layer.pool_size if type(keras_layer.pool_size) is int else keras_layer.pool_size[0]
height, width = 1, pool_size
if keras_layer.strides is not None:
strides = keras_layer.strides if type(keras_layer.strides) is int else keras_layer.strides[0]
stride_height, stride_width = 1, strides
else:
stride_height, stride_width = 1, pool_size
# 2D cases:
else:
height, width = keras_layer.pool_size
if keras_layer.strides is None:
stride_height, stride_width = height, width
else:
stride_height, stride_width = keras_layer.strides
# Padding
padding = keras_layer.padding
if keras_layer.padding == 'valid':
padding_type = 'VALID'
elif keras_layer.padding == 'same':
padding_type = 'SAME'
else:
raise TypeError("Border mode %s not supported" % padding)
builder.add_pooling(name = layer,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
layer_type = layer_type_str,
padding_type = padding_type,
input_name = input_name,
output_name = output_name,
exclude_pad_area = True,
is_global = global_pooling) | python | def convert_pooling(builder, layer, input_names, output_names, keras_layer):
"""
Convert pooling layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
# Pooling layer type
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D):
layer_type_str = 'MAX'
elif isinstance(keras_layer, _keras.layers.convolutional.AveragePooling2D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
layer_type_str = 'AVERAGE'
else:
raise TypeError("Pooling type %s not supported" % keras_layer)
# if it's global, set the global flag
if isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling2D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling2D):
# 2D global pooling
global_pooling = True
height, width = (0, 0)
stride_height, stride_width = (0,0)
padding_type = 'VALID'
elif isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
# 1D global pooling: 1D global pooling seems problematic in the backend,
# use this work-around
global_pooling = False
_, width, channels = keras_layer.input_shape
height = 1
stride_height, stride_width = height, width
padding_type = 'VALID'
else:
global_pooling = False
# Set pool sizes and strides
# 1D cases:
if isinstance(keras_layer, _keras.layers.convolutional.MaxPooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalMaxPooling1D) or \
isinstance(keras_layer, _keras.layers.convolutional.AveragePooling1D) or \
isinstance(keras_layer, _keras.layers.pooling.GlobalAveragePooling1D):
pool_size = keras_layer.pool_size if type(keras_layer.pool_size) is int else keras_layer.pool_size[0]
height, width = 1, pool_size
if keras_layer.strides is not None:
strides = keras_layer.strides if type(keras_layer.strides) is int else keras_layer.strides[0]
stride_height, stride_width = 1, strides
else:
stride_height, stride_width = 1, pool_size
# 2D cases:
else:
height, width = keras_layer.pool_size
if keras_layer.strides is None:
stride_height, stride_width = height, width
else:
stride_height, stride_width = keras_layer.strides
# Padding
padding = keras_layer.padding
if keras_layer.padding == 'valid':
padding_type = 'VALID'
elif keras_layer.padding == 'same':
padding_type = 'SAME'
else:
raise TypeError("Border mode %s not supported" % padding)
builder.add_pooling(name = layer,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
layer_type = layer_type_str,
padding_type = padding_type,
input_name = input_name,
output_name = output_name,
exclude_pad_area = True,
is_global = global_pooling) | [
"def",
"convert_pooling",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"_check_data_format",
"(",
"keras_layer",
")",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"# Pooling layer type",
"if",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"MaxPooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"MaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling1D",
")",
":",
"layer_type_str",
"=",
"'MAX'",
"elif",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"AveragePooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"AveragePooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling1D",
")",
":",
"layer_type_str",
"=",
"'AVERAGE'",
"else",
":",
"raise",
"TypeError",
"(",
"\"Pooling type %s not supported\"",
"%",
"keras_layer",
")",
"# if it's global, set the global flag",
"if",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling2D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling2D",
")",
":",
"# 2D global pooling",
"global_pooling",
"=",
"True",
"height",
",",
"width",
"=",
"(",
"0",
",",
"0",
")",
"stride_height",
",",
"stride_width",
"=",
"(",
"0",
",",
"0",
")",
"padding_type",
"=",
"'VALID'",
"elif",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling1D",
")",
":",
"# 1D global pooling: 1D global pooling seems problematic in the backend,",
"# use this work-around",
"global_pooling",
"=",
"False",
"_",
",",
"width",
",",
"channels",
"=",
"keras_layer",
".",
"input_shape",
"height",
"=",
"1",
"stride_height",
",",
"stride_width",
"=",
"height",
",",
"width",
"padding_type",
"=",
"'VALID'",
"else",
":",
"global_pooling",
"=",
"False",
"# Set pool sizes and strides",
"# 1D cases:",
"if",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"MaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalMaxPooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"convolutional",
".",
"AveragePooling1D",
")",
"or",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"pooling",
".",
"GlobalAveragePooling1D",
")",
":",
"pool_size",
"=",
"keras_layer",
".",
"pool_size",
"if",
"type",
"(",
"keras_layer",
".",
"pool_size",
")",
"is",
"int",
"else",
"keras_layer",
".",
"pool_size",
"[",
"0",
"]",
"height",
",",
"width",
"=",
"1",
",",
"pool_size",
"if",
"keras_layer",
".",
"strides",
"is",
"not",
"None",
":",
"strides",
"=",
"keras_layer",
".",
"strides",
"if",
"type",
"(",
"keras_layer",
".",
"strides",
")",
"is",
"int",
"else",
"keras_layer",
".",
"strides",
"[",
"0",
"]",
"stride_height",
",",
"stride_width",
"=",
"1",
",",
"strides",
"else",
":",
"stride_height",
",",
"stride_width",
"=",
"1",
",",
"pool_size",
"# 2D cases:",
"else",
":",
"height",
",",
"width",
"=",
"keras_layer",
".",
"pool_size",
"if",
"keras_layer",
".",
"strides",
"is",
"None",
":",
"stride_height",
",",
"stride_width",
"=",
"height",
",",
"width",
"else",
":",
"stride_height",
",",
"stride_width",
"=",
"keras_layer",
".",
"strides",
"# Padding",
"padding",
"=",
"keras_layer",
".",
"padding",
"if",
"keras_layer",
".",
"padding",
"==",
"'valid'",
":",
"padding_type",
"=",
"'VALID'",
"elif",
"keras_layer",
".",
"padding",
"==",
"'same'",
":",
"padding_type",
"=",
"'SAME'",
"else",
":",
"raise",
"TypeError",
"(",
"\"Border mode %s not supported\"",
"%",
"padding",
")",
"builder",
".",
"add_pooling",
"(",
"name",
"=",
"layer",
",",
"height",
"=",
"height",
",",
"width",
"=",
"width",
",",
"stride_height",
"=",
"stride_height",
",",
"stride_width",
"=",
"stride_width",
",",
"layer_type",
"=",
"layer_type_str",
",",
"padding_type",
"=",
"padding_type",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
",",
"exclude_pad_area",
"=",
"True",
",",
"is_global",
"=",
"global_pooling",
")"
] | Convert pooling layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"pooling",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L640-L729 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_padding | def convert_padding(builder, layer, input_names, output_names, keras_layer):
"""
Convert padding layer from keras to coreml.
Keras only supports zero padding at this time.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
is_1d = isinstance(keras_layer, _keras.layers.ZeroPadding1D)
padding = keras_layer.padding
top = left = bottom = right = 0
if is_1d:
if type(padding) is int:
left = right = padding
elif type(padding) is tuple:
if type(padding[0]) is int:
left, right = padding
elif type(padding[0]) is tuple and len(padding[0]) == 2:
left, right = padding[0]
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
else:
if type(padding) is int:
top = left = bottom = right = padding
elif type(padding) is tuple:
if type(padding[0]) is int:
top, left = padding
bottom, right = padding
elif type(padding[0]) is tuple:
top, bottom = padding[0]
left, right = padding[1]
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
# Now add the layer
builder.add_padding(name = layer,
left = left, right=right, top=top, bottom=bottom, value = 0,
input_name = input_name, output_name=output_name
) | python | def convert_padding(builder, layer, input_names, output_names, keras_layer):
"""
Convert padding layer from keras to coreml.
Keras only supports zero padding at this time.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
is_1d = isinstance(keras_layer, _keras.layers.ZeroPadding1D)
padding = keras_layer.padding
top = left = bottom = right = 0
if is_1d:
if type(padding) is int:
left = right = padding
elif type(padding) is tuple:
if type(padding[0]) is int:
left, right = padding
elif type(padding[0]) is tuple and len(padding[0]) == 2:
left, right = padding[0]
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
else:
if type(padding) is int:
top = left = bottom = right = padding
elif type(padding) is tuple:
if type(padding[0]) is int:
top, left = padding
bottom, right = padding
elif type(padding[0]) is tuple:
top, bottom = padding[0]
left, right = padding[1]
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
else:
raise ValueError("Unrecognized padding option: %s" % (str(padding)))
# Now add the layer
builder.add_padding(name = layer,
left = left, right=right, top=top, bottom=bottom, value = 0,
input_name = input_name, output_name=output_name
) | [
"def",
"convert_padding",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"_check_data_format",
"(",
"keras_layer",
")",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"is_1d",
"=",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"ZeroPadding1D",
")",
"padding",
"=",
"keras_layer",
".",
"padding",
"top",
"=",
"left",
"=",
"bottom",
"=",
"right",
"=",
"0",
"if",
"is_1d",
":",
"if",
"type",
"(",
"padding",
")",
"is",
"int",
":",
"left",
"=",
"right",
"=",
"padding",
"elif",
"type",
"(",
"padding",
")",
"is",
"tuple",
":",
"if",
"type",
"(",
"padding",
"[",
"0",
"]",
")",
"is",
"int",
":",
"left",
",",
"right",
"=",
"padding",
"elif",
"type",
"(",
"padding",
"[",
"0",
"]",
")",
"is",
"tuple",
"and",
"len",
"(",
"padding",
"[",
"0",
"]",
")",
"==",
"2",
":",
"left",
",",
"right",
"=",
"padding",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized padding option: %s\"",
"%",
"(",
"str",
"(",
"padding",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized padding option: %s\"",
"%",
"(",
"str",
"(",
"padding",
")",
")",
")",
"else",
":",
"if",
"type",
"(",
"padding",
")",
"is",
"int",
":",
"top",
"=",
"left",
"=",
"bottom",
"=",
"right",
"=",
"padding",
"elif",
"type",
"(",
"padding",
")",
"is",
"tuple",
":",
"if",
"type",
"(",
"padding",
"[",
"0",
"]",
")",
"is",
"int",
":",
"top",
",",
"left",
"=",
"padding",
"bottom",
",",
"right",
"=",
"padding",
"elif",
"type",
"(",
"padding",
"[",
"0",
"]",
")",
"is",
"tuple",
":",
"top",
",",
"bottom",
"=",
"padding",
"[",
"0",
"]",
"left",
",",
"right",
"=",
"padding",
"[",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized padding option: %s\"",
"%",
"(",
"str",
"(",
"padding",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized padding option: %s\"",
"%",
"(",
"str",
"(",
"padding",
")",
")",
")",
"# Now add the layer",
"builder",
".",
"add_padding",
"(",
"name",
"=",
"layer",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
",",
"top",
"=",
"top",
",",
"bottom",
"=",
"bottom",
",",
"value",
"=",
"0",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | Convert padding layer from keras to coreml.
Keras only supports zero padding at this time.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"padding",
"layer",
"from",
"keras",
"to",
"coreml",
".",
"Keras",
"only",
"supports",
"zero",
"padding",
"at",
"this",
"time",
".",
"Parameters",
"----------",
"keras_layer",
":",
"layer",
"A",
"keras",
"layer",
"object",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L731-L782 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_cropping | def convert_cropping(builder, layer, input_names, output_names, keras_layer):
"""
Convert padding layer from keras to coreml.
Keras only supports zero padding at this time.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
is_1d = isinstance(keras_layer, _keras.layers.Cropping1D)
cropping = keras_layer.cropping
top = left = bottom = right = 0
if is_1d:
if type(cropping) is int:
left = right = cropping
elif type(cropping) is tuple:
if type(cropping[0]) is int:
left, right = cropping
elif type(cropping[0]) is tuple and len(cropping[0]) == 2:
left, right = cropping[0]
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
else:
if type(cropping) is int:
top = left = bottom = right = cropping
elif type(cropping) is tuple:
if type(cropping[0]) is int:
top, left = cropping
bottom, right = cropping
elif type(cropping[0]) is tuple:
top, bottom = cropping[0]
left, right = cropping[1]
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
# Now add the layer
builder.add_crop(name = layer,
left = left, right=right, top=top, bottom=bottom, offset = [0,0],
input_names = [input_name], output_name=output_name
) | python | def convert_cropping(builder, layer, input_names, output_names, keras_layer):
"""
Convert padding layer from keras to coreml.
Keras only supports zero padding at this time.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
is_1d = isinstance(keras_layer, _keras.layers.Cropping1D)
cropping = keras_layer.cropping
top = left = bottom = right = 0
if is_1d:
if type(cropping) is int:
left = right = cropping
elif type(cropping) is tuple:
if type(cropping[0]) is int:
left, right = cropping
elif type(cropping[0]) is tuple and len(cropping[0]) == 2:
left, right = cropping[0]
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
else:
if type(cropping) is int:
top = left = bottom = right = cropping
elif type(cropping) is tuple:
if type(cropping[0]) is int:
top, left = cropping
bottom, right = cropping
elif type(cropping[0]) is tuple:
top, bottom = cropping[0]
left, right = cropping[1]
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
else:
raise ValueError("Unrecognized cropping option: %s" % (str(cropping)))
# Now add the layer
builder.add_crop(name = layer,
left = left, right=right, top=top, bottom=bottom, offset = [0,0],
input_names = [input_name], output_name=output_name
) | [
"def",
"convert_cropping",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"_check_data_format",
"(",
"keras_layer",
")",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"is_1d",
"=",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"Cropping1D",
")",
"cropping",
"=",
"keras_layer",
".",
"cropping",
"top",
"=",
"left",
"=",
"bottom",
"=",
"right",
"=",
"0",
"if",
"is_1d",
":",
"if",
"type",
"(",
"cropping",
")",
"is",
"int",
":",
"left",
"=",
"right",
"=",
"cropping",
"elif",
"type",
"(",
"cropping",
")",
"is",
"tuple",
":",
"if",
"type",
"(",
"cropping",
"[",
"0",
"]",
")",
"is",
"int",
":",
"left",
",",
"right",
"=",
"cropping",
"elif",
"type",
"(",
"cropping",
"[",
"0",
"]",
")",
"is",
"tuple",
"and",
"len",
"(",
"cropping",
"[",
"0",
"]",
")",
"==",
"2",
":",
"left",
",",
"right",
"=",
"cropping",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized cropping option: %s\"",
"%",
"(",
"str",
"(",
"cropping",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized cropping option: %s\"",
"%",
"(",
"str",
"(",
"cropping",
")",
")",
")",
"else",
":",
"if",
"type",
"(",
"cropping",
")",
"is",
"int",
":",
"top",
"=",
"left",
"=",
"bottom",
"=",
"right",
"=",
"cropping",
"elif",
"type",
"(",
"cropping",
")",
"is",
"tuple",
":",
"if",
"type",
"(",
"cropping",
"[",
"0",
"]",
")",
"is",
"int",
":",
"top",
",",
"left",
"=",
"cropping",
"bottom",
",",
"right",
"=",
"cropping",
"elif",
"type",
"(",
"cropping",
"[",
"0",
"]",
")",
"is",
"tuple",
":",
"top",
",",
"bottom",
"=",
"cropping",
"[",
"0",
"]",
"left",
",",
"right",
"=",
"cropping",
"[",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized cropping option: %s\"",
"%",
"(",
"str",
"(",
"cropping",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized cropping option: %s\"",
"%",
"(",
"str",
"(",
"cropping",
")",
")",
")",
"# Now add the layer",
"builder",
".",
"add_crop",
"(",
"name",
"=",
"layer",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
",",
"top",
"=",
"top",
",",
"bottom",
"=",
"bottom",
",",
"offset",
"=",
"[",
"0",
",",
"0",
"]",
",",
"input_names",
"=",
"[",
"input_name",
"]",
",",
"output_name",
"=",
"output_name",
")"
] | Convert padding layer from keras to coreml.
Keras only supports zero padding at this time.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"padding",
"layer",
"from",
"keras",
"to",
"coreml",
".",
"Keras",
"only",
"supports",
"zero",
"padding",
"at",
"this",
"time",
".",
"Parameters",
"----------",
"keras_layer",
":",
"layer",
"A",
"keras",
"layer",
"object",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L784-L835 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_upsample | def convert_upsample(builder, layer, input_names, output_names, keras_layer):
"""
Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
is_1d = isinstance(keras_layer, _keras.layers.UpSampling1D)
# Currently, we only support upsample of same dims
fh = fw = 1
if is_1d:
if type(keras_layer.size) is tuple and len(keras_layer.size) == 1:
fh, fw = 1, keras_layer.size[0]
elif type(keras_layer.size) is int:
fh, fw = 1, keras_layer.size
else:
raise ValueError("Unrecognized upsample factor format %s" % (str(keras_layer.size)))
else:
if type(keras_layer.size) is int:
fh = fw = keras_layer.size
elif len(keras_layer.size) == 2:
if keras_layer.size[0] != keras_layer.size[1]:
raise ValueError("Upsample with different rows and columns not supported.")
else:
fh = keras_layer.size[0]
fw = keras_layer.size[1]
else:
raise ValueError("Unrecognized upsample factor format %s" % (str(keras_layer.size)))
builder.add_upsample(name = layer,
scaling_factor_h = fh,
scaling_factor_w = fw,
input_name = input_name,
output_name = output_name) | python | def convert_upsample(builder, layer, input_names, output_names, keras_layer):
"""
Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
is_1d = isinstance(keras_layer, _keras.layers.UpSampling1D)
# Currently, we only support upsample of same dims
fh = fw = 1
if is_1d:
if type(keras_layer.size) is tuple and len(keras_layer.size) == 1:
fh, fw = 1, keras_layer.size[0]
elif type(keras_layer.size) is int:
fh, fw = 1, keras_layer.size
else:
raise ValueError("Unrecognized upsample factor format %s" % (str(keras_layer.size)))
else:
if type(keras_layer.size) is int:
fh = fw = keras_layer.size
elif len(keras_layer.size) == 2:
if keras_layer.size[0] != keras_layer.size[1]:
raise ValueError("Upsample with different rows and columns not supported.")
else:
fh = keras_layer.size[0]
fw = keras_layer.size[1]
else:
raise ValueError("Unrecognized upsample factor format %s" % (str(keras_layer.size)))
builder.add_upsample(name = layer,
scaling_factor_h = fh,
scaling_factor_w = fw,
input_name = input_name,
output_name = output_name) | [
"def",
"convert_upsample",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"_check_data_format",
"(",
"keras_layer",
")",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"is_1d",
"=",
"isinstance",
"(",
"keras_layer",
",",
"_keras",
".",
"layers",
".",
"UpSampling1D",
")",
"# Currently, we only support upsample of same dims",
"fh",
"=",
"fw",
"=",
"1",
"if",
"is_1d",
":",
"if",
"type",
"(",
"keras_layer",
".",
"size",
")",
"is",
"tuple",
"and",
"len",
"(",
"keras_layer",
".",
"size",
")",
"==",
"1",
":",
"fh",
",",
"fw",
"=",
"1",
",",
"keras_layer",
".",
"size",
"[",
"0",
"]",
"elif",
"type",
"(",
"keras_layer",
".",
"size",
")",
"is",
"int",
":",
"fh",
",",
"fw",
"=",
"1",
",",
"keras_layer",
".",
"size",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized upsample factor format %s\"",
"%",
"(",
"str",
"(",
"keras_layer",
".",
"size",
")",
")",
")",
"else",
":",
"if",
"type",
"(",
"keras_layer",
".",
"size",
")",
"is",
"int",
":",
"fh",
"=",
"fw",
"=",
"keras_layer",
".",
"size",
"elif",
"len",
"(",
"keras_layer",
".",
"size",
")",
"==",
"2",
":",
"if",
"keras_layer",
".",
"size",
"[",
"0",
"]",
"!=",
"keras_layer",
".",
"size",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Upsample with different rows and columns not supported.\"",
")",
"else",
":",
"fh",
"=",
"keras_layer",
".",
"size",
"[",
"0",
"]",
"fw",
"=",
"keras_layer",
".",
"size",
"[",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized upsample factor format %s\"",
"%",
"(",
"str",
"(",
"keras_layer",
".",
"size",
")",
")",
")",
"builder",
".",
"add_upsample",
"(",
"name",
"=",
"layer",
",",
"scaling_factor_h",
"=",
"fh",
",",
"scaling_factor_w",
"=",
"fw",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"convolution",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L837-L880 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_permute | def convert_permute(builder, layer, input_names, output_names, keras_layer):
"""
Convert a softmax layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
keras_dims = keras_layer.dims
# Keras permute layer index begins at 1
if len(keras_dims) == 3:
# Keras input tensor interpret as (H,W,C)
x = list(_np.array(keras_dims))
arr = [2, 3, 1] # HWC in Keras
arr_permuted = [arr[x[0] - 1], arr[x[1] - 1], arr[x[2] - 1]]
arr_permuted = [arr_permuted[2], arr_permuted[0], arr_permuted[1]] # coreml format: channel first
# add a sequence axis
dim = [0] + arr_permuted
dim = tuple(dim)
elif len(keras_dims) == 4:
# Here we use Keras converter as a place holder for inserting
# permutations - the values here are not valid Keras dim parameters
# but parameters we need to use to convert to CoreML model
dim = keras_dims
else:
raise NotImplementedError('Supports only 3d permutation.')
builder.add_permute(name = layer, dim=dim, input_name = input_name,
output_name = output_name) | python | def convert_permute(builder, layer, input_names, output_names, keras_layer):
"""
Convert a softmax layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = (input_names[0], output_names[0])
keras_dims = keras_layer.dims
# Keras permute layer index begins at 1
if len(keras_dims) == 3:
# Keras input tensor interpret as (H,W,C)
x = list(_np.array(keras_dims))
arr = [2, 3, 1] # HWC in Keras
arr_permuted = [arr[x[0] - 1], arr[x[1] - 1], arr[x[2] - 1]]
arr_permuted = [arr_permuted[2], arr_permuted[0], arr_permuted[1]] # coreml format: channel first
# add a sequence axis
dim = [0] + arr_permuted
dim = tuple(dim)
elif len(keras_dims) == 4:
# Here we use Keras converter as a place holder for inserting
# permutations - the values here are not valid Keras dim parameters
# but parameters we need to use to convert to CoreML model
dim = keras_dims
else:
raise NotImplementedError('Supports only 3d permutation.')
builder.add_permute(name = layer, dim=dim, input_name = input_name,
output_name = output_name) | [
"def",
"convert_permute",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"keras_dims",
"=",
"keras_layer",
".",
"dims",
"# Keras permute layer index begins at 1",
"if",
"len",
"(",
"keras_dims",
")",
"==",
"3",
":",
"# Keras input tensor interpret as (H,W,C)",
"x",
"=",
"list",
"(",
"_np",
".",
"array",
"(",
"keras_dims",
")",
")",
"arr",
"=",
"[",
"2",
",",
"3",
",",
"1",
"]",
"# HWC in Keras",
"arr_permuted",
"=",
"[",
"arr",
"[",
"x",
"[",
"0",
"]",
"-",
"1",
"]",
",",
"arr",
"[",
"x",
"[",
"1",
"]",
"-",
"1",
"]",
",",
"arr",
"[",
"x",
"[",
"2",
"]",
"-",
"1",
"]",
"]",
"arr_permuted",
"=",
"[",
"arr_permuted",
"[",
"2",
"]",
",",
"arr_permuted",
"[",
"0",
"]",
",",
"arr_permuted",
"[",
"1",
"]",
"]",
"# coreml format: channel first",
"# add a sequence axis",
"dim",
"=",
"[",
"0",
"]",
"+",
"arr_permuted",
"dim",
"=",
"tuple",
"(",
"dim",
")",
"elif",
"len",
"(",
"keras_dims",
")",
"==",
"4",
":",
"# Here we use Keras converter as a place holder for inserting",
"# permutations - the values here are not valid Keras dim parameters",
"# but parameters we need to use to convert to CoreML model",
"dim",
"=",
"keras_dims",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Supports only 3d permutation.'",
")",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"layer",
",",
"dim",
"=",
"dim",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")"
] | Convert a softmax layer from keras to coreml.
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"a",
"softmax",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L882-L916 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_simple_rnn | def convert_simple_rnn(builder, layer, input_names, output_names, keras_layer):
"""
Convert an SimpleRNN layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
hidden_size = keras_layer.units
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
W_h = _np.zeros((hidden_size, hidden_size))
W_x = _np.zeros((hidden_size, input_size))
b = None
implementation = keras_layer.implementation if hasattr(keras_layer,
'implementation') else 0
if implementation == 0:
W_h = keras_layer.get_weights()[1].T
W_x = keras_layer.get_weights()[0].T
if keras_layer.use_bias:
b = keras_layer.get_weights()[2]
# Set actication type
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_simple_rnn(
name = layer,
W_h = W_h, W_x = W_x, b = b,
hidden_size = hidden_size,
input_size = input_size,
activation = activation_str,
input_names = input_names,
output_names = output_names,
output_all=output_all,
reverse_input=reverse_input) | python | def convert_simple_rnn(builder, layer, input_names, output_names, keras_layer):
"""
Convert an SimpleRNN layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
hidden_size = keras_layer.units
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
W_h = _np.zeros((hidden_size, hidden_size))
W_x = _np.zeros((hidden_size, input_size))
b = None
implementation = keras_layer.implementation if hasattr(keras_layer,
'implementation') else 0
if implementation == 0:
W_h = keras_layer.get_weights()[1].T
W_x = keras_layer.get_weights()[0].T
if keras_layer.use_bias:
b = keras_layer.get_weights()[2]
# Set actication type
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_simple_rnn(
name = layer,
W_h = W_h, W_x = W_x, b = b,
hidden_size = hidden_size,
input_size = input_size,
activation = activation_str,
input_names = input_names,
output_names = output_names,
output_all=output_all,
reverse_input=reverse_input) | [
"def",
"convert_simple_rnn",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"hidden_size",
"=",
"keras_layer",
".",
"units",
"input_size",
"=",
"keras_layer",
".",
"input_shape",
"[",
"-",
"1",
"]",
"output_all",
"=",
"keras_layer",
".",
"return_sequences",
"reverse_input",
"=",
"keras_layer",
".",
"go_backwards",
"W_h",
"=",
"_np",
".",
"zeros",
"(",
"(",
"hidden_size",
",",
"hidden_size",
")",
")",
"W_x",
"=",
"_np",
".",
"zeros",
"(",
"(",
"hidden_size",
",",
"input_size",
")",
")",
"b",
"=",
"None",
"implementation",
"=",
"keras_layer",
".",
"implementation",
"if",
"hasattr",
"(",
"keras_layer",
",",
"'implementation'",
")",
"else",
"0",
"if",
"implementation",
"==",
"0",
":",
"W_h",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"1",
"]",
".",
"T",
"W_x",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"0",
"]",
".",
"T",
"if",
"keras_layer",
".",
"use_bias",
":",
"b",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"2",
"]",
"# Set actication type",
"activation_str",
"=",
"_get_recurrent_activation_name_from_keras",
"(",
"keras_layer",
".",
"activation",
")",
"# Add to the network",
"builder",
".",
"add_simple_rnn",
"(",
"name",
"=",
"layer",
",",
"W_h",
"=",
"W_h",
",",
"W_x",
"=",
"W_x",
",",
"b",
"=",
"b",
",",
"hidden_size",
"=",
"hidden_size",
",",
"input_size",
"=",
"input_size",
",",
"activation",
"=",
"activation_str",
",",
"input_names",
"=",
"input_names",
",",
"output_names",
"=",
"output_names",
",",
"output_all",
"=",
"output_all",
",",
"reverse_input",
"=",
"reverse_input",
")"
] | Convert an SimpleRNN layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"an",
"SimpleRNN",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L951-L995 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_lstm | def convert_lstm(builder, layer, input_names, output_names, keras_layer):
"""
Convert an LSTM layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
hidden_size = keras_layer.units
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
# Keras: [W_x, W_h, b] each in I F C O
# CoreML: I F O G; W_h and W_x are separated
W_h, W_x, b = ([], [], [])
keras_W_h = keras_layer.get_weights()[1].T
W_h.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[3 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.get_weights()[0].T
W_x.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[3 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.use_bias:
keras_b = keras_layer.get_weights()[2]
b.append(keras_b[0 * hidden_size:][:hidden_size])
b.append(keras_b[1 * hidden_size:][:hidden_size])
b.append(keras_b[3 * hidden_size:][:hidden_size])
b.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b) == 0:
b = None
# Set activation type
inner_activation_str = _get_recurrent_activation_name_from_keras(keras_layer.recurrent_activation)
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_unilstm(
name = layer,
W_h = W_h, W_x = W_x, b = b,
hidden_size = hidden_size,
input_size = input_size,
input_names = input_names,
output_names = output_names,
inner_activation = inner_activation_str,
cell_state_update_activation = activation_str,
output_activation = activation_str,
output_all = output_all,
forget_bias = keras_layer.unit_forget_bias,
reverse_input = reverse_input) | python | def convert_lstm(builder, layer, input_names, output_names, keras_layer):
"""
Convert an LSTM layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
hidden_size = keras_layer.units
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
# Keras: [W_x, W_h, b] each in I F C O
# CoreML: I F O G; W_h and W_x are separated
W_h, W_x, b = ([], [], [])
keras_W_h = keras_layer.get_weights()[1].T
W_h.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[3 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.get_weights()[0].T
W_x.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[3 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.use_bias:
keras_b = keras_layer.get_weights()[2]
b.append(keras_b[0 * hidden_size:][:hidden_size])
b.append(keras_b[1 * hidden_size:][:hidden_size])
b.append(keras_b[3 * hidden_size:][:hidden_size])
b.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b) == 0:
b = None
# Set activation type
inner_activation_str = _get_recurrent_activation_name_from_keras(keras_layer.recurrent_activation)
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_unilstm(
name = layer,
W_h = W_h, W_x = W_x, b = b,
hidden_size = hidden_size,
input_size = input_size,
input_names = input_names,
output_names = output_names,
inner_activation = inner_activation_str,
cell_state_update_activation = activation_str,
output_activation = activation_str,
output_all = output_all,
forget_bias = keras_layer.unit_forget_bias,
reverse_input = reverse_input) | [
"def",
"convert_lstm",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"hidden_size",
"=",
"keras_layer",
".",
"units",
"input_size",
"=",
"keras_layer",
".",
"input_shape",
"[",
"-",
"1",
"]",
"output_all",
"=",
"keras_layer",
".",
"return_sequences",
"reverse_input",
"=",
"keras_layer",
".",
"go_backwards",
"# Keras: [W_x, W_h, b] each in I F C O",
"# CoreML: I F O G; W_h and W_x are separated",
"W_h",
",",
"W_x",
",",
"b",
"=",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"keras_W_h",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"1",
"]",
".",
"T",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"keras_W_x",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"0",
"]",
".",
"T",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"keras_layer",
".",
"use_bias",
":",
"keras_b",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"2",
"]",
"b",
".",
"append",
"(",
"keras_b",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"len",
"(",
"b",
")",
"==",
"0",
":",
"b",
"=",
"None",
"# Set activation type",
"inner_activation_str",
"=",
"_get_recurrent_activation_name_from_keras",
"(",
"keras_layer",
".",
"recurrent_activation",
")",
"activation_str",
"=",
"_get_recurrent_activation_name_from_keras",
"(",
"keras_layer",
".",
"activation",
")",
"# Add to the network",
"builder",
".",
"add_unilstm",
"(",
"name",
"=",
"layer",
",",
"W_h",
"=",
"W_h",
",",
"W_x",
"=",
"W_x",
",",
"b",
"=",
"b",
",",
"hidden_size",
"=",
"hidden_size",
",",
"input_size",
"=",
"input_size",
",",
"input_names",
"=",
"input_names",
",",
"output_names",
"=",
"output_names",
",",
"inner_activation",
"=",
"inner_activation_str",
",",
"cell_state_update_activation",
"=",
"activation_str",
",",
"output_activation",
"=",
"activation_str",
",",
"output_all",
"=",
"output_all",
",",
"forget_bias",
"=",
"keras_layer",
".",
"unit_forget_bias",
",",
"reverse_input",
"=",
"reverse_input",
")"
] | Convert an LSTM layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"an",
"LSTM",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L997-L1055 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_gru | def convert_gru(builder, layer, input_names, output_names, keras_layer):
"""
Convert a GRU layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
hidden_size = keras_layer.units
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
# Keras: Z R O
# CoreML: Z R O
W_h, W_x, b = ([], [], [])
keras_W_h = keras_layer.get_weights()[1].T
W_h.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.get_weights()[0].T
W_x.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.use_bias:
keras_b = keras_layer.get_weights()[2]
b.append(keras_b[0 * hidden_size:][:hidden_size])
b.append(keras_b[1 * hidden_size:][:hidden_size])
b.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b) == 0:
b = None
# Set actication type
inner_activation_str = _get_recurrent_activation_name_from_keras(keras_layer.recurrent_activation)
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_gru(
name = layer,
W_h = W_h, W_x = W_x, b = b,
input_size = input_size,
hidden_size = hidden_size,
input_names = input_names,
output_names = output_names,
activation = activation_str,
inner_activation = inner_activation_str,
output_all = output_all,
reverse_input = reverse_input) | python | def convert_gru(builder, layer, input_names, output_names, keras_layer):
"""
Convert a GRU layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
hidden_size = keras_layer.units
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
# Keras: Z R O
# CoreML: Z R O
W_h, W_x, b = ([], [], [])
keras_W_h = keras_layer.get_weights()[1].T
W_h.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.get_weights()[0].T
W_x.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.use_bias:
keras_b = keras_layer.get_weights()[2]
b.append(keras_b[0 * hidden_size:][:hidden_size])
b.append(keras_b[1 * hidden_size:][:hidden_size])
b.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b) == 0:
b = None
# Set actication type
inner_activation_str = _get_recurrent_activation_name_from_keras(keras_layer.recurrent_activation)
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_gru(
name = layer,
W_h = W_h, W_x = W_x, b = b,
input_size = input_size,
hidden_size = hidden_size,
input_names = input_names,
output_names = output_names,
activation = activation_str,
inner_activation = inner_activation_str,
output_all = output_all,
reverse_input = reverse_input) | [
"def",
"convert_gru",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"hidden_size",
"=",
"keras_layer",
".",
"units",
"input_size",
"=",
"keras_layer",
".",
"input_shape",
"[",
"-",
"1",
"]",
"output_all",
"=",
"keras_layer",
".",
"return_sequences",
"reverse_input",
"=",
"keras_layer",
".",
"go_backwards",
"# Keras: Z R O",
"# CoreML: Z R O",
"W_h",
",",
"W_x",
",",
"b",
"=",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"keras_W_h",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"1",
"]",
".",
"T",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"keras_W_x",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"0",
"]",
".",
"T",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"keras_layer",
".",
"use_bias",
":",
"keras_b",
"=",
"keras_layer",
".",
"get_weights",
"(",
")",
"[",
"2",
"]",
"b",
".",
"append",
"(",
"keras_b",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"len",
"(",
"b",
")",
"==",
"0",
":",
"b",
"=",
"None",
"# Set actication type",
"inner_activation_str",
"=",
"_get_recurrent_activation_name_from_keras",
"(",
"keras_layer",
".",
"recurrent_activation",
")",
"activation_str",
"=",
"_get_recurrent_activation_name_from_keras",
"(",
"keras_layer",
".",
"activation",
")",
"# Add to the network",
"builder",
".",
"add_gru",
"(",
"name",
"=",
"layer",
",",
"W_h",
"=",
"W_h",
",",
"W_x",
"=",
"W_x",
",",
"b",
"=",
"b",
",",
"input_size",
"=",
"input_size",
",",
"hidden_size",
"=",
"hidden_size",
",",
"input_names",
"=",
"input_names",
",",
"output_names",
"=",
"output_names",
",",
"activation",
"=",
"activation_str",
",",
"inner_activation",
"=",
"inner_activation_str",
",",
"output_all",
"=",
"output_all",
",",
"reverse_input",
"=",
"reverse_input",
")"
] | Convert a GRU layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"a",
"GRU",
"layer",
"from",
"keras",
"to",
"coreml",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L1057-L1112 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py | convert_bidirectional | def convert_bidirectional(builder, layer, input_names, output_names, keras_layer):
"""
Convert a bidirectional layer from keras to coreml.
Currently assumes the units are LSTMs.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_size = keras_layer.input_shape[-1]
lstm_layer = keras_layer.forward_layer
if (type(lstm_layer) != _keras.layers.recurrent.LSTM):
raise TypeError('Bidirectional layers only supported with LSTM')
if lstm_layer.go_backwards:
raise TypeError(' \'go_backwards\' mode not supported with Bidirectional layers')
output_all = keras_layer.return_sequences
hidden_size = lstm_layer.units
# Keras: I C F O; W_x, W_h, b
# CoreML: I F O G; W_h and W_x are separated
# Keras has all forward weights, followed by backward in the same order
W_h, W_x, b = ([], [], [])
keras_W_h = keras_layer.forward_layer.get_weights()[1].T
W_h.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[3 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.forward_layer.get_weights()[0].T
W_x.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[3 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.forward_layer.use_bias:
keras_b = keras_layer.forward_layer.get_weights()[2]
b.append(keras_b[0 * hidden_size:][:hidden_size])
b.append(keras_b[1 * hidden_size:][:hidden_size])
b.append(keras_b[3 * hidden_size:][:hidden_size])
b.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b) == 0:
b = None
W_h_back, W_x_back, b_back = ([],[],[])
keras_W_h = keras_layer.backward_layer.get_weights()[1].T
W_h_back.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h_back.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h_back.append(keras_W_h[3 * hidden_size:][:hidden_size])
W_h_back.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.backward_layer.get_weights()[0].T
W_x_back.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x_back.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x_back.append(keras_W_x[3 * hidden_size:][:hidden_size])
W_x_back.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.backward_layer.use_bias:
keras_b = keras_layer.backward_layer.get_weights()[2]
b_back.append(keras_b[0 * hidden_size:][:hidden_size])
b_back.append(keras_b[1 * hidden_size:][:hidden_size])
b_back.append(keras_b[3 * hidden_size:][:hidden_size])
b_back.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b_back) == 0:
b_back = None
if (b == None and b_back != None) or (b != None and b_back == None):
raise ValueError('Unsupported Bi-directional LSTM configuration. Bias must be enabled/disabled for both directions.')
# Set activation type
inner_activation_str = _get_recurrent_activation_name_from_keras(lstm_layer.recurrent_activation)
activation_str = _get_recurrent_activation_name_from_keras(lstm_layer.activation)
output_name_1 = output_names[0]
if hasattr(keras_layer, 'merge_mode'):
merge_mode = keras_layer.merge_mode
if merge_mode not in ['concat','sum','mul','ave']:
raise NotImplementedError('merge_mode \'%s\' in Bidirectional LSTM not supported currently' % merge_mode)
if merge_mode != 'concat':
output_name_1 += '_concatenated_bilstm_output'
# Add to the network
builder.add_bidirlstm(
name = layer,
W_h = W_h, W_x = W_x, b = b,
W_h_back = W_h_back, W_x_back = W_x_back, b_back = b_back,
hidden_size=hidden_size,
input_size=input_size,
input_names=input_names,
output_names=[output_name_1] + output_names[1:],
inner_activation = inner_activation_str,
cell_state_update_activation = activation_str,
output_activation = activation_str,
forget_bias = lstm_layer.unit_forget_bias,
output_all = output_all)
if output_name_1 != output_names[0]:
mode = 'CONCAT'
if merge_mode == 'sum':
mode = 'ADD'
elif merge_mode == 'ave':
mode = 'AVE'
elif merge_mode == 'mul':
mode = 'MULTIPLY'
builder.add_split(name = layer + '_split',
input_name= output_name_1,
output_names= [output_names[0] + '_forward', output_names[0] + '_backward'])
builder.add_elementwise(name = layer + '_elementwise',
input_names = [output_names[0] + '_forward', output_names[0] + '_backward'],
output_name = output_names[0],
mode = mode) | python | def convert_bidirectional(builder, layer, input_names, output_names, keras_layer):
"""
Convert a bidirectional layer from keras to coreml.
Currently assumes the units are LSTMs.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_size = keras_layer.input_shape[-1]
lstm_layer = keras_layer.forward_layer
if (type(lstm_layer) != _keras.layers.recurrent.LSTM):
raise TypeError('Bidirectional layers only supported with LSTM')
if lstm_layer.go_backwards:
raise TypeError(' \'go_backwards\' mode not supported with Bidirectional layers')
output_all = keras_layer.return_sequences
hidden_size = lstm_layer.units
# Keras: I C F O; W_x, W_h, b
# CoreML: I F O G; W_h and W_x are separated
# Keras has all forward weights, followed by backward in the same order
W_h, W_x, b = ([], [], [])
keras_W_h = keras_layer.forward_layer.get_weights()[1].T
W_h.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[3 * hidden_size:][:hidden_size])
W_h.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.forward_layer.get_weights()[0].T
W_x.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[3 * hidden_size:][:hidden_size])
W_x.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.forward_layer.use_bias:
keras_b = keras_layer.forward_layer.get_weights()[2]
b.append(keras_b[0 * hidden_size:][:hidden_size])
b.append(keras_b[1 * hidden_size:][:hidden_size])
b.append(keras_b[3 * hidden_size:][:hidden_size])
b.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b) == 0:
b = None
W_h_back, W_x_back, b_back = ([],[],[])
keras_W_h = keras_layer.backward_layer.get_weights()[1].T
W_h_back.append(keras_W_h[0 * hidden_size:][:hidden_size])
W_h_back.append(keras_W_h[1 * hidden_size:][:hidden_size])
W_h_back.append(keras_W_h[3 * hidden_size:][:hidden_size])
W_h_back.append(keras_W_h[2 * hidden_size:][:hidden_size])
keras_W_x = keras_layer.backward_layer.get_weights()[0].T
W_x_back.append(keras_W_x[0 * hidden_size:][:hidden_size])
W_x_back.append(keras_W_x[1 * hidden_size:][:hidden_size])
W_x_back.append(keras_W_x[3 * hidden_size:][:hidden_size])
W_x_back.append(keras_W_x[2 * hidden_size:][:hidden_size])
if keras_layer.backward_layer.use_bias:
keras_b = keras_layer.backward_layer.get_weights()[2]
b_back.append(keras_b[0 * hidden_size:][:hidden_size])
b_back.append(keras_b[1 * hidden_size:][:hidden_size])
b_back.append(keras_b[3 * hidden_size:][:hidden_size])
b_back.append(keras_b[2 * hidden_size:][:hidden_size])
if len(b_back) == 0:
b_back = None
if (b == None and b_back != None) or (b != None and b_back == None):
raise ValueError('Unsupported Bi-directional LSTM configuration. Bias must be enabled/disabled for both directions.')
# Set activation type
inner_activation_str = _get_recurrent_activation_name_from_keras(lstm_layer.recurrent_activation)
activation_str = _get_recurrent_activation_name_from_keras(lstm_layer.activation)
output_name_1 = output_names[0]
if hasattr(keras_layer, 'merge_mode'):
merge_mode = keras_layer.merge_mode
if merge_mode not in ['concat','sum','mul','ave']:
raise NotImplementedError('merge_mode \'%s\' in Bidirectional LSTM not supported currently' % merge_mode)
if merge_mode != 'concat':
output_name_1 += '_concatenated_bilstm_output'
# Add to the network
builder.add_bidirlstm(
name = layer,
W_h = W_h, W_x = W_x, b = b,
W_h_back = W_h_back, W_x_back = W_x_back, b_back = b_back,
hidden_size=hidden_size,
input_size=input_size,
input_names=input_names,
output_names=[output_name_1] + output_names[1:],
inner_activation = inner_activation_str,
cell_state_update_activation = activation_str,
output_activation = activation_str,
forget_bias = lstm_layer.unit_forget_bias,
output_all = output_all)
if output_name_1 != output_names[0]:
mode = 'CONCAT'
if merge_mode == 'sum':
mode = 'ADD'
elif merge_mode == 'ave':
mode = 'AVE'
elif merge_mode == 'mul':
mode = 'MULTIPLY'
builder.add_split(name = layer + '_split',
input_name= output_name_1,
output_names= [output_names[0] + '_forward', output_names[0] + '_backward'])
builder.add_elementwise(name = layer + '_elementwise',
input_names = [output_names[0] + '_forward', output_names[0] + '_backward'],
output_name = output_names[0],
mode = mode) | [
"def",
"convert_bidirectional",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"input_size",
"=",
"keras_layer",
".",
"input_shape",
"[",
"-",
"1",
"]",
"lstm_layer",
"=",
"keras_layer",
".",
"forward_layer",
"if",
"(",
"type",
"(",
"lstm_layer",
")",
"!=",
"_keras",
".",
"layers",
".",
"recurrent",
".",
"LSTM",
")",
":",
"raise",
"TypeError",
"(",
"'Bidirectional layers only supported with LSTM'",
")",
"if",
"lstm_layer",
".",
"go_backwards",
":",
"raise",
"TypeError",
"(",
"' \\'go_backwards\\' mode not supported with Bidirectional layers'",
")",
"output_all",
"=",
"keras_layer",
".",
"return_sequences",
"hidden_size",
"=",
"lstm_layer",
".",
"units",
"# Keras: I C F O; W_x, W_h, b",
"# CoreML: I F O G; W_h and W_x are separated",
"# Keras has all forward weights, followed by backward in the same order",
"W_h",
",",
"W_x",
",",
"b",
"=",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"keras_W_h",
"=",
"keras_layer",
".",
"forward_layer",
".",
"get_weights",
"(",
")",
"[",
"1",
"]",
".",
"T",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h",
".",
"append",
"(",
"keras_W_h",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"keras_W_x",
"=",
"keras_layer",
".",
"forward_layer",
".",
"get_weights",
"(",
")",
"[",
"0",
"]",
".",
"T",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x",
".",
"append",
"(",
"keras_W_x",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"keras_layer",
".",
"forward_layer",
".",
"use_bias",
":",
"keras_b",
"=",
"keras_layer",
".",
"forward_layer",
".",
"get_weights",
"(",
")",
"[",
"2",
"]",
"b",
".",
"append",
"(",
"keras_b",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b",
".",
"append",
"(",
"keras_b",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"len",
"(",
"b",
")",
"==",
"0",
":",
"b",
"=",
"None",
"W_h_back",
",",
"W_x_back",
",",
"b_back",
"=",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"keras_W_h",
"=",
"keras_layer",
".",
"backward_layer",
".",
"get_weights",
"(",
")",
"[",
"1",
"]",
".",
"T",
"W_h_back",
".",
"append",
"(",
"keras_W_h",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h_back",
".",
"append",
"(",
"keras_W_h",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h_back",
".",
"append",
"(",
"keras_W_h",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_h_back",
".",
"append",
"(",
"keras_W_h",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"keras_W_x",
"=",
"keras_layer",
".",
"backward_layer",
".",
"get_weights",
"(",
")",
"[",
"0",
"]",
".",
"T",
"W_x_back",
".",
"append",
"(",
"keras_W_x",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x_back",
".",
"append",
"(",
"keras_W_x",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x_back",
".",
"append",
"(",
"keras_W_x",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"W_x_back",
".",
"append",
"(",
"keras_W_x",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"keras_layer",
".",
"backward_layer",
".",
"use_bias",
":",
"keras_b",
"=",
"keras_layer",
".",
"backward_layer",
".",
"get_weights",
"(",
")",
"[",
"2",
"]",
"b_back",
".",
"append",
"(",
"keras_b",
"[",
"0",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b_back",
".",
"append",
"(",
"keras_b",
"[",
"1",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b_back",
".",
"append",
"(",
"keras_b",
"[",
"3",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"b_back",
".",
"append",
"(",
"keras_b",
"[",
"2",
"*",
"hidden_size",
":",
"]",
"[",
":",
"hidden_size",
"]",
")",
"if",
"len",
"(",
"b_back",
")",
"==",
"0",
":",
"b_back",
"=",
"None",
"if",
"(",
"b",
"==",
"None",
"and",
"b_back",
"!=",
"None",
")",
"or",
"(",
"b",
"!=",
"None",
"and",
"b_back",
"==",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Unsupported Bi-directional LSTM configuration. Bias must be enabled/disabled for both directions.'",
")",
"# Set activation type",
"inner_activation_str",
"=",
"_get_recurrent_activation_name_from_keras",
"(",
"lstm_layer",
".",
"recurrent_activation",
")",
"activation_str",
"=",
"_get_recurrent_activation_name_from_keras",
"(",
"lstm_layer",
".",
"activation",
")",
"output_name_1",
"=",
"output_names",
"[",
"0",
"]",
"if",
"hasattr",
"(",
"keras_layer",
",",
"'merge_mode'",
")",
":",
"merge_mode",
"=",
"keras_layer",
".",
"merge_mode",
"if",
"merge_mode",
"not",
"in",
"[",
"'concat'",
",",
"'sum'",
",",
"'mul'",
",",
"'ave'",
"]",
":",
"raise",
"NotImplementedError",
"(",
"'merge_mode \\'%s\\' in Bidirectional LSTM not supported currently'",
"%",
"merge_mode",
")",
"if",
"merge_mode",
"!=",
"'concat'",
":",
"output_name_1",
"+=",
"'_concatenated_bilstm_output'",
"# Add to the network",
"builder",
".",
"add_bidirlstm",
"(",
"name",
"=",
"layer",
",",
"W_h",
"=",
"W_h",
",",
"W_x",
"=",
"W_x",
",",
"b",
"=",
"b",
",",
"W_h_back",
"=",
"W_h_back",
",",
"W_x_back",
"=",
"W_x_back",
",",
"b_back",
"=",
"b_back",
",",
"hidden_size",
"=",
"hidden_size",
",",
"input_size",
"=",
"input_size",
",",
"input_names",
"=",
"input_names",
",",
"output_names",
"=",
"[",
"output_name_1",
"]",
"+",
"output_names",
"[",
"1",
":",
"]",
",",
"inner_activation",
"=",
"inner_activation_str",
",",
"cell_state_update_activation",
"=",
"activation_str",
",",
"output_activation",
"=",
"activation_str",
",",
"forget_bias",
"=",
"lstm_layer",
".",
"unit_forget_bias",
",",
"output_all",
"=",
"output_all",
")",
"if",
"output_name_1",
"!=",
"output_names",
"[",
"0",
"]",
":",
"mode",
"=",
"'CONCAT'",
"if",
"merge_mode",
"==",
"'sum'",
":",
"mode",
"=",
"'ADD'",
"elif",
"merge_mode",
"==",
"'ave'",
":",
"mode",
"=",
"'AVE'",
"elif",
"merge_mode",
"==",
"'mul'",
":",
"mode",
"=",
"'MULTIPLY'",
"builder",
".",
"add_split",
"(",
"name",
"=",
"layer",
"+",
"'_split'",
",",
"input_name",
"=",
"output_name_1",
",",
"output_names",
"=",
"[",
"output_names",
"[",
"0",
"]",
"+",
"'_forward'",
",",
"output_names",
"[",
"0",
"]",
"+",
"'_backward'",
"]",
")",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"layer",
"+",
"'_elementwise'",
",",
"input_names",
"=",
"[",
"output_names",
"[",
"0",
"]",
"+",
"'_forward'",
",",
"output_names",
"[",
"0",
"]",
"+",
"'_backward'",
"]",
",",
"output_name",
"=",
"output_names",
"[",
"0",
"]",
",",
"mode",
"=",
"mode",
")"
] | Convert a bidirectional layer from keras to coreml.
Currently assumes the units are LSTMs.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | [
"Convert",
"a",
"bidirectional",
"layer",
"from",
"keras",
"to",
"coreml",
".",
"Currently",
"assumes",
"the",
"units",
"are",
"LSTMs",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L1114-L1232 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/simple_instructions.py | SimpleInstructions.SLICE_0 | def SLICE_0(self, instr):
'obj[:]'
value = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Load(), **kw)
self.ast_stack.append(subscr) | python | def SLICE_0(self, instr):
'obj[:]'
value = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Load(), **kw)
self.ast_stack.append(subscr) | [
"def",
"SLICE_0",
"(",
"self",
",",
"instr",
")",
":",
"value",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"kw",
"=",
"dict",
"(",
"lineno",
"=",
"instr",
".",
"lineno",
",",
"col_offset",
"=",
"0",
")",
"slice",
"=",
"_ast",
".",
"Slice",
"(",
"lower",
"=",
"None",
",",
"step",
"=",
"None",
",",
"upper",
"=",
"None",
",",
"*",
"*",
"kw",
")",
"subscr",
"=",
"_ast",
".",
"Subscript",
"(",
"value",
"=",
"value",
",",
"slice",
"=",
"slice",
",",
"ctx",
"=",
"_ast",
".",
"Load",
"(",
")",
",",
"*",
"*",
"kw",
")",
"self",
".",
"ast_stack",
".",
"append",
"(",
"subscr",
")"
] | obj[:] | [
"obj",
"[",
":",
"]"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/simple_instructions.py#L723-L731 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/simple_instructions.py | SimpleInstructions.STORE_SLICE_1 | def STORE_SLICE_1(self, instr):
'obj[lower:] = expr'
lower = self.ast_stack.pop()
value = self.ast_stack.pop()
expr = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=lower, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
assign = _ast.Assign(targets=[subscr], value=expr, **kw)
self.ast_stack.append(assign) | python | def STORE_SLICE_1(self, instr):
'obj[lower:] = expr'
lower = self.ast_stack.pop()
value = self.ast_stack.pop()
expr = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=lower, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
assign = _ast.Assign(targets=[subscr], value=expr, **kw)
self.ast_stack.append(assign) | [
"def",
"STORE_SLICE_1",
"(",
"self",
",",
"instr",
")",
":",
"lower",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"value",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"expr",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"kw",
"=",
"dict",
"(",
"lineno",
"=",
"instr",
".",
"lineno",
",",
"col_offset",
"=",
"0",
")",
"slice",
"=",
"_ast",
".",
"Slice",
"(",
"lower",
"=",
"lower",
",",
"step",
"=",
"None",
",",
"upper",
"=",
"None",
",",
"*",
"*",
"kw",
")",
"subscr",
"=",
"_ast",
".",
"Subscript",
"(",
"value",
"=",
"value",
",",
"slice",
"=",
"slice",
",",
"ctx",
"=",
"_ast",
".",
"Store",
"(",
")",
",",
"*",
"*",
"kw",
")",
"assign",
"=",
"_ast",
".",
"Assign",
"(",
"targets",
"=",
"[",
"subscr",
"]",
",",
"value",
"=",
"expr",
",",
"*",
"*",
"kw",
")",
"self",
".",
"ast_stack",
".",
"append",
"(",
"assign",
")"
] | obj[lower:] = expr | [
"obj",
"[",
"lower",
":",
"]",
"=",
"expr"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/simple_instructions.py#L802-L813 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/simple_instructions.py | SimpleInstructions.STORE_SLICE_3 | def STORE_SLICE_3(self, instr):
'obj[lower:upper] = expr'
upper = self.ast_stack.pop()
lower = self.ast_stack.pop()
value = self.ast_stack.pop()
expr = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=lower, step=None, upper=upper, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
if isinstance(expr, _ast.AugAssign):
assign = expr
result = cmp_ast(expr.target, subscr)
assert result
else:
assign = _ast.Assign(targets=[subscr], value=expr, **kw)
self.ast_stack.append(assign) | python | def STORE_SLICE_3(self, instr):
'obj[lower:upper] = expr'
upper = self.ast_stack.pop()
lower = self.ast_stack.pop()
value = self.ast_stack.pop()
expr = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=lower, step=None, upper=upper, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
if isinstance(expr, _ast.AugAssign):
assign = expr
result = cmp_ast(expr.target, subscr)
assert result
else:
assign = _ast.Assign(targets=[subscr], value=expr, **kw)
self.ast_stack.append(assign) | [
"def",
"STORE_SLICE_3",
"(",
"self",
",",
"instr",
")",
":",
"upper",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"lower",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"value",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"expr",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"kw",
"=",
"dict",
"(",
"lineno",
"=",
"instr",
".",
"lineno",
",",
"col_offset",
"=",
"0",
")",
"slice",
"=",
"_ast",
".",
"Slice",
"(",
"lower",
"=",
"lower",
",",
"step",
"=",
"None",
",",
"upper",
"=",
"upper",
",",
"*",
"*",
"kw",
")",
"subscr",
"=",
"_ast",
".",
"Subscript",
"(",
"value",
"=",
"value",
",",
"slice",
"=",
"slice",
",",
"ctx",
"=",
"_ast",
".",
"Store",
"(",
")",
",",
"*",
"*",
"kw",
")",
"if",
"isinstance",
"(",
"expr",
",",
"_ast",
".",
"AugAssign",
")",
":",
"assign",
"=",
"expr",
"result",
"=",
"cmp_ast",
"(",
"expr",
".",
"target",
",",
"subscr",
")",
"assert",
"result",
"else",
":",
"assign",
"=",
"_ast",
".",
"Assign",
"(",
"targets",
"=",
"[",
"subscr",
"]",
",",
"value",
"=",
"expr",
",",
"*",
"*",
"kw",
")",
"self",
".",
"ast_stack",
".",
"append",
"(",
"assign",
")"
] | obj[lower:upper] = expr | [
"obj",
"[",
"lower",
":",
"upper",
"]",
"=",
"expr"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/simple_instructions.py#L829-L849 | train |
apple/turicreate | src/unity/python/turicreate/meta/decompiler/simple_instructions.py | SimpleInstructions.DELETE_SLICE_0 | def DELETE_SLICE_0(self, instr):
'obj[:] = expr'
value = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Del(), **kw)
delete = _ast.Delete(targets=[subscr], **kw)
self.ast_stack.append(delete) | python | def DELETE_SLICE_0(self, instr):
'obj[:] = expr'
value = self.ast_stack.pop()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Del(), **kw)
delete = _ast.Delete(targets=[subscr], **kw)
self.ast_stack.append(delete) | [
"def",
"DELETE_SLICE_0",
"(",
"self",
",",
"instr",
")",
":",
"value",
"=",
"self",
".",
"ast_stack",
".",
"pop",
"(",
")",
"kw",
"=",
"dict",
"(",
"lineno",
"=",
"instr",
".",
"lineno",
",",
"col_offset",
"=",
"0",
")",
"slice",
"=",
"_ast",
".",
"Slice",
"(",
"lower",
"=",
"None",
",",
"step",
"=",
"None",
",",
"upper",
"=",
"None",
",",
"*",
"*",
"kw",
")",
"subscr",
"=",
"_ast",
".",
"Subscript",
"(",
"value",
"=",
"value",
",",
"slice",
"=",
"slice",
",",
"ctx",
"=",
"_ast",
".",
"Del",
"(",
")",
",",
"*",
"*",
"kw",
")",
"delete",
"=",
"_ast",
".",
"Delete",
"(",
"targets",
"=",
"[",
"subscr",
"]",
",",
"*",
"*",
"kw",
")",
"self",
".",
"ast_stack",
".",
"append",
"(",
"delete",
")"
] | obj[:] = expr | [
"obj",
"[",
":",
"]",
"=",
"expr"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/decompiler/simple_instructions.py#L851-L860 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/item_content_recommender.py | create | def create(item_data, item_id,
observation_data = None,
user_id = None, target = None,
weights = 'auto',
similarity_metrics = 'auto',
item_data_transform = 'auto',
max_item_neighborhood_size = 64, verbose=True):
"""Create a content-based recommender model in which the similarity
between the items recommended is determined by the content of
those items rather than learned from user interaction data.
The similarity score between two items is calculated by first
computing the similarity between the item data for each column,
then taking a weighted average of the per-column similarities to
get the final similarity. The recommendations are generated
according to the average similarity of a candidate item to all the
items in a user's set of rated items.
Parameters
----------
item_data : SFrame
An SFrame giving the content of the items to use to learn the
structure of similar items. The SFrame must have one column
that matches the name of the `item_id`; this gives a unique
identifier that can then be used to make recommendations. The rest
of the columns are then used in the distance calculations
below.
item_id : string
The name of the column in item_data (and `observation_data`,
if given) that represents the item ID.
observation_data : None (optional)
An SFrame giving user and item interaction data. This
information is stored in the model, and the recommender will
recommend the items with the most similar content to the
items that were present and/or highly rated for that user.
user_id : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the user identifier.
target : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the target or rating.
weights : dict or 'auto' (optional)
If given, then weights must be a dictionary of column names
present in item_data to weights between the column names. If
'auto' is given, the all columns are weighted equally.
max_item_neighborhood_size : int, 64
For each item, we hold this many similar items to use when
aggregating models for predictions. Decreasing this value
decreases the memory required by the model and decreases the
time required to generate recommendations, but it may also
decrease recommendation accuracy.
verbose : True or False (optional)
If set to False, then less information is printed.
Examples
--------
>>> item_data = tc.SFrame({"my_item_id" : range(4),
"data_1" : [ [1, 0], [1, 0], [0, 1], [0.5, 0.5] ],
"data_2" : [ [0, 1], [1, 0], [0, 1], [0.5, 0.5] ] })
>>> m = tc.recommender.item_content_recommender.create(item_data, "my_item_id")
>>> m.recommend_from_interactions([0])
Columns:
my_item_id int
score float
rank int
Rows: 3
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 1 | 0.5 | 2 |
| 2 | 0.5 | 3 |
+------------+----------------+------+
[3 rows x 3 columns]
>>> m.recommend_from_interactions([0, 1])
Columns:
my_item_id int
score float
rank int
Rows: 2
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 2 | 0.25 | 2 |
+------------+----------------+------+
[2 rows x 3 columns]
"""
from turicreate._cython.cy_server import QuietProgress
# item_data is correct type
if not isinstance(item_data, _SFrame) or item_data.num_rows() == 0:
raise TypeError("`item_data` argument must be a non-empty SFrame giving item data to use for similarities.")
# Error checking on column names
item_columns = set(item_data.column_names())
if item_id not in item_columns:
raise ValueError("Item column given as 'item_id = %s', but this is not found in `item_data` SFrame."
% item_id)
# Now, get the set ready to test for other argument issues.
item_columns.remove(item_id)
if weights != 'auto':
if type(weights) is not dict:
raise TypeError("`weights` parameter must be 'auto' or a dictionary of column "
"names in `item_data` to weight values.")
bad_columns = [col_name for col_name in item_columns if col_name not in item_columns]
if bad_columns:
raise ValueError("Columns %s given in weights, but these are not found in item_data."
% ', '.join(bad_columns))
# Now, set any columns not given in the weights column to be
# weight 0.
for col_name in item_columns:
weights.setdefault(col_name, 0)
################################################################################
# Now, check the feature transformer stuff.
# Pass it through a feature transformer.
if item_data_transform == 'auto':
item_data_transform = _turicreate.toolkits._feature_engineering.AutoVectorizer(excluded_features = [item_id])
if not isinstance(item_data_transform, _turicreate.toolkits._feature_engineering.TransformerBase):
raise TypeError("item_data_transform must be 'auto' or a valid feature_engineering transformer instance.")
# Transform the input data.
item_data = item_data_transform.fit_transform(item_data)
# Translate any string columns to actually work in nearest
# neighbors by making it a categorical list. Also translate lists
# into dicts, and normalize numeric columns.
gaussian_kernel_metrics = set()
for c in item_columns:
if item_data[c].dtype is str:
item_data[c] = item_data[c].apply(lambda s: {s : 1})
elif item_data[c].dtype in [float, int]:
item_data[c] = (item_data[c] - item_data[c].mean()) / max(item_data[c].std(), 1e-8)
gaussian_kernel_metrics.add(c)
if verbose:
print("Applying transform:")
print(item_data_transform)
opts = {}
model_proxy = _turicreate.extensions.item_content_recommender()
model_proxy.init_options(opts)
# The user_id is implicit if none is given.
if user_id is None:
user_id = "__implicit_user__"
normalization_factor = 1
# Set the observation data.
if observation_data is None:
# In this case, it's important to make this a string type. If
# the user column is not given, it may be given at recommend
# time, in which case it is cast to a string type and cast
# back if necessary.
empty_user = _turicreate.SArray([], dtype=str)
empty_item = _turicreate.SArray([], dtype=item_data[item_id].dtype)
observation_data = _turicreate.SFrame( {user_id : empty_user, item_id : empty_item} )
# Now, work out stuff for the observation_data component
normalization_factor = 1
# 1 for the item_id column.
if item_data.num_columns() >= 3:
if weights == "auto":
# TODO: automatically tune this.
weights = {col_name : 1 for col_name in item_data.column_names() if col_name != item_id}
# Use the abs value here in case users pass in weights with negative values.
normalization_factor = sum(abs(v) for v in weights.values())
if normalization_factor == 0:
raise ValueError("Weights cannot all be set to 0.")
distance = [([col_name], ("gaussian_kernel" if col_name in gaussian_kernel_metrics else "cosine"), weight)
for col_name, weight in weights.items()]
else:
distance = "cosine"
# Now, build the nearest neighbors model:
nn = _turicreate.nearest_neighbors.create(item_data, label=item_id, distance = distance, verbose = verbose)
graph = nn.query(item_data, label = item_id, k=max_item_neighborhood_size, verbose = verbose)
graph = graph.rename({"query_label" : item_id,
"reference_label" : "similar",
"distance" : "score"}, inplace=True)
def process_weights(x):
return max(-1, min(1, 1 - x / normalization_factor))
graph["score"] = graph["score"].apply(process_weights)
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'similarity_type' : "cosine",
'max_item_neighborhood_size' : max_item_neighborhood_size}
user_data = _turicreate.SFrame()
extra_data = {"nearest_items" : graph}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return ItemContentRecommender(model_proxy) | python | def create(item_data, item_id,
observation_data = None,
user_id = None, target = None,
weights = 'auto',
similarity_metrics = 'auto',
item_data_transform = 'auto',
max_item_neighborhood_size = 64, verbose=True):
"""Create a content-based recommender model in which the similarity
between the items recommended is determined by the content of
those items rather than learned from user interaction data.
The similarity score between two items is calculated by first
computing the similarity between the item data for each column,
then taking a weighted average of the per-column similarities to
get the final similarity. The recommendations are generated
according to the average similarity of a candidate item to all the
items in a user's set of rated items.
Parameters
----------
item_data : SFrame
An SFrame giving the content of the items to use to learn the
structure of similar items. The SFrame must have one column
that matches the name of the `item_id`; this gives a unique
identifier that can then be used to make recommendations. The rest
of the columns are then used in the distance calculations
below.
item_id : string
The name of the column in item_data (and `observation_data`,
if given) that represents the item ID.
observation_data : None (optional)
An SFrame giving user and item interaction data. This
information is stored in the model, and the recommender will
recommend the items with the most similar content to the
items that were present and/or highly rated for that user.
user_id : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the user identifier.
target : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the target or rating.
weights : dict or 'auto' (optional)
If given, then weights must be a dictionary of column names
present in item_data to weights between the column names. If
'auto' is given, the all columns are weighted equally.
max_item_neighborhood_size : int, 64
For each item, we hold this many similar items to use when
aggregating models for predictions. Decreasing this value
decreases the memory required by the model and decreases the
time required to generate recommendations, but it may also
decrease recommendation accuracy.
verbose : True or False (optional)
If set to False, then less information is printed.
Examples
--------
>>> item_data = tc.SFrame({"my_item_id" : range(4),
"data_1" : [ [1, 0], [1, 0], [0, 1], [0.5, 0.5] ],
"data_2" : [ [0, 1], [1, 0], [0, 1], [0.5, 0.5] ] })
>>> m = tc.recommender.item_content_recommender.create(item_data, "my_item_id")
>>> m.recommend_from_interactions([0])
Columns:
my_item_id int
score float
rank int
Rows: 3
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 1 | 0.5 | 2 |
| 2 | 0.5 | 3 |
+------------+----------------+------+
[3 rows x 3 columns]
>>> m.recommend_from_interactions([0, 1])
Columns:
my_item_id int
score float
rank int
Rows: 2
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 2 | 0.25 | 2 |
+------------+----------------+------+
[2 rows x 3 columns]
"""
from turicreate._cython.cy_server import QuietProgress
# item_data is correct type
if not isinstance(item_data, _SFrame) or item_data.num_rows() == 0:
raise TypeError("`item_data` argument must be a non-empty SFrame giving item data to use for similarities.")
# Error checking on column names
item_columns = set(item_data.column_names())
if item_id not in item_columns:
raise ValueError("Item column given as 'item_id = %s', but this is not found in `item_data` SFrame."
% item_id)
# Now, get the set ready to test for other argument issues.
item_columns.remove(item_id)
if weights != 'auto':
if type(weights) is not dict:
raise TypeError("`weights` parameter must be 'auto' or a dictionary of column "
"names in `item_data` to weight values.")
bad_columns = [col_name for col_name in item_columns if col_name not in item_columns]
if bad_columns:
raise ValueError("Columns %s given in weights, but these are not found in item_data."
% ', '.join(bad_columns))
# Now, set any columns not given in the weights column to be
# weight 0.
for col_name in item_columns:
weights.setdefault(col_name, 0)
################################################################################
# Now, check the feature transformer stuff.
# Pass it through a feature transformer.
if item_data_transform == 'auto':
item_data_transform = _turicreate.toolkits._feature_engineering.AutoVectorizer(excluded_features = [item_id])
if not isinstance(item_data_transform, _turicreate.toolkits._feature_engineering.TransformerBase):
raise TypeError("item_data_transform must be 'auto' or a valid feature_engineering transformer instance.")
# Transform the input data.
item_data = item_data_transform.fit_transform(item_data)
# Translate any string columns to actually work in nearest
# neighbors by making it a categorical list. Also translate lists
# into dicts, and normalize numeric columns.
gaussian_kernel_metrics = set()
for c in item_columns:
if item_data[c].dtype is str:
item_data[c] = item_data[c].apply(lambda s: {s : 1})
elif item_data[c].dtype in [float, int]:
item_data[c] = (item_data[c] - item_data[c].mean()) / max(item_data[c].std(), 1e-8)
gaussian_kernel_metrics.add(c)
if verbose:
print("Applying transform:")
print(item_data_transform)
opts = {}
model_proxy = _turicreate.extensions.item_content_recommender()
model_proxy.init_options(opts)
# The user_id is implicit if none is given.
if user_id is None:
user_id = "__implicit_user__"
normalization_factor = 1
# Set the observation data.
if observation_data is None:
# In this case, it's important to make this a string type. If
# the user column is not given, it may be given at recommend
# time, in which case it is cast to a string type and cast
# back if necessary.
empty_user = _turicreate.SArray([], dtype=str)
empty_item = _turicreate.SArray([], dtype=item_data[item_id].dtype)
observation_data = _turicreate.SFrame( {user_id : empty_user, item_id : empty_item} )
# Now, work out stuff for the observation_data component
normalization_factor = 1
# 1 for the item_id column.
if item_data.num_columns() >= 3:
if weights == "auto":
# TODO: automatically tune this.
weights = {col_name : 1 for col_name in item_data.column_names() if col_name != item_id}
# Use the abs value here in case users pass in weights with negative values.
normalization_factor = sum(abs(v) for v in weights.values())
if normalization_factor == 0:
raise ValueError("Weights cannot all be set to 0.")
distance = [([col_name], ("gaussian_kernel" if col_name in gaussian_kernel_metrics else "cosine"), weight)
for col_name, weight in weights.items()]
else:
distance = "cosine"
# Now, build the nearest neighbors model:
nn = _turicreate.nearest_neighbors.create(item_data, label=item_id, distance = distance, verbose = verbose)
graph = nn.query(item_data, label = item_id, k=max_item_neighborhood_size, verbose = verbose)
graph = graph.rename({"query_label" : item_id,
"reference_label" : "similar",
"distance" : "score"}, inplace=True)
def process_weights(x):
return max(-1, min(1, 1 - x / normalization_factor))
graph["score"] = graph["score"].apply(process_weights)
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'similarity_type' : "cosine",
'max_item_neighborhood_size' : max_item_neighborhood_size}
user_data = _turicreate.SFrame()
extra_data = {"nearest_items" : graph}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return ItemContentRecommender(model_proxy) | [
"def",
"create",
"(",
"item_data",
",",
"item_id",
",",
"observation_data",
"=",
"None",
",",
"user_id",
"=",
"None",
",",
"target",
"=",
"None",
",",
"weights",
"=",
"'auto'",
",",
"similarity_metrics",
"=",
"'auto'",
",",
"item_data_transform",
"=",
"'auto'",
",",
"max_item_neighborhood_size",
"=",
"64",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"# item_data is correct type",
"if",
"not",
"isinstance",
"(",
"item_data",
",",
"_SFrame",
")",
"or",
"item_data",
".",
"num_rows",
"(",
")",
"==",
"0",
":",
"raise",
"TypeError",
"(",
"\"`item_data` argument must be a non-empty SFrame giving item data to use for similarities.\"",
")",
"# Error checking on column names",
"item_columns",
"=",
"set",
"(",
"item_data",
".",
"column_names",
"(",
")",
")",
"if",
"item_id",
"not",
"in",
"item_columns",
":",
"raise",
"ValueError",
"(",
"\"Item column given as 'item_id = %s', but this is not found in `item_data` SFrame.\"",
"%",
"item_id",
")",
"# Now, get the set ready to test for other argument issues.",
"item_columns",
".",
"remove",
"(",
"item_id",
")",
"if",
"weights",
"!=",
"'auto'",
":",
"if",
"type",
"(",
"weights",
")",
"is",
"not",
"dict",
":",
"raise",
"TypeError",
"(",
"\"`weights` parameter must be 'auto' or a dictionary of column \"",
"\"names in `item_data` to weight values.\"",
")",
"bad_columns",
"=",
"[",
"col_name",
"for",
"col_name",
"in",
"item_columns",
"if",
"col_name",
"not",
"in",
"item_columns",
"]",
"if",
"bad_columns",
":",
"raise",
"ValueError",
"(",
"\"Columns %s given in weights, but these are not found in item_data.\"",
"%",
"', '",
".",
"join",
"(",
"bad_columns",
")",
")",
"# Now, set any columns not given in the weights column to be",
"# weight 0.",
"for",
"col_name",
"in",
"item_columns",
":",
"weights",
".",
"setdefault",
"(",
"col_name",
",",
"0",
")",
"################################################################################",
"# Now, check the feature transformer stuff.",
"# Pass it through a feature transformer.",
"if",
"item_data_transform",
"==",
"'auto'",
":",
"item_data_transform",
"=",
"_turicreate",
".",
"toolkits",
".",
"_feature_engineering",
".",
"AutoVectorizer",
"(",
"excluded_features",
"=",
"[",
"item_id",
"]",
")",
"if",
"not",
"isinstance",
"(",
"item_data_transform",
",",
"_turicreate",
".",
"toolkits",
".",
"_feature_engineering",
".",
"TransformerBase",
")",
":",
"raise",
"TypeError",
"(",
"\"item_data_transform must be 'auto' or a valid feature_engineering transformer instance.\"",
")",
"# Transform the input data.",
"item_data",
"=",
"item_data_transform",
".",
"fit_transform",
"(",
"item_data",
")",
"# Translate any string columns to actually work in nearest",
"# neighbors by making it a categorical list. Also translate lists",
"# into dicts, and normalize numeric columns.",
"gaussian_kernel_metrics",
"=",
"set",
"(",
")",
"for",
"c",
"in",
"item_columns",
":",
"if",
"item_data",
"[",
"c",
"]",
".",
"dtype",
"is",
"str",
":",
"item_data",
"[",
"c",
"]",
"=",
"item_data",
"[",
"c",
"]",
".",
"apply",
"(",
"lambda",
"s",
":",
"{",
"s",
":",
"1",
"}",
")",
"elif",
"item_data",
"[",
"c",
"]",
".",
"dtype",
"in",
"[",
"float",
",",
"int",
"]",
":",
"item_data",
"[",
"c",
"]",
"=",
"(",
"item_data",
"[",
"c",
"]",
"-",
"item_data",
"[",
"c",
"]",
".",
"mean",
"(",
")",
")",
"/",
"max",
"(",
"item_data",
"[",
"c",
"]",
".",
"std",
"(",
")",
",",
"1e-8",
")",
"gaussian_kernel_metrics",
".",
"add",
"(",
"c",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Applying transform:\"",
")",
"print",
"(",
"item_data_transform",
")",
"opts",
"=",
"{",
"}",
"model_proxy",
"=",
"_turicreate",
".",
"extensions",
".",
"item_content_recommender",
"(",
")",
"model_proxy",
".",
"init_options",
"(",
"opts",
")",
"# The user_id is implicit if none is given.",
"if",
"user_id",
"is",
"None",
":",
"user_id",
"=",
"\"__implicit_user__\"",
"normalization_factor",
"=",
"1",
"# Set the observation data.",
"if",
"observation_data",
"is",
"None",
":",
"# In this case, it's important to make this a string type. If",
"# the user column is not given, it may be given at recommend",
"# time, in which case it is cast to a string type and cast",
"# back if necessary.",
"empty_user",
"=",
"_turicreate",
".",
"SArray",
"(",
"[",
"]",
",",
"dtype",
"=",
"str",
")",
"empty_item",
"=",
"_turicreate",
".",
"SArray",
"(",
"[",
"]",
",",
"dtype",
"=",
"item_data",
"[",
"item_id",
"]",
".",
"dtype",
")",
"observation_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
"{",
"user_id",
":",
"empty_user",
",",
"item_id",
":",
"empty_item",
"}",
")",
"# Now, work out stuff for the observation_data component",
"normalization_factor",
"=",
"1",
"# 1 for the item_id column.",
"if",
"item_data",
".",
"num_columns",
"(",
")",
">=",
"3",
":",
"if",
"weights",
"==",
"\"auto\"",
":",
"# TODO: automatically tune this.",
"weights",
"=",
"{",
"col_name",
":",
"1",
"for",
"col_name",
"in",
"item_data",
".",
"column_names",
"(",
")",
"if",
"col_name",
"!=",
"item_id",
"}",
"# Use the abs value here in case users pass in weights with negative values.",
"normalization_factor",
"=",
"sum",
"(",
"abs",
"(",
"v",
")",
"for",
"v",
"in",
"weights",
".",
"values",
"(",
")",
")",
"if",
"normalization_factor",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Weights cannot all be set to 0.\"",
")",
"distance",
"=",
"[",
"(",
"[",
"col_name",
"]",
",",
"(",
"\"gaussian_kernel\"",
"if",
"col_name",
"in",
"gaussian_kernel_metrics",
"else",
"\"cosine\"",
")",
",",
"weight",
")",
"for",
"col_name",
",",
"weight",
"in",
"weights",
".",
"items",
"(",
")",
"]",
"else",
":",
"distance",
"=",
"\"cosine\"",
"# Now, build the nearest neighbors model:",
"nn",
"=",
"_turicreate",
".",
"nearest_neighbors",
".",
"create",
"(",
"item_data",
",",
"label",
"=",
"item_id",
",",
"distance",
"=",
"distance",
",",
"verbose",
"=",
"verbose",
")",
"graph",
"=",
"nn",
".",
"query",
"(",
"item_data",
",",
"label",
"=",
"item_id",
",",
"k",
"=",
"max_item_neighborhood_size",
",",
"verbose",
"=",
"verbose",
")",
"graph",
"=",
"graph",
".",
"rename",
"(",
"{",
"\"query_label\"",
":",
"item_id",
",",
"\"reference_label\"",
":",
"\"similar\"",
",",
"\"distance\"",
":",
"\"score\"",
"}",
",",
"inplace",
"=",
"True",
")",
"def",
"process_weights",
"(",
"x",
")",
":",
"return",
"max",
"(",
"-",
"1",
",",
"min",
"(",
"1",
",",
"1",
"-",
"x",
"/",
"normalization_factor",
")",
")",
"graph",
"[",
"\"score\"",
"]",
"=",
"graph",
"[",
"\"score\"",
"]",
".",
"apply",
"(",
"process_weights",
")",
"opts",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'target'",
":",
"target",
",",
"'similarity_type'",
":",
"\"cosine\"",
",",
"'max_item_neighborhood_size'",
":",
"max_item_neighborhood_size",
"}",
"user_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"extra_data",
"=",
"{",
"\"nearest_items\"",
":",
"graph",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"model_proxy",
".",
"train",
"(",
"observation_data",
",",
"user_data",
",",
"item_data",
",",
"opts",
",",
"extra_data",
")",
"return",
"ItemContentRecommender",
"(",
"model_proxy",
")"
] | Create a content-based recommender model in which the similarity
between the items recommended is determined by the content of
those items rather than learned from user interaction data.
The similarity score between two items is calculated by first
computing the similarity between the item data for each column,
then taking a weighted average of the per-column similarities to
get the final similarity. The recommendations are generated
according to the average similarity of a candidate item to all the
items in a user's set of rated items.
Parameters
----------
item_data : SFrame
An SFrame giving the content of the items to use to learn the
structure of similar items. The SFrame must have one column
that matches the name of the `item_id`; this gives a unique
identifier that can then be used to make recommendations. The rest
of the columns are then used in the distance calculations
below.
item_id : string
The name of the column in item_data (and `observation_data`,
if given) that represents the item ID.
observation_data : None (optional)
An SFrame giving user and item interaction data. This
information is stored in the model, and the recommender will
recommend the items with the most similar content to the
items that were present and/or highly rated for that user.
user_id : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the user identifier.
target : None (optional)
If observation_data is given, then this specifies the column
name corresponding to the target or rating.
weights : dict or 'auto' (optional)
If given, then weights must be a dictionary of column names
present in item_data to weights between the column names. If
'auto' is given, the all columns are weighted equally.
max_item_neighborhood_size : int, 64
For each item, we hold this many similar items to use when
aggregating models for predictions. Decreasing this value
decreases the memory required by the model and decreases the
time required to generate recommendations, but it may also
decrease recommendation accuracy.
verbose : True or False (optional)
If set to False, then less information is printed.
Examples
--------
>>> item_data = tc.SFrame({"my_item_id" : range(4),
"data_1" : [ [1, 0], [1, 0], [0, 1], [0.5, 0.5] ],
"data_2" : [ [0, 1], [1, 0], [0, 1], [0.5, 0.5] ] })
>>> m = tc.recommender.item_content_recommender.create(item_data, "my_item_id")
>>> m.recommend_from_interactions([0])
Columns:
my_item_id int
score float
rank int
Rows: 3
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 1 | 0.5 | 2 |
| 2 | 0.5 | 3 |
+------------+----------------+------+
[3 rows x 3 columns]
>>> m.recommend_from_interactions([0, 1])
Columns:
my_item_id int
score float
rank int
Rows: 2
Data:
+------------+----------------+------+
| my_item_id | score | rank |
+------------+----------------+------+
| 3 | 0.707106769085 | 1 |
| 2 | 0.25 | 2 |
+------------+----------------+------+
[2 rows x 3 columns] | [
"Create",
"a",
"content",
"-",
"based",
"recommender",
"model",
"in",
"which",
"the",
"similarity",
"between",
"the",
"items",
"recommended",
"is",
"determined",
"by",
"the",
"content",
"of",
"those",
"items",
"rather",
"than",
"learned",
"from",
"user",
"interaction",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/item_content_recommender.py#L20-L255 | train |
apple/turicreate | src/unity/python/turicreate/meta/asttools/visitors/cond_symbol_visitor.py | lhs | def lhs(node):
'''
Return a set of symbols in `node` that are assigned.
:param node: ast node
:returns: set of strings.
'''
gen = ConditionalSymbolVisitor()
if isinstance(node, (list, tuple)):
gen.visit_list(node)
else:
gen.visit(node)
return gen.lhs | python | def lhs(node):
'''
Return a set of symbols in `node` that are assigned.
:param node: ast node
:returns: set of strings.
'''
gen = ConditionalSymbolVisitor()
if isinstance(node, (list, tuple)):
gen.visit_list(node)
else:
gen.visit(node)
return gen.lhs | [
"def",
"lhs",
"(",
"node",
")",
":",
"gen",
"=",
"ConditionalSymbolVisitor",
"(",
")",
"if",
"isinstance",
"(",
"node",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"gen",
".",
"visit_list",
"(",
"node",
")",
"else",
":",
"gen",
".",
"visit",
"(",
"node",
")",
"return",
"gen",
".",
"lhs"
] | Return a set of symbols in `node` that are assigned.
:param node: ast node
:returns: set of strings. | [
"Return",
"a",
"set",
"of",
"symbols",
"in",
"node",
"that",
"are",
"assigned",
".",
":",
"param",
"node",
":",
"ast",
"node",
":",
"returns",
":",
"set",
"of",
"strings",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/asttools/visitors/cond_symbol_visitor.py#L363-L377 | train |
apple/turicreate | src/unity/python/turicreate/meta/asttools/visitors/cond_symbol_visitor.py | conditional_lhs | def conditional_lhs(node):
'''
Group outputs into conditional and stable
:param node: ast node
:returns: tuple of (conditional, stable)
'''
gen = ConditionalSymbolVisitor()
gen.visit(node)
return gen.cond_lhs, gen.stable_lhs | python | def conditional_lhs(node):
'''
Group outputs into conditional and stable
:param node: ast node
:returns: tuple of (conditional, stable)
'''
gen = ConditionalSymbolVisitor()
gen.visit(node)
return gen.cond_lhs, gen.stable_lhs | [
"def",
"conditional_lhs",
"(",
"node",
")",
":",
"gen",
"=",
"ConditionalSymbolVisitor",
"(",
")",
"gen",
".",
"visit",
"(",
"node",
")",
"return",
"gen",
".",
"cond_lhs",
",",
"gen",
".",
"stable_lhs"
] | Group outputs into conditional and stable
:param node: ast node
:returns: tuple of (conditional, stable) | [
"Group",
"outputs",
"into",
"conditional",
"and",
"stable",
":",
"param",
"node",
":",
"ast",
"node",
":",
"returns",
":",
"tuple",
"of",
"(",
"conditional",
"stable",
")"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/asttools/visitors/cond_symbol_visitor.py#L395-L406 | train |
apple/turicreate | src/unity/python/turicreate/meta/asttools/visitors/cond_symbol_visitor.py | conditional_symbols | def conditional_symbols(node):
'''
Group lhs and rhs into conditional, stable and undefined
:param node: ast node
:returns: tuple of (conditional_lhs, stable_lhs),(conditional_rhs, stable_rhs), undefined
'''
gen = ConditionalSymbolVisitor()
gen.visit(node)
lhs = gen.cond_lhs, gen.stable_lhs
rhs = gen.cond_rhs, gen.stable_rhs
undefined = gen.undefined
return lhs, rhs, undefined | python | def conditional_symbols(node):
'''
Group lhs and rhs into conditional, stable and undefined
:param node: ast node
:returns: tuple of (conditional_lhs, stable_lhs),(conditional_rhs, stable_rhs), undefined
'''
gen = ConditionalSymbolVisitor()
gen.visit(node)
lhs = gen.cond_lhs, gen.stable_lhs
rhs = gen.cond_rhs, gen.stable_rhs
undefined = gen.undefined
return lhs, rhs, undefined | [
"def",
"conditional_symbols",
"(",
"node",
")",
":",
"gen",
"=",
"ConditionalSymbolVisitor",
"(",
")",
"gen",
".",
"visit",
"(",
"node",
")",
"lhs",
"=",
"gen",
".",
"cond_lhs",
",",
"gen",
".",
"stable_lhs",
"rhs",
"=",
"gen",
".",
"cond_rhs",
",",
"gen",
".",
"stable_rhs",
"undefined",
"=",
"gen",
".",
"undefined",
"return",
"lhs",
",",
"rhs",
",",
"undefined"
] | Group lhs and rhs into conditional, stable and undefined
:param node: ast node
:returns: tuple of (conditional_lhs, stable_lhs),(conditional_rhs, stable_rhs), undefined | [
"Group",
"lhs",
"and",
"rhs",
"into",
"conditional",
"stable",
"and",
"undefined",
":",
"param",
"node",
":",
"ast",
"node",
":",
"returns",
":",
"tuple",
"of",
"(",
"conditional_lhs",
"stable_lhs",
")",
"(",
"conditional_rhs",
"stable_rhs",
")",
"undefined"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/asttools/visitors/cond_symbol_visitor.py#L409-L423 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | _loadlib | def _loadlib(lib='standard'):
"""Load rabit library."""
global _LIB
if _LIB is not None:
warnings.warn('rabit.int call was ignored because it has'\
' already been initialized', level=2)
return
if lib == 'standard':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '')
elif lib == 'mock':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mock')
elif lib == 'mpi':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mpi')
else:
raise Exception('unknown rabit lib %s, can be standard, mock, mpi' % lib)
_LIB.RabitGetRank.restype = ctypes.c_int
_LIB.RabitGetWorldSize.restype = ctypes.c_int
_LIB.RabitVersionNumber.restype = ctypes.c_int | python | def _loadlib(lib='standard'):
"""Load rabit library."""
global _LIB
if _LIB is not None:
warnings.warn('rabit.int call was ignored because it has'\
' already been initialized', level=2)
return
if lib == 'standard':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '')
elif lib == 'mock':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mock')
elif lib == 'mpi':
_LIB = ctypes.cdll.LoadLibrary(WRAPPER_PATH % '_mpi')
else:
raise Exception('unknown rabit lib %s, can be standard, mock, mpi' % lib)
_LIB.RabitGetRank.restype = ctypes.c_int
_LIB.RabitGetWorldSize.restype = ctypes.c_int
_LIB.RabitVersionNumber.restype = ctypes.c_int | [
"def",
"_loadlib",
"(",
"lib",
"=",
"'standard'",
")",
":",
"global",
"_LIB",
"if",
"_LIB",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"'rabit.int call was ignored because it has'",
"' already been initialized'",
",",
"level",
"=",
"2",
")",
"return",
"if",
"lib",
"==",
"'standard'",
":",
"_LIB",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"WRAPPER_PATH",
"%",
"''",
")",
"elif",
"lib",
"==",
"'mock'",
":",
"_LIB",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"WRAPPER_PATH",
"%",
"'_mock'",
")",
"elif",
"lib",
"==",
"'mpi'",
":",
"_LIB",
"=",
"ctypes",
".",
"cdll",
".",
"LoadLibrary",
"(",
"WRAPPER_PATH",
"%",
"'_mpi'",
")",
"else",
":",
"raise",
"Exception",
"(",
"'unknown rabit lib %s, can be standard, mock, mpi'",
"%",
"lib",
")",
"_LIB",
".",
"RabitGetRank",
".",
"restype",
"=",
"ctypes",
".",
"c_int",
"_LIB",
".",
"RabitGetWorldSize",
".",
"restype",
"=",
"ctypes",
".",
"c_int",
"_LIB",
".",
"RabitVersionNumber",
".",
"restype",
"=",
"ctypes",
".",
"c_int"
] | Load rabit library. | [
"Load",
"rabit",
"library",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L25-L42 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | init | def init(args=None, lib='standard'):
"""Intialize the rabit module, call this once before using anything.
Parameters
----------
args: list of str, optional
The list of arguments used to initialized the rabit
usually you need to pass in sys.argv.
Defaults to sys.argv when it is None.
lib: {'standard', 'mock', 'mpi'}
Type of library we want to load
"""
if args is None:
args = sys.argv
_loadlib(lib)
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(args), arr) | python | def init(args=None, lib='standard'):
"""Intialize the rabit module, call this once before using anything.
Parameters
----------
args: list of str, optional
The list of arguments used to initialized the rabit
usually you need to pass in sys.argv.
Defaults to sys.argv when it is None.
lib: {'standard', 'mock', 'mpi'}
Type of library we want to load
"""
if args is None:
args = sys.argv
_loadlib(lib)
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(args), arr) | [
"def",
"init",
"(",
"args",
"=",
"None",
",",
"lib",
"=",
"'standard'",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"sys",
".",
"argv",
"_loadlib",
"(",
"lib",
")",
"arr",
"=",
"(",
"ctypes",
".",
"c_char_p",
"*",
"len",
"(",
"args",
")",
")",
"(",
")",
"arr",
"[",
":",
"]",
"=",
"args",
"_LIB",
".",
"RabitInit",
"(",
"len",
"(",
"args",
")",
",",
"arr",
")"
] | Intialize the rabit module, call this once before using anything.
Parameters
----------
args: list of str, optional
The list of arguments used to initialized the rabit
usually you need to pass in sys.argv.
Defaults to sys.argv when it is None.
lib: {'standard', 'mock', 'mpi'}
Type of library we want to load | [
"Intialize",
"the",
"rabit",
"module",
"call",
"this",
"once",
"before",
"using",
"anything",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L56-L73 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | tracker_print | def tracker_print(msg):
"""Print message to the tracker.
This function can be used to communicate the information of
the progress to the tracker
Parameters
----------
msg : str
The message to be printed to tracker.
"""
if not isinstance(msg, str):
msg = str(msg)
_LIB.RabitTrackerPrint(ctypes.c_char_p(msg).encode('utf-8')) | python | def tracker_print(msg):
"""Print message to the tracker.
This function can be used to communicate the information of
the progress to the tracker
Parameters
----------
msg : str
The message to be printed to tracker.
"""
if not isinstance(msg, str):
msg = str(msg)
_LIB.RabitTrackerPrint(ctypes.c_char_p(msg).encode('utf-8')) | [
"def",
"tracker_print",
"(",
"msg",
")",
":",
"if",
"not",
"isinstance",
"(",
"msg",
",",
"str",
")",
":",
"msg",
"=",
"str",
"(",
"msg",
")",
"_LIB",
".",
"RabitTrackerPrint",
"(",
"ctypes",
".",
"c_char_p",
"(",
"msg",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] | Print message to the tracker.
This function can be used to communicate the information of
the progress to the tracker
Parameters
----------
msg : str
The message to be printed to tracker. | [
"Print",
"message",
"to",
"the",
"tracker",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L105-L118 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | allreduce | def allreduce(data, op, prepare_fun=None):
"""Perform allreduce, return the result.
Parameters
----------
data: numpy array
Input data.
op: int
Reduction operators, can be MIN, MAX, SUM, BITOR
prepare_fun: function
Lazy preprocessing function, if it is not None, prepare_fun(data)
will be called by the function before performing allreduce, to intialize the data
If the result of Allreduce can be recovered directly,
then prepare_fun will NOT be called
Returns
-------
result : array_like
The result of allreduce, have same shape as data
Notes
-----
This function is not thread-safe.
"""
if not isinstance(data, np.ndarray):
raise Exception('allreduce only takes in numpy.ndarray')
buf = data.ravel()
if buf.base is data.base:
buf = buf.copy()
if buf.dtype not in DTYPE_ENUM__:
raise Exception('data type %s not supported' % str(buf.dtype))
if prepare_fun is None:
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, None, None)
else:
func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def pfunc(args):
"""prepare function."""
prepare_fun(data)
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, func_ptr(pfunc), None)
return buf | python | def allreduce(data, op, prepare_fun=None):
"""Perform allreduce, return the result.
Parameters
----------
data: numpy array
Input data.
op: int
Reduction operators, can be MIN, MAX, SUM, BITOR
prepare_fun: function
Lazy preprocessing function, if it is not None, prepare_fun(data)
will be called by the function before performing allreduce, to intialize the data
If the result of Allreduce can be recovered directly,
then prepare_fun will NOT be called
Returns
-------
result : array_like
The result of allreduce, have same shape as data
Notes
-----
This function is not thread-safe.
"""
if not isinstance(data, np.ndarray):
raise Exception('allreduce only takes in numpy.ndarray')
buf = data.ravel()
if buf.base is data.base:
buf = buf.copy()
if buf.dtype not in DTYPE_ENUM__:
raise Exception('data type %s not supported' % str(buf.dtype))
if prepare_fun is None:
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, None, None)
else:
func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def pfunc(args):
"""prepare function."""
prepare_fun(data)
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, func_ptr(pfunc), None)
return buf | [
"def",
"allreduce",
"(",
"data",
",",
"op",
",",
"prepare_fun",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"Exception",
"(",
"'allreduce only takes in numpy.ndarray'",
")",
"buf",
"=",
"data",
".",
"ravel",
"(",
")",
"if",
"buf",
".",
"base",
"is",
"data",
".",
"base",
":",
"buf",
"=",
"buf",
".",
"copy",
"(",
")",
"if",
"buf",
".",
"dtype",
"not",
"in",
"DTYPE_ENUM__",
":",
"raise",
"Exception",
"(",
"'data type %s not supported'",
"%",
"str",
"(",
"buf",
".",
"dtype",
")",
")",
"if",
"prepare_fun",
"is",
"None",
":",
"_LIB",
".",
"RabitAllreduce",
"(",
"buf",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"c_void_p",
")",
",",
"buf",
".",
"size",
",",
"DTYPE_ENUM__",
"[",
"buf",
".",
"dtype",
"]",
",",
"op",
",",
"None",
",",
"None",
")",
"else",
":",
"func_ptr",
"=",
"ctypes",
".",
"CFUNCTYPE",
"(",
"None",
",",
"ctypes",
".",
"c_void_p",
")",
"def",
"pfunc",
"(",
"args",
")",
":",
"\"\"\"prepare function.\"\"\"",
"prepare_fun",
"(",
"data",
")",
"_LIB",
".",
"RabitAllreduce",
"(",
"buf",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"c_void_p",
")",
",",
"buf",
".",
"size",
",",
"DTYPE_ENUM__",
"[",
"buf",
".",
"dtype",
"]",
",",
"op",
",",
"func_ptr",
"(",
"pfunc",
")",
",",
"None",
")",
"return",
"buf"
] | Perform allreduce, return the result.
Parameters
----------
data: numpy array
Input data.
op: int
Reduction operators, can be MIN, MAX, SUM, BITOR
prepare_fun: function
Lazy preprocessing function, if it is not None, prepare_fun(data)
will be called by the function before performing allreduce, to intialize the data
If the result of Allreduce can be recovered directly,
then prepare_fun will NOT be called
Returns
-------
result : array_like
The result of allreduce, have same shape as data
Notes
-----
This function is not thread-safe. | [
"Perform",
"allreduce",
"return",
"the",
"result",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L183-L226 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | _load_model | def _load_model(ptr, length):
"""
Internal function used by the module,
unpickle a model from a buffer specified by ptr, length
Arguments:
ptr: ctypes.POINTER(ctypes._char)
pointer to the memory region of buffer
length: int
the length of buffer
"""
data = (ctypes.c_char * length).from_address(ctypes.addressof(ptr.contents))
return pickle.loads(data.raw) | python | def _load_model(ptr, length):
"""
Internal function used by the module,
unpickle a model from a buffer specified by ptr, length
Arguments:
ptr: ctypes.POINTER(ctypes._char)
pointer to the memory region of buffer
length: int
the length of buffer
"""
data = (ctypes.c_char * length).from_address(ctypes.addressof(ptr.contents))
return pickle.loads(data.raw) | [
"def",
"_load_model",
"(",
"ptr",
",",
"length",
")",
":",
"data",
"=",
"(",
"ctypes",
".",
"c_char",
"*",
"length",
")",
".",
"from_address",
"(",
"ctypes",
".",
"addressof",
"(",
"ptr",
".",
"contents",
")",
")",
"return",
"pickle",
".",
"loads",
"(",
"data",
".",
"raw",
")"
] | Internal function used by the module,
unpickle a model from a buffer specified by ptr, length
Arguments:
ptr: ctypes.POINTER(ctypes._char)
pointer to the memory region of buffer
length: int
the length of buffer | [
"Internal",
"function",
"used",
"by",
"the",
"module",
"unpickle",
"a",
"model",
"from",
"a",
"buffer",
"specified",
"by",
"ptr",
"length",
"Arguments",
":",
"ptr",
":",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"_char",
")",
"pointer",
"to",
"the",
"memory",
"region",
"of",
"buffer",
"length",
":",
"int",
"the",
"length",
"of",
"buffer"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L229-L240 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | load_checkpoint | def load_checkpoint(with_local=False):
"""Load latest check point.
Parameters
----------
with_local: bool, optional
whether the checkpoint contains local model
Returns
-------
tuple : tuple
if with_local: return (version, gobal_model, local_model)
else return (version, gobal_model)
if returned version == 0, this means no model has been CheckPointed
and global_model, local_model returned will be None
"""
gptr = ctypes.POINTER(ctypes.c_char)()
global_len = ctypes.c_ulong()
if with_local:
lptr = ctypes.POINTER(ctypes.c_char)()
local_len = ctypes.c_ulong()
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
ctypes.byref(lptr),
ctypes.byref(local_len))
if version == 0:
return (version, None, None)
return (version,
_load_model(gptr, global_len.value),
_load_model(lptr, local_len.value))
else:
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
None, None)
if version == 0:
return (version, None)
return (version,
_load_model(gptr, global_len.value)) | python | def load_checkpoint(with_local=False):
"""Load latest check point.
Parameters
----------
with_local: bool, optional
whether the checkpoint contains local model
Returns
-------
tuple : tuple
if with_local: return (version, gobal_model, local_model)
else return (version, gobal_model)
if returned version == 0, this means no model has been CheckPointed
and global_model, local_model returned will be None
"""
gptr = ctypes.POINTER(ctypes.c_char)()
global_len = ctypes.c_ulong()
if with_local:
lptr = ctypes.POINTER(ctypes.c_char)()
local_len = ctypes.c_ulong()
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
ctypes.byref(lptr),
ctypes.byref(local_len))
if version == 0:
return (version, None, None)
return (version,
_load_model(gptr, global_len.value),
_load_model(lptr, local_len.value))
else:
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
None, None)
if version == 0:
return (version, None)
return (version,
_load_model(gptr, global_len.value)) | [
"def",
"load_checkpoint",
"(",
"with_local",
"=",
"False",
")",
":",
"gptr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
"(",
")",
"global_len",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"if",
"with_local",
":",
"lptr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
"(",
")",
"local_len",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"version",
"=",
"_LIB",
".",
"RabitLoadCheckPoint",
"(",
"ctypes",
".",
"byref",
"(",
"gptr",
")",
",",
"ctypes",
".",
"byref",
"(",
"global_len",
")",
",",
"ctypes",
".",
"byref",
"(",
"lptr",
")",
",",
"ctypes",
".",
"byref",
"(",
"local_len",
")",
")",
"if",
"version",
"==",
"0",
":",
"return",
"(",
"version",
",",
"None",
",",
"None",
")",
"return",
"(",
"version",
",",
"_load_model",
"(",
"gptr",
",",
"global_len",
".",
"value",
")",
",",
"_load_model",
"(",
"lptr",
",",
"local_len",
".",
"value",
")",
")",
"else",
":",
"version",
"=",
"_LIB",
".",
"RabitLoadCheckPoint",
"(",
"ctypes",
".",
"byref",
"(",
"gptr",
")",
",",
"ctypes",
".",
"byref",
"(",
"global_len",
")",
",",
"None",
",",
"None",
")",
"if",
"version",
"==",
"0",
":",
"return",
"(",
"version",
",",
"None",
")",
"return",
"(",
"version",
",",
"_load_model",
"(",
"gptr",
",",
"global_len",
".",
"value",
")",
")"
] | Load latest check point.
Parameters
----------
with_local: bool, optional
whether the checkpoint contains local model
Returns
-------
tuple : tuple
if with_local: return (version, gobal_model, local_model)
else return (version, gobal_model)
if returned version == 0, this means no model has been CheckPointed
and global_model, local_model returned will be None | [
"Load",
"latest",
"check",
"point",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L242-L281 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/wrapper/rabit.py | checkpoint | def checkpoint(global_model, local_model=None):
"""Checkpoint the model.
This means we finished a stage of execution.
Every time we call check point, there is a version number which will increase by one.
Parameters
----------
global_model: anytype that can be pickled
globally shared model/state when calling this function,
the caller need to gauranttees that global_model is the same in all nodes
local_model: anytype that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault-tolerance.
This will bring replication cost in checkpoint function.
while global_model do not need explicit replication.
It is recommended to use global_model if possible.
"""
sglobal = pickle.dumps(global_model)
if local_model is None:
_LIB.RabitCheckPoint(sglobal, len(sglobal), None, 0)
del sglobal
else:
slocal = pickle.dumps(local_model)
_LIB.RabitCheckPoint(sglobal, len(sglobal), slocal, len(slocal))
del slocal
del sglobal | python | def checkpoint(global_model, local_model=None):
"""Checkpoint the model.
This means we finished a stage of execution.
Every time we call check point, there is a version number which will increase by one.
Parameters
----------
global_model: anytype that can be pickled
globally shared model/state when calling this function,
the caller need to gauranttees that global_model is the same in all nodes
local_model: anytype that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault-tolerance.
This will bring replication cost in checkpoint function.
while global_model do not need explicit replication.
It is recommended to use global_model if possible.
"""
sglobal = pickle.dumps(global_model)
if local_model is None:
_LIB.RabitCheckPoint(sglobal, len(sglobal), None, 0)
del sglobal
else:
slocal = pickle.dumps(local_model)
_LIB.RabitCheckPoint(sglobal, len(sglobal), slocal, len(slocal))
del slocal
del sglobal | [
"def",
"checkpoint",
"(",
"global_model",
",",
"local_model",
"=",
"None",
")",
":",
"sglobal",
"=",
"pickle",
".",
"dumps",
"(",
"global_model",
")",
"if",
"local_model",
"is",
"None",
":",
"_LIB",
".",
"RabitCheckPoint",
"(",
"sglobal",
",",
"len",
"(",
"sglobal",
")",
",",
"None",
",",
"0",
")",
"del",
"sglobal",
"else",
":",
"slocal",
"=",
"pickle",
".",
"dumps",
"(",
"local_model",
")",
"_LIB",
".",
"RabitCheckPoint",
"(",
"sglobal",
",",
"len",
"(",
"sglobal",
")",
",",
"slocal",
",",
"len",
"(",
"slocal",
")",
")",
"del",
"slocal",
"del",
"sglobal"
] | Checkpoint the model.
This means we finished a stage of execution.
Every time we call check point, there is a version number which will increase by one.
Parameters
----------
global_model: anytype that can be pickled
globally shared model/state when calling this function,
the caller need to gauranttees that global_model is the same in all nodes
local_model: anytype that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault-tolerance.
This will bring replication cost in checkpoint function.
while global_model do not need explicit replication.
It is recommended to use global_model if possible. | [
"Checkpoint",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/wrapper/rabit.py#L283-L314 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/util/_output_formats.py | stack_annotations | def stack_annotations(annotations_sarray):
"""
Converts object detection annotations (ground truth or predictions) to
stacked format (an `SFrame` where each row is one object instance).
Parameters
----------
annotations_sarray: SArray
An `SArray` with unstacked predictions, exactly formatted as the
annotations column when training an object detector or when making
predictions.
Returns
-------
annotations_sframe: An `SFrame` with stacked annotations.
See also
--------
unstack_annotations
Examples
--------
Predictions are returned by the object detector in unstacked format:
>>> predictions = detector.predict(images)
By converting it to stacked format, it is easier to get an overview of
object instances:
>>> turicreate.object_detector.util.stack_annotations(predictions)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
"""
_raise_error_if_not_sarray(annotations_sarray, variable_name='annotations_sarray')
sf = _tc.SFrame({'annotations': annotations_sarray}).add_row_number('row_id')
sf = sf.stack('annotations', new_column_name='annotations', drop_na=True)
if len(sf) == 0:
cols = ['row_id', 'confidence', 'label', 'height', 'width', 'x', 'y']
return _tc.SFrame({k: [] for k in cols})
sf = sf.unpack('annotations', column_name_prefix='')
sf = sf.unpack('coordinates', column_name_prefix='')
del sf['type']
return sf | python | def stack_annotations(annotations_sarray):
"""
Converts object detection annotations (ground truth or predictions) to
stacked format (an `SFrame` where each row is one object instance).
Parameters
----------
annotations_sarray: SArray
An `SArray` with unstacked predictions, exactly formatted as the
annotations column when training an object detector or when making
predictions.
Returns
-------
annotations_sframe: An `SFrame` with stacked annotations.
See also
--------
unstack_annotations
Examples
--------
Predictions are returned by the object detector in unstacked format:
>>> predictions = detector.predict(images)
By converting it to stacked format, it is easier to get an overview of
object instances:
>>> turicreate.object_detector.util.stack_annotations(predictions)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
"""
_raise_error_if_not_sarray(annotations_sarray, variable_name='annotations_sarray')
sf = _tc.SFrame({'annotations': annotations_sarray}).add_row_number('row_id')
sf = sf.stack('annotations', new_column_name='annotations', drop_na=True)
if len(sf) == 0:
cols = ['row_id', 'confidence', 'label', 'height', 'width', 'x', 'y']
return _tc.SFrame({k: [] for k in cols})
sf = sf.unpack('annotations', column_name_prefix='')
sf = sf.unpack('coordinates', column_name_prefix='')
del sf['type']
return sf | [
"def",
"stack_annotations",
"(",
"annotations_sarray",
")",
":",
"_raise_error_if_not_sarray",
"(",
"annotations_sarray",
",",
"variable_name",
"=",
"'annotations_sarray'",
")",
"sf",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'annotations'",
":",
"annotations_sarray",
"}",
")",
".",
"add_row_number",
"(",
"'row_id'",
")",
"sf",
"=",
"sf",
".",
"stack",
"(",
"'annotations'",
",",
"new_column_name",
"=",
"'annotations'",
",",
"drop_na",
"=",
"True",
")",
"if",
"len",
"(",
"sf",
")",
"==",
"0",
":",
"cols",
"=",
"[",
"'row_id'",
",",
"'confidence'",
",",
"'label'",
",",
"'height'",
",",
"'width'",
",",
"'x'",
",",
"'y'",
"]",
"return",
"_tc",
".",
"SFrame",
"(",
"{",
"k",
":",
"[",
"]",
"for",
"k",
"in",
"cols",
"}",
")",
"sf",
"=",
"sf",
".",
"unpack",
"(",
"'annotations'",
",",
"column_name_prefix",
"=",
"''",
")",
"sf",
"=",
"sf",
".",
"unpack",
"(",
"'coordinates'",
",",
"column_name_prefix",
"=",
"''",
")",
"del",
"sf",
"[",
"'type'",
"]",
"return",
"sf"
] | Converts object detection annotations (ground truth or predictions) to
stacked format (an `SFrame` where each row is one object instance).
Parameters
----------
annotations_sarray: SArray
An `SArray` with unstacked predictions, exactly formatted as the
annotations column when training an object detector or when making
predictions.
Returns
-------
annotations_sframe: An `SFrame` with stacked annotations.
See also
--------
unstack_annotations
Examples
--------
Predictions are returned by the object detector in unstacked format:
>>> predictions = detector.predict(images)
By converting it to stacked format, it is easier to get an overview of
object instances:
>>> turicreate.object_detector.util.stack_annotations(predictions)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns] | [
"Converts",
"object",
"detection",
"annotations",
"(",
"ground",
"truth",
"or",
"predictions",
")",
"to",
"stacked",
"format",
"(",
"an",
"SFrame",
"where",
"each",
"row",
"is",
"one",
"object",
"instance",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/util/_output_formats.py#L14-L63 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/util/_output_formats.py | unstack_annotations | def unstack_annotations(annotations_sframe, num_rows=None):
"""
Converts object detection annotations (ground truth or predictions) to
unstacked format (an `SArray` where each element is a list of object
instances).
Parameters
----------
annotations_sframe: SFrame
An `SFrame` with stacked predictions, produced by the
`stack_annotations` function.
num_rows: int
Optionally specify the number of rows in your original dataset, so that
all get represented in the unstacked format, regardless of whether or
not they had instances or not.
Returns
-------
annotations_sarray: An `SArray` with unstacked annotations.
See also
--------
stack_annotations
Examples
--------
If you have annotations in stacked format:
>>> stacked_predictions
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
They can be converted to unstacked format using this function:
>>> turicreate.object_detector.util.unstack_annotations(stacked_predictions)[0]
[{'confidence': 0.98,
'coordinates': {'height': 182.0, 'width': 80.0, 'x': 123.0, 'y': 128.0},
'label': 'dog',
'type': 'rectangle'},
{'confidence': 0.67,
'coordinates': {'height': 101.0, 'width': 129.0, 'x': 150.0, 'y': 183.0},
'label': 'cat',
'type': 'rectangle'}]
"""
_raise_error_if_not_sframe(annotations_sframe, variable_name="annotations_sframe")
cols = ['label', 'type', 'coordinates']
has_confidence = 'confidence' in annotations_sframe.column_names()
if has_confidence:
cols.append('confidence')
if num_rows is None:
if len(annotations_sframe) == 0:
num_rows = 0
else:
num_rows = annotations_sframe['row_id'].max() + 1
sf = annotations_sframe
sf['type'] = 'rectangle'
sf = sf.pack_columns(['x', 'y', 'width', 'height'], dtype=dict,
new_column_name='coordinates')
sf = sf.pack_columns(cols, dtype=dict, new_column_name='ann')
sf = sf.unstack('ann', new_column_name='annotations')
sf_all_ids = _tc.SFrame({'row_id': range(num_rows)})
sf = sf.join(sf_all_ids, on='row_id', how='right')
sf = sf.fillna('annotations', [])
sf = sf.sort('row_id')
annotations_sarray = sf['annotations']
# Sort the confidences again, since the unstack does not preserve the order
if has_confidence:
annotations_sarray = annotations_sarray.apply(
lambda x: sorted(x, key=lambda ann: ann['confidence'], reverse=True),
dtype=list)
return annotations_sarray | python | def unstack_annotations(annotations_sframe, num_rows=None):
"""
Converts object detection annotations (ground truth or predictions) to
unstacked format (an `SArray` where each element is a list of object
instances).
Parameters
----------
annotations_sframe: SFrame
An `SFrame` with stacked predictions, produced by the
`stack_annotations` function.
num_rows: int
Optionally specify the number of rows in your original dataset, so that
all get represented in the unstacked format, regardless of whether or
not they had instances or not.
Returns
-------
annotations_sarray: An `SArray` with unstacked annotations.
See also
--------
stack_annotations
Examples
--------
If you have annotations in stacked format:
>>> stacked_predictions
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
They can be converted to unstacked format using this function:
>>> turicreate.object_detector.util.unstack_annotations(stacked_predictions)[0]
[{'confidence': 0.98,
'coordinates': {'height': 182.0, 'width': 80.0, 'x': 123.0, 'y': 128.0},
'label': 'dog',
'type': 'rectangle'},
{'confidence': 0.67,
'coordinates': {'height': 101.0, 'width': 129.0, 'x': 150.0, 'y': 183.0},
'label': 'cat',
'type': 'rectangle'}]
"""
_raise_error_if_not_sframe(annotations_sframe, variable_name="annotations_sframe")
cols = ['label', 'type', 'coordinates']
has_confidence = 'confidence' in annotations_sframe.column_names()
if has_confidence:
cols.append('confidence')
if num_rows is None:
if len(annotations_sframe) == 0:
num_rows = 0
else:
num_rows = annotations_sframe['row_id'].max() + 1
sf = annotations_sframe
sf['type'] = 'rectangle'
sf = sf.pack_columns(['x', 'y', 'width', 'height'], dtype=dict,
new_column_name='coordinates')
sf = sf.pack_columns(cols, dtype=dict, new_column_name='ann')
sf = sf.unstack('ann', new_column_name='annotations')
sf_all_ids = _tc.SFrame({'row_id': range(num_rows)})
sf = sf.join(sf_all_ids, on='row_id', how='right')
sf = sf.fillna('annotations', [])
sf = sf.sort('row_id')
annotations_sarray = sf['annotations']
# Sort the confidences again, since the unstack does not preserve the order
if has_confidence:
annotations_sarray = annotations_sarray.apply(
lambda x: sorted(x, key=lambda ann: ann['confidence'], reverse=True),
dtype=list)
return annotations_sarray | [
"def",
"unstack_annotations",
"(",
"annotations_sframe",
",",
"num_rows",
"=",
"None",
")",
":",
"_raise_error_if_not_sframe",
"(",
"annotations_sframe",
",",
"variable_name",
"=",
"\"annotations_sframe\"",
")",
"cols",
"=",
"[",
"'label'",
",",
"'type'",
",",
"'coordinates'",
"]",
"has_confidence",
"=",
"'confidence'",
"in",
"annotations_sframe",
".",
"column_names",
"(",
")",
"if",
"has_confidence",
":",
"cols",
".",
"append",
"(",
"'confidence'",
")",
"if",
"num_rows",
"is",
"None",
":",
"if",
"len",
"(",
"annotations_sframe",
")",
"==",
"0",
":",
"num_rows",
"=",
"0",
"else",
":",
"num_rows",
"=",
"annotations_sframe",
"[",
"'row_id'",
"]",
".",
"max",
"(",
")",
"+",
"1",
"sf",
"=",
"annotations_sframe",
"sf",
"[",
"'type'",
"]",
"=",
"'rectangle'",
"sf",
"=",
"sf",
".",
"pack_columns",
"(",
"[",
"'x'",
",",
"'y'",
",",
"'width'",
",",
"'height'",
"]",
",",
"dtype",
"=",
"dict",
",",
"new_column_name",
"=",
"'coordinates'",
")",
"sf",
"=",
"sf",
".",
"pack_columns",
"(",
"cols",
",",
"dtype",
"=",
"dict",
",",
"new_column_name",
"=",
"'ann'",
")",
"sf",
"=",
"sf",
".",
"unstack",
"(",
"'ann'",
",",
"new_column_name",
"=",
"'annotations'",
")",
"sf_all_ids",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"'row_id'",
":",
"range",
"(",
"num_rows",
")",
"}",
")",
"sf",
"=",
"sf",
".",
"join",
"(",
"sf_all_ids",
",",
"on",
"=",
"'row_id'",
",",
"how",
"=",
"'right'",
")",
"sf",
"=",
"sf",
".",
"fillna",
"(",
"'annotations'",
",",
"[",
"]",
")",
"sf",
"=",
"sf",
".",
"sort",
"(",
"'row_id'",
")",
"annotations_sarray",
"=",
"sf",
"[",
"'annotations'",
"]",
"# Sort the confidences again, since the unstack does not preserve the order",
"if",
"has_confidence",
":",
"annotations_sarray",
"=",
"annotations_sarray",
".",
"apply",
"(",
"lambda",
"x",
":",
"sorted",
"(",
"x",
",",
"key",
"=",
"lambda",
"ann",
":",
"ann",
"[",
"'confidence'",
"]",
",",
"reverse",
"=",
"True",
")",
",",
"dtype",
"=",
"list",
")",
"return",
"annotations_sarray"
] | Converts object detection annotations (ground truth or predictions) to
unstacked format (an `SArray` where each element is a list of object
instances).
Parameters
----------
annotations_sframe: SFrame
An `SFrame` with stacked predictions, produced by the
`stack_annotations` function.
num_rows: int
Optionally specify the number of rows in your original dataset, so that
all get represented in the unstacked format, regardless of whether or
not they had instances or not.
Returns
-------
annotations_sarray: An `SArray` with unstacked annotations.
See also
--------
stack_annotations
Examples
--------
If you have annotations in stacked format:
>>> stacked_predictions
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
They can be converted to unstacked format using this function:
>>> turicreate.object_detector.util.unstack_annotations(stacked_predictions)[0]
[{'confidence': 0.98,
'coordinates': {'height': 182.0, 'width': 80.0, 'x': 123.0, 'y': 128.0},
'label': 'dog',
'type': 'rectangle'},
{'confidence': 0.67,
'coordinates': {'height': 101.0, 'width': 129.0, 'x': 150.0, 'y': 183.0},
'label': 'cat',
'type': 'rectangle'}] | [
"Converts",
"object",
"detection",
"annotations",
"(",
"ground",
"truth",
"or",
"predictions",
")",
"to",
"unstacked",
"format",
"(",
"an",
"SArray",
"where",
"each",
"element",
"is",
"a",
"list",
"of",
"object",
"instances",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/util/_output_formats.py#L66-L148 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/ranking_factorization_recommender.py | create | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
num_factors=32,
regularization=1e-9,
linear_regularization=1e-9,
side_data_factorization=True,
ranking_regularization=0.25,
unobserved_rating_value=None,
num_sampled_negative_examples=4,
max_iterations=25,
sgd_step_size=0,
random_seed=0,
binary_target = False,
solver = 'auto',
verbose=True,
**kwargs):
"""Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
L2 regularization for interaction terms. Default: 1e-10; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
L2 regularization for linear term. Default: 1e-10; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
the observed data. Default: 50.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"])
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enable using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008).
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.ranking_factorization_recommender()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
if target is None:
binary_target = True
opts = {'user_id' : user_id,
'item_id' : item_id,
'target' : target,
'random_seed' : random_seed,
'num_factors' : num_factors,
'regularization' : regularization,
'linear_regularization' : linear_regularization,
'ranking_regularization' : ranking_regularization,
'binary_target' : binary_target,
'max_iterations' : max_iterations,
'side_data_factorization' : side_data_factorization,
'num_sampled_negative_examples' : num_sampled_negative_examples,
'solver' : solver,
# Has no effect here.
'sgd_step_size' : sgd_step_size}
if unobserved_rating_value is not None:
opts["unobserved_rating_value"] = unobserved_rating_value
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return RankingFactorizationRecommender(model_proxy) | python | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
num_factors=32,
regularization=1e-9,
linear_regularization=1e-9,
side_data_factorization=True,
ranking_regularization=0.25,
unobserved_rating_value=None,
num_sampled_negative_examples=4,
max_iterations=25,
sgd_step_size=0,
random_seed=0,
binary_target = False,
solver = 'auto',
verbose=True,
**kwargs):
"""Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
L2 regularization for interaction terms. Default: 1e-10; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
L2 regularization for linear term. Default: 1e-10; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
the observed data. Default: 50.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"])
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enable using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008).
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.ranking_factorization_recommender()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
if target is None:
binary_target = True
opts = {'user_id' : user_id,
'item_id' : item_id,
'target' : target,
'random_seed' : random_seed,
'num_factors' : num_factors,
'regularization' : regularization,
'linear_regularization' : linear_regularization,
'ranking_regularization' : ranking_regularization,
'binary_target' : binary_target,
'max_iterations' : max_iterations,
'side_data_factorization' : side_data_factorization,
'num_sampled_negative_examples' : num_sampled_negative_examples,
'solver' : solver,
# Has no effect here.
'sgd_step_size' : sgd_step_size}
if unobserved_rating_value is not None:
opts["unobserved_rating_value"] = unobserved_rating_value
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return RankingFactorizationRecommender(model_proxy) | [
"def",
"create",
"(",
"observation_data",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"target",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"item_data",
"=",
"None",
",",
"num_factors",
"=",
"32",
",",
"regularization",
"=",
"1e-9",
",",
"linear_regularization",
"=",
"1e-9",
",",
"side_data_factorization",
"=",
"True",
",",
"ranking_regularization",
"=",
"0.25",
",",
"unobserved_rating_value",
"=",
"None",
",",
"num_sampled_negative_examples",
"=",
"4",
",",
"max_iterations",
"=",
"25",
",",
"sgd_step_size",
"=",
"0",
",",
"random_seed",
"=",
"0",
",",
"binary_target",
"=",
"False",
",",
"solver",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"opts",
"=",
"{",
"}",
"model_proxy",
"=",
"_turicreate",
".",
"extensions",
".",
"ranking_factorization_recommender",
"(",
")",
"model_proxy",
".",
"init_options",
"(",
"opts",
")",
"if",
"user_data",
"is",
"None",
":",
"user_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"item_data",
"is",
"None",
":",
"item_data",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"nearest_items",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"if",
"target",
"is",
"None",
":",
"binary_target",
"=",
"True",
"opts",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'target'",
":",
"target",
",",
"'random_seed'",
":",
"random_seed",
",",
"'num_factors'",
":",
"num_factors",
",",
"'regularization'",
":",
"regularization",
",",
"'linear_regularization'",
":",
"linear_regularization",
",",
"'ranking_regularization'",
":",
"ranking_regularization",
",",
"'binary_target'",
":",
"binary_target",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'side_data_factorization'",
":",
"side_data_factorization",
",",
"'num_sampled_negative_examples'",
":",
"num_sampled_negative_examples",
",",
"'solver'",
":",
"solver",
",",
"# Has no effect here.",
"'sgd_step_size'",
":",
"sgd_step_size",
"}",
"if",
"unobserved_rating_value",
"is",
"not",
"None",
":",
"opts",
"[",
"\"unobserved_rating_value\"",
"]",
"=",
"unobserved_rating_value",
"if",
"kwargs",
":",
"try",
":",
"possible_args",
"=",
"set",
"(",
"_get_default_options",
"(",
")",
"[",
"\"name\"",
"]",
")",
"except",
"(",
"RuntimeError",
",",
"KeyError",
")",
":",
"possible_args",
"=",
"set",
"(",
")",
"bad_arguments",
"=",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"possible_args",
")",
"if",
"bad_arguments",
":",
"raise",
"TypeError",
"(",
"\"Bad Keyword Arguments: \"",
"+",
"', '",
".",
"join",
"(",
"bad_arguments",
")",
")",
"opts",
".",
"update",
"(",
"kwargs",
")",
"extra_data",
"=",
"{",
"\"nearest_items\"",
":",
"_turicreate",
".",
"SFrame",
"(",
")",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"model_proxy",
".",
"train",
"(",
"observation_data",
",",
"user_data",
",",
"item_data",
",",
"opts",
",",
"extra_data",
")",
"return",
"RankingFactorizationRecommender",
"(",
"model_proxy",
")"
] | Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified variables `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
L2 regularization for interaction terms. Default: 1e-10; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
L2 regularization for linear term. Default: 1e-10; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
the observed data. Default: 50.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"])
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enable using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008). | [
"Create",
"a",
"RankingFactorizationRecommender",
"that",
"learns",
"latent",
"factors",
"for",
"each",
"user",
"and",
"item",
"and",
"uses",
"them",
"to",
"make",
"rating",
"predictions",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/ranking_factorization_recommender.py#L19-L270 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/mlmodel/docs/preprocess.py | preprocess | def preprocess():
"splits _sources/reference.rst into separate files"
text = open("./_sources/reference.rst", "r").read()
os.remove("./_sources/reference.rst")
if not os.path.exists("./_sources/reference"):
os.makedirs("./_sources/reference")
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
iteration = iter(iterable)
return izip(iteration, iteration)
sections = map(str.strip, re.split(r"<!--\s*(.+)\s*-->", text))
for section, content in pairwise(sections[1:]):
if section.endswith(".proto"):
section_name = section[:-len(".proto")]
file_name = "./_sources/reference/{0}.rst".format(section_name)
with open(file_name, "w") as f:
f.truncate()
f.write(content)
f.close() | python | def preprocess():
"splits _sources/reference.rst into separate files"
text = open("./_sources/reference.rst", "r").read()
os.remove("./_sources/reference.rst")
if not os.path.exists("./_sources/reference"):
os.makedirs("./_sources/reference")
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
iteration = iter(iterable)
return izip(iteration, iteration)
sections = map(str.strip, re.split(r"<!--\s*(.+)\s*-->", text))
for section, content in pairwise(sections[1:]):
if section.endswith(".proto"):
section_name = section[:-len(".proto")]
file_name = "./_sources/reference/{0}.rst".format(section_name)
with open(file_name, "w") as f:
f.truncate()
f.write(content)
f.close() | [
"def",
"preprocess",
"(",
")",
":",
"text",
"=",
"open",
"(",
"\"./_sources/reference.rst\"",
",",
"\"r\"",
")",
".",
"read",
"(",
")",
"os",
".",
"remove",
"(",
"\"./_sources/reference.rst\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"\"./_sources/reference\"",
")",
":",
"os",
".",
"makedirs",
"(",
"\"./_sources/reference\"",
")",
"def",
"pairwise",
"(",
"iterable",
")",
":",
"\"s -> (s0, s1), (s2, s3), (s4, s5), ...\"",
"iteration",
"=",
"iter",
"(",
"iterable",
")",
"return",
"izip",
"(",
"iteration",
",",
"iteration",
")",
"sections",
"=",
"map",
"(",
"str",
".",
"strip",
",",
"re",
".",
"split",
"(",
"r\"<!--\\s*(.+)\\s*-->\"",
",",
"text",
")",
")",
"for",
"section",
",",
"content",
"in",
"pairwise",
"(",
"sections",
"[",
"1",
":",
"]",
")",
":",
"if",
"section",
".",
"endswith",
"(",
"\".proto\"",
")",
":",
"section_name",
"=",
"section",
"[",
":",
"-",
"len",
"(",
"\".proto\"",
")",
"]",
"file_name",
"=",
"\"./_sources/reference/{0}.rst\"",
".",
"format",
"(",
"section_name",
")",
"with",
"open",
"(",
"file_name",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"truncate",
"(",
")",
"f",
".",
"write",
"(",
"content",
")",
"f",
".",
"close",
"(",
")"
] | splits _sources/reference.rst into separate files | [
"splits",
"_sources",
"/",
"reference",
".",
"rst",
"into",
"separate",
"files"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/mlmodel/docs/preprocess.py#L6-L29 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/wire_format.py | PackTag | def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type | python | def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type | [
"def",
"PackTag",
"(",
"field_number",
",",
"wire_type",
")",
":",
"if",
"not",
"0",
"<=",
"wire_type",
"<=",
"_WIRETYPE_MAX",
":",
"raise",
"message",
".",
"EncodeError",
"(",
"'Unknown wire type: %d'",
"%",
"wire_type",
")",
"return",
"(",
"field_number",
"<<",
"TAG_TYPE_BITS",
")",
"|",
"wire_type"
] | Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants. | [
"Returns",
"an",
"unsigned",
"32",
"-",
"bit",
"integer",
"that",
"encodes",
"the",
"field",
"number",
"and",
"wire",
"type",
"information",
"in",
"standard",
"protocol",
"message",
"wire",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/wire_format.py#L80-L90 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/wire_format.py | _VarUInt64ByteSizeNoTag | def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10 | python | def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10 | [
"def",
"_VarUInt64ByteSizeNoTag",
"(",
"uint64",
")",
":",
"if",
"uint64",
"<=",
"0x7f",
":",
"return",
"1",
"if",
"uint64",
"<=",
"0x3fff",
":",
"return",
"2",
"if",
"uint64",
"<=",
"0x1fffff",
":",
"return",
"3",
"if",
"uint64",
"<=",
"0xfffffff",
":",
"return",
"4",
"if",
"uint64",
"<=",
"0x7ffffffff",
":",
"return",
"5",
"if",
"uint64",
"<=",
"0x3ffffffffff",
":",
"return",
"6",
"if",
"uint64",
"<=",
"0x1ffffffffffff",
":",
"return",
"7",
"if",
"uint64",
"<=",
"0xffffffffffffff",
":",
"return",
"8",
"if",
"uint64",
"<=",
"0x7fffffffffffffff",
":",
"return",
"9",
"if",
"uint64",
">",
"UINT64_MAX",
":",
"raise",
"message",
".",
"EncodeError",
"(",
"'Value out of range: %d'",
"%",
"uint64",
")",
"return",
"10"
] | Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned. | [
"Returns",
"the",
"number",
"of",
"bytes",
"required",
"to",
"serialize",
"a",
"single",
"varint",
"using",
"boundary",
"value",
"comparisons",
".",
"(",
"unrolled",
"loop",
"optimization",
"-",
"WPierce",
")",
"uint64",
"must",
"be",
"unsigned",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/wire_format.py#L232-L248 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/_utils.py | _seconds_as_string | def _seconds_as_string(seconds):
"""
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
"""
TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
unit_strings = []
cur = max(int(seconds), 1)
for suffix, size in TIME_UNITS:
if size is not None:
cur, rest = divmod(cur, size)
else:
rest = cur
if rest > 0:
unit_strings.insert(0, '%d%s' % (rest, suffix))
return ' '.join(unit_strings) | python | def _seconds_as_string(seconds):
"""
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
"""
TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
unit_strings = []
cur = max(int(seconds), 1)
for suffix, size in TIME_UNITS:
if size is not None:
cur, rest = divmod(cur, size)
else:
rest = cur
if rest > 0:
unit_strings.insert(0, '%d%s' % (rest, suffix))
return ' '.join(unit_strings) | [
"def",
"_seconds_as_string",
"(",
"seconds",
")",
":",
"TIME_UNITS",
"=",
"[",
"(",
"'s'",
",",
"60",
")",
",",
"(",
"'m'",
",",
"60",
")",
",",
"(",
"'h'",
",",
"24",
")",
",",
"(",
"'d'",
",",
"None",
")",
"]",
"unit_strings",
"=",
"[",
"]",
"cur",
"=",
"max",
"(",
"int",
"(",
"seconds",
")",
",",
"1",
")",
"for",
"suffix",
",",
"size",
"in",
"TIME_UNITS",
":",
"if",
"size",
"is",
"not",
"None",
":",
"cur",
",",
"rest",
"=",
"divmod",
"(",
"cur",
",",
"size",
")",
"else",
":",
"rest",
"=",
"cur",
"if",
"rest",
">",
"0",
":",
"unit_strings",
".",
"insert",
"(",
"0",
",",
"'%d%s'",
"%",
"(",
"rest",
",",
"suffix",
")",
")",
"return",
"' '",
".",
"join",
"(",
"unit_strings",
")"
] | Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s' | [
"Returns",
"seconds",
"as",
"a",
"human",
"-",
"friendly",
"string",
"e",
".",
"g",
".",
"1d",
"4h",
"47m",
"41s"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/_utils.py#L10-L24 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter_internal.py | _get_converter_module | def _get_converter_module(sk_obj):
"""
Returns the module holding the conversion functions for a
particular model).
"""
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
"Transformer '%s' not supported; supported transformers are %s."
% (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] | python | def _get_converter_module(sk_obj):
"""
Returns the module holding the conversion functions for a
particular model).
"""
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
"Transformer '%s' not supported; supported transformers are %s."
% (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] | [
"def",
"_get_converter_module",
"(",
"sk_obj",
")",
":",
"try",
":",
"cv_idx",
"=",
"_converter_lookup",
"[",
"sk_obj",
".",
"__class__",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Transformer '%s' not supported; supported transformers are %s.\"",
"%",
"(",
"repr",
"(",
"sk_obj",
")",
",",
"\",\"",
".",
"join",
"(",
"k",
".",
"__name__",
"for",
"k",
"in",
"_converter_module_list",
")",
")",
")",
"return",
"_converter_module_list",
"[",
"cv_idx",
"]"
] | Returns the module holding the conversion functions for a
particular model). | [
"Returns",
"the",
"module",
"holding",
"the",
"conversion",
"functions",
"for",
"a",
"particular",
"model",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter_internal.py#L87-L100 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter_internal.py | _convert_sklearn_model | def _convert_sklearn_model(input_sk_obj, input_features = None,
output_feature_names = None, class_labels = None):
"""
Converts a generic sklearn pipeline, transformer, classifier, or regressor
into an coreML specification.
"""
if not(HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
from sklearn.pipeline import Pipeline as sk_Pipeline
if input_features is None:
input_features = "input"
if isinstance(input_sk_obj, sk_Pipeline):
sk_obj_list = input_sk_obj.steps
else:
sk_obj_list = [("SKObj", input_sk_obj)]
if len(sk_obj_list) == 0:
raise ValueError("No SKLearn transformers supplied.")
# Put the transformers into a pipeline list to hold them so that they can
# later be added to a pipeline object. (Hold off adding them to the
# pipeline now in case it's a single model at the end, in which case it
# gets returned as is.)
#
# Each member of the pipeline list is a tuple of the proto spec for that
# model, the input features, and the output features.
pipeline_list = []
# These help us keep track of what's going on a bit easier.
Input = _namedtuple('InputTransformer', ['name', 'sk_obj', 'module'])
Output = _namedtuple('CoreMLTransformer', ['spec', 'input_features', 'output_features'])
# Get a more information rich representation of the list for convenience.
# obj_list is a list of tuples of (name, sk_obj, and the converter module for
# that step in the list.
obj_list = [ Input(sk_obj_name, sk_obj, _get_converter_module(sk_obj))
for sk_obj_name, sk_obj in sk_obj_list]
# Various preprocessing steps.
# If the first component of the object list is the sklearn dict vectorizer,
# which is unique in that it accepts a list of dictionaries, then we can
# get the feature type mapping from that. This then may require the addition
# of several OHE steps, so those need to be processed in the first stage.
if isinstance(obj_list[0].sk_obj, _dict_vectorizer.sklearn_class):
dv_obj = obj_list[0].sk_obj
output_dim = len(_dict_vectorizer.get_input_feature_names(dv_obj))
if not isinstance(input_features, _string_types):
raise TypeError("If the first transformer in a pipeline is a "
"DictVectorizer, then the input feature must be the name "
"of the input dictionary.")
input_features = [(input_features, datatypes.Dictionary(str))]
if len(obj_list) > 1:
output_feature_name = _PIPELINE_INTERNAL_FEATURE_NAME
else:
if output_feature_names is None:
output_feature_name = "transformed_features"
elif isinstance(output_feature_names, _string_types):
output_feature_name = output_feature_names
else:
raise TypeError(
"For a transformer pipeline, the "
"output_features needs to be None or a string "
"for the predicted value.")
output_features = [(output_feature_name, datatypes.Array(output_dim))]
spec = _dict_vectorizer.convert(dv_obj, input_features, output_features)._spec
pipeline_list.append(Output(spec, input_features, output_features) )
# Set up the environment for the rest of the pipeline
current_input_features = output_features
current_num_dimensions = output_dim
# In the corner case that it's only the dict vectorizer here, just return
# and exit with that at this point.
if len(obj_list) == 1:
return spec
else:
del obj_list[0]
else:
# First, we need to resolve the input feature types as the sklearn pipeline
# expects just an array as input, but what we want to expose to the coreML
# user is an interface with named variables. This resolution has to handle
# a number of cases.
# Can we get the number of features from the model? If so, pass that
# information into the feature resolution function. If we can't, then this
# function should return None.
first_sk_obj = obj_list[0].sk_obj
num_dimensions = _get_converter_module(first_sk_obj).get_input_dimension(first_sk_obj)
# Resolve the input features.
features = _fm.process_or_validate_features(input_features, num_dimensions)
current_num_dimensions = _fm.dimension_of_array_features(features)
# Add in a feature vectorizer that consolodates all of the feature inputs
# into the form expected by scipy's pipelines. Essentially this is a
# translation layer between the coreML form with named arguments and the
# scikit learn variable form.
if len(features) == 1 and isinstance(features[0][1], datatypes.Array):
current_input_features = features
else:
spec, _output_dimension = create_feature_vectorizer(
features, _PIPELINE_INTERNAL_FEATURE_NAME)
assert _output_dimension == current_num_dimensions
ft_out_features = [(_PIPELINE_INTERNAL_FEATURE_NAME,
datatypes.Array(current_num_dimensions))]
pipeline_list.append( Output(spec, features, ft_out_features) )
current_input_features = ft_out_features
# Now, validate the sequence of transformers to make sure we have something
# that can work with all of this.
for i, (_, _, m) in enumerate(obj_list[:-1]):
if m.model_type != "transformer":
raise ValueError("Only a sequence of transformer classes followed by a "
"single transformer, regressor, or classifier is currently supported. "
"(object in position %d interpreted as %s)" % (i, m.model_type))
overall_mode = obj_list[-1].module.model_type
assert overall_mode in ('transformer', 'regressor', 'classifier')
# Now, go through each transformer in the sequence of transformers and add
# it to the pipeline.
for _, sk_obj, sk_m in obj_list[: -1]:
next_dimension = sk_m.update_dimension(sk_obj, current_num_dimensions)
output_features = [(_PIPELINE_INTERNAL_FEATURE_NAME,
datatypes.Array(next_dimension))]
spec = sk_m.convert(sk_obj, current_input_features, output_features)._spec
pipeline_list.append( Output(spec, current_input_features, output_features))
current_input_features = output_features
current_num_dimensions = next_dimension
# Now, handle the final transformer. This is where we need to have different
# behavior depending on whether it's a classifier, transformer, or regressor.
_, last_sk_obj, last_sk_m = obj_list[-1]
if overall_mode == "classifier":
supports_output_scores = last_sk_m.supports_output_scores(last_sk_obj)
_internal_output_classes = list(last_sk_m.get_output_classes(last_sk_obj))
if class_labels is None:
class_labels = _internal_output_classes
output_features = _fm.process_or_validate_classifier_output_features(
output_feature_names, class_labels, supports_output_scores)
elif overall_mode == "regressor":
if output_feature_names is None:
output_features = [("prediction", datatypes.Double())]
elif isinstance(output_feature_names, _string_types):
output_features = [(output_feature_names, datatypes.Double())]
else:
raise TypeError("For a regressor object or regressor pipeline, the "
"output_features needs to be None or a string for the predicted value.")
else: # transformer
final_output_dimension = last_sk_m.update_dimension(last_sk_obj, current_num_dimensions)
if output_feature_names is None:
output_features = [("transformed_features",
datatypes.Array(final_output_dimension))]
elif isinstance(output_feature_names, _string_types):
output_features = [(output_feature_names, datatypes.Array(final_output_dimension))]
else:
raise TypeError("For a transformer object or transformer pipeline, the "
"output_features needs to be None or a string for the "
"name of the transformed value.")
last_spec = last_sk_m.convert(last_sk_obj, current_input_features, output_features)._spec
pipeline_list.append( Output(last_spec, current_input_features, output_features) )
# Now, create the pipeline and return the spec for it.
# If it's just one element, we can return it.
if len(pipeline_list) == 1:
return pipeline_list[0].spec
original_input_features = pipeline_list[0].input_features
if overall_mode == 'regressor':
pipeline = PipelineRegressor(original_input_features, output_features)
elif overall_mode == 'classifier':
pipeline = PipelineClassifier(original_input_features,
class_labels, output_features)
else:
pipeline = Pipeline(original_input_features, output_features)
# Okay, now we can build the pipeline spec.
for spec, input_features, output_features in pipeline_list:
pipeline.add_model(spec)
return pipeline.spec | python | def _convert_sklearn_model(input_sk_obj, input_features = None,
output_feature_names = None, class_labels = None):
"""
Converts a generic sklearn pipeline, transformer, classifier, or regressor
into an coreML specification.
"""
if not(HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
from sklearn.pipeline import Pipeline as sk_Pipeline
if input_features is None:
input_features = "input"
if isinstance(input_sk_obj, sk_Pipeline):
sk_obj_list = input_sk_obj.steps
else:
sk_obj_list = [("SKObj", input_sk_obj)]
if len(sk_obj_list) == 0:
raise ValueError("No SKLearn transformers supplied.")
# Put the transformers into a pipeline list to hold them so that they can
# later be added to a pipeline object. (Hold off adding them to the
# pipeline now in case it's a single model at the end, in which case it
# gets returned as is.)
#
# Each member of the pipeline list is a tuple of the proto spec for that
# model, the input features, and the output features.
pipeline_list = []
# These help us keep track of what's going on a bit easier.
Input = _namedtuple('InputTransformer', ['name', 'sk_obj', 'module'])
Output = _namedtuple('CoreMLTransformer', ['spec', 'input_features', 'output_features'])
# Get a more information rich representation of the list for convenience.
# obj_list is a list of tuples of (name, sk_obj, and the converter module for
# that step in the list.
obj_list = [ Input(sk_obj_name, sk_obj, _get_converter_module(sk_obj))
for sk_obj_name, sk_obj in sk_obj_list]
# Various preprocessing steps.
# If the first component of the object list is the sklearn dict vectorizer,
# which is unique in that it accepts a list of dictionaries, then we can
# get the feature type mapping from that. This then may require the addition
# of several OHE steps, so those need to be processed in the first stage.
if isinstance(obj_list[0].sk_obj, _dict_vectorizer.sklearn_class):
dv_obj = obj_list[0].sk_obj
output_dim = len(_dict_vectorizer.get_input_feature_names(dv_obj))
if not isinstance(input_features, _string_types):
raise TypeError("If the first transformer in a pipeline is a "
"DictVectorizer, then the input feature must be the name "
"of the input dictionary.")
input_features = [(input_features, datatypes.Dictionary(str))]
if len(obj_list) > 1:
output_feature_name = _PIPELINE_INTERNAL_FEATURE_NAME
else:
if output_feature_names is None:
output_feature_name = "transformed_features"
elif isinstance(output_feature_names, _string_types):
output_feature_name = output_feature_names
else:
raise TypeError(
"For a transformer pipeline, the "
"output_features needs to be None or a string "
"for the predicted value.")
output_features = [(output_feature_name, datatypes.Array(output_dim))]
spec = _dict_vectorizer.convert(dv_obj, input_features, output_features)._spec
pipeline_list.append(Output(spec, input_features, output_features) )
# Set up the environment for the rest of the pipeline
current_input_features = output_features
current_num_dimensions = output_dim
# In the corner case that it's only the dict vectorizer here, just return
# and exit with that at this point.
if len(obj_list) == 1:
return spec
else:
del obj_list[0]
else:
# First, we need to resolve the input feature types as the sklearn pipeline
# expects just an array as input, but what we want to expose to the coreML
# user is an interface with named variables. This resolution has to handle
# a number of cases.
# Can we get the number of features from the model? If so, pass that
# information into the feature resolution function. If we can't, then this
# function should return None.
first_sk_obj = obj_list[0].sk_obj
num_dimensions = _get_converter_module(first_sk_obj).get_input_dimension(first_sk_obj)
# Resolve the input features.
features = _fm.process_or_validate_features(input_features, num_dimensions)
current_num_dimensions = _fm.dimension_of_array_features(features)
# Add in a feature vectorizer that consolodates all of the feature inputs
# into the form expected by scipy's pipelines. Essentially this is a
# translation layer between the coreML form with named arguments and the
# scikit learn variable form.
if len(features) == 1 and isinstance(features[0][1], datatypes.Array):
current_input_features = features
else:
spec, _output_dimension = create_feature_vectorizer(
features, _PIPELINE_INTERNAL_FEATURE_NAME)
assert _output_dimension == current_num_dimensions
ft_out_features = [(_PIPELINE_INTERNAL_FEATURE_NAME,
datatypes.Array(current_num_dimensions))]
pipeline_list.append( Output(spec, features, ft_out_features) )
current_input_features = ft_out_features
# Now, validate the sequence of transformers to make sure we have something
# that can work with all of this.
for i, (_, _, m) in enumerate(obj_list[:-1]):
if m.model_type != "transformer":
raise ValueError("Only a sequence of transformer classes followed by a "
"single transformer, regressor, or classifier is currently supported. "
"(object in position %d interpreted as %s)" % (i, m.model_type))
overall_mode = obj_list[-1].module.model_type
assert overall_mode in ('transformer', 'regressor', 'classifier')
# Now, go through each transformer in the sequence of transformers and add
# it to the pipeline.
for _, sk_obj, sk_m in obj_list[: -1]:
next_dimension = sk_m.update_dimension(sk_obj, current_num_dimensions)
output_features = [(_PIPELINE_INTERNAL_FEATURE_NAME,
datatypes.Array(next_dimension))]
spec = sk_m.convert(sk_obj, current_input_features, output_features)._spec
pipeline_list.append( Output(spec, current_input_features, output_features))
current_input_features = output_features
current_num_dimensions = next_dimension
# Now, handle the final transformer. This is where we need to have different
# behavior depending on whether it's a classifier, transformer, or regressor.
_, last_sk_obj, last_sk_m = obj_list[-1]
if overall_mode == "classifier":
supports_output_scores = last_sk_m.supports_output_scores(last_sk_obj)
_internal_output_classes = list(last_sk_m.get_output_classes(last_sk_obj))
if class_labels is None:
class_labels = _internal_output_classes
output_features = _fm.process_or_validate_classifier_output_features(
output_feature_names, class_labels, supports_output_scores)
elif overall_mode == "regressor":
if output_feature_names is None:
output_features = [("prediction", datatypes.Double())]
elif isinstance(output_feature_names, _string_types):
output_features = [(output_feature_names, datatypes.Double())]
else:
raise TypeError("For a regressor object or regressor pipeline, the "
"output_features needs to be None or a string for the predicted value.")
else: # transformer
final_output_dimension = last_sk_m.update_dimension(last_sk_obj, current_num_dimensions)
if output_feature_names is None:
output_features = [("transformed_features",
datatypes.Array(final_output_dimension))]
elif isinstance(output_feature_names, _string_types):
output_features = [(output_feature_names, datatypes.Array(final_output_dimension))]
else:
raise TypeError("For a transformer object or transformer pipeline, the "
"output_features needs to be None or a string for the "
"name of the transformed value.")
last_spec = last_sk_m.convert(last_sk_obj, current_input_features, output_features)._spec
pipeline_list.append( Output(last_spec, current_input_features, output_features) )
# Now, create the pipeline and return the spec for it.
# If it's just one element, we can return it.
if len(pipeline_list) == 1:
return pipeline_list[0].spec
original_input_features = pipeline_list[0].input_features
if overall_mode == 'regressor':
pipeline = PipelineRegressor(original_input_features, output_features)
elif overall_mode == 'classifier':
pipeline = PipelineClassifier(original_input_features,
class_labels, output_features)
else:
pipeline = Pipeline(original_input_features, output_features)
# Okay, now we can build the pipeline spec.
for spec, input_features, output_features in pipeline_list:
pipeline.add_model(spec)
return pipeline.spec | [
"def",
"_convert_sklearn_model",
"(",
"input_sk_obj",
",",
"input_features",
"=",
"None",
",",
"output_feature_names",
"=",
"None",
",",
"class_labels",
"=",
"None",
")",
":",
"if",
"not",
"(",
"HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"from",
"sklearn",
".",
"pipeline",
"import",
"Pipeline",
"as",
"sk_Pipeline",
"if",
"input_features",
"is",
"None",
":",
"input_features",
"=",
"\"input\"",
"if",
"isinstance",
"(",
"input_sk_obj",
",",
"sk_Pipeline",
")",
":",
"sk_obj_list",
"=",
"input_sk_obj",
".",
"steps",
"else",
":",
"sk_obj_list",
"=",
"[",
"(",
"\"SKObj\"",
",",
"input_sk_obj",
")",
"]",
"if",
"len",
"(",
"sk_obj_list",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"No SKLearn transformers supplied.\"",
")",
"# Put the transformers into a pipeline list to hold them so that they can",
"# later be added to a pipeline object. (Hold off adding them to the",
"# pipeline now in case it's a single model at the end, in which case it",
"# gets returned as is.)",
"#",
"# Each member of the pipeline list is a tuple of the proto spec for that",
"# model, the input features, and the output features.",
"pipeline_list",
"=",
"[",
"]",
"# These help us keep track of what's going on a bit easier.",
"Input",
"=",
"_namedtuple",
"(",
"'InputTransformer'",
",",
"[",
"'name'",
",",
"'sk_obj'",
",",
"'module'",
"]",
")",
"Output",
"=",
"_namedtuple",
"(",
"'CoreMLTransformer'",
",",
"[",
"'spec'",
",",
"'input_features'",
",",
"'output_features'",
"]",
")",
"# Get a more information rich representation of the list for convenience.",
"# obj_list is a list of tuples of (name, sk_obj, and the converter module for",
"# that step in the list.",
"obj_list",
"=",
"[",
"Input",
"(",
"sk_obj_name",
",",
"sk_obj",
",",
"_get_converter_module",
"(",
"sk_obj",
")",
")",
"for",
"sk_obj_name",
",",
"sk_obj",
"in",
"sk_obj_list",
"]",
"# Various preprocessing steps.",
"# If the first component of the object list is the sklearn dict vectorizer,",
"# which is unique in that it accepts a list of dictionaries, then we can",
"# get the feature type mapping from that. This then may require the addition",
"# of several OHE steps, so those need to be processed in the first stage.",
"if",
"isinstance",
"(",
"obj_list",
"[",
"0",
"]",
".",
"sk_obj",
",",
"_dict_vectorizer",
".",
"sklearn_class",
")",
":",
"dv_obj",
"=",
"obj_list",
"[",
"0",
"]",
".",
"sk_obj",
"output_dim",
"=",
"len",
"(",
"_dict_vectorizer",
".",
"get_input_feature_names",
"(",
"dv_obj",
")",
")",
"if",
"not",
"isinstance",
"(",
"input_features",
",",
"_string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"If the first transformer in a pipeline is a \"",
"\"DictVectorizer, then the input feature must be the name \"",
"\"of the input dictionary.\"",
")",
"input_features",
"=",
"[",
"(",
"input_features",
",",
"datatypes",
".",
"Dictionary",
"(",
"str",
")",
")",
"]",
"if",
"len",
"(",
"obj_list",
")",
">",
"1",
":",
"output_feature_name",
"=",
"_PIPELINE_INTERNAL_FEATURE_NAME",
"else",
":",
"if",
"output_feature_names",
"is",
"None",
":",
"output_feature_name",
"=",
"\"transformed_features\"",
"elif",
"isinstance",
"(",
"output_feature_names",
",",
"_string_types",
")",
":",
"output_feature_name",
"=",
"output_feature_names",
"else",
":",
"raise",
"TypeError",
"(",
"\"For a transformer pipeline, the \"",
"\"output_features needs to be None or a string \"",
"\"for the predicted value.\"",
")",
"output_features",
"=",
"[",
"(",
"output_feature_name",
",",
"datatypes",
".",
"Array",
"(",
"output_dim",
")",
")",
"]",
"spec",
"=",
"_dict_vectorizer",
".",
"convert",
"(",
"dv_obj",
",",
"input_features",
",",
"output_features",
")",
".",
"_spec",
"pipeline_list",
".",
"append",
"(",
"Output",
"(",
"spec",
",",
"input_features",
",",
"output_features",
")",
")",
"# Set up the environment for the rest of the pipeline",
"current_input_features",
"=",
"output_features",
"current_num_dimensions",
"=",
"output_dim",
"# In the corner case that it's only the dict vectorizer here, just return",
"# and exit with that at this point.",
"if",
"len",
"(",
"obj_list",
")",
"==",
"1",
":",
"return",
"spec",
"else",
":",
"del",
"obj_list",
"[",
"0",
"]",
"else",
":",
"# First, we need to resolve the input feature types as the sklearn pipeline",
"# expects just an array as input, but what we want to expose to the coreML",
"# user is an interface with named variables. This resolution has to handle",
"# a number of cases.",
"# Can we get the number of features from the model? If so, pass that",
"# information into the feature resolution function. If we can't, then this",
"# function should return None.",
"first_sk_obj",
"=",
"obj_list",
"[",
"0",
"]",
".",
"sk_obj",
"num_dimensions",
"=",
"_get_converter_module",
"(",
"first_sk_obj",
")",
".",
"get_input_dimension",
"(",
"first_sk_obj",
")",
"# Resolve the input features.",
"features",
"=",
"_fm",
".",
"process_or_validate_features",
"(",
"input_features",
",",
"num_dimensions",
")",
"current_num_dimensions",
"=",
"_fm",
".",
"dimension_of_array_features",
"(",
"features",
")",
"# Add in a feature vectorizer that consolodates all of the feature inputs",
"# into the form expected by scipy's pipelines. Essentially this is a",
"# translation layer between the coreML form with named arguments and the",
"# scikit learn variable form.",
"if",
"len",
"(",
"features",
")",
"==",
"1",
"and",
"isinstance",
"(",
"features",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"datatypes",
".",
"Array",
")",
":",
"current_input_features",
"=",
"features",
"else",
":",
"spec",
",",
"_output_dimension",
"=",
"create_feature_vectorizer",
"(",
"features",
",",
"_PIPELINE_INTERNAL_FEATURE_NAME",
")",
"assert",
"_output_dimension",
"==",
"current_num_dimensions",
"ft_out_features",
"=",
"[",
"(",
"_PIPELINE_INTERNAL_FEATURE_NAME",
",",
"datatypes",
".",
"Array",
"(",
"current_num_dimensions",
")",
")",
"]",
"pipeline_list",
".",
"append",
"(",
"Output",
"(",
"spec",
",",
"features",
",",
"ft_out_features",
")",
")",
"current_input_features",
"=",
"ft_out_features",
"# Now, validate the sequence of transformers to make sure we have something",
"# that can work with all of this.",
"for",
"i",
",",
"(",
"_",
",",
"_",
",",
"m",
")",
"in",
"enumerate",
"(",
"obj_list",
"[",
":",
"-",
"1",
"]",
")",
":",
"if",
"m",
".",
"model_type",
"!=",
"\"transformer\"",
":",
"raise",
"ValueError",
"(",
"\"Only a sequence of transformer classes followed by a \"",
"\"single transformer, regressor, or classifier is currently supported. \"",
"\"(object in position %d interpreted as %s)\"",
"%",
"(",
"i",
",",
"m",
".",
"model_type",
")",
")",
"overall_mode",
"=",
"obj_list",
"[",
"-",
"1",
"]",
".",
"module",
".",
"model_type",
"assert",
"overall_mode",
"in",
"(",
"'transformer'",
",",
"'regressor'",
",",
"'classifier'",
")",
"# Now, go through each transformer in the sequence of transformers and add",
"# it to the pipeline.",
"for",
"_",
",",
"sk_obj",
",",
"sk_m",
"in",
"obj_list",
"[",
":",
"-",
"1",
"]",
":",
"next_dimension",
"=",
"sk_m",
".",
"update_dimension",
"(",
"sk_obj",
",",
"current_num_dimensions",
")",
"output_features",
"=",
"[",
"(",
"_PIPELINE_INTERNAL_FEATURE_NAME",
",",
"datatypes",
".",
"Array",
"(",
"next_dimension",
")",
")",
"]",
"spec",
"=",
"sk_m",
".",
"convert",
"(",
"sk_obj",
",",
"current_input_features",
",",
"output_features",
")",
".",
"_spec",
"pipeline_list",
".",
"append",
"(",
"Output",
"(",
"spec",
",",
"current_input_features",
",",
"output_features",
")",
")",
"current_input_features",
"=",
"output_features",
"current_num_dimensions",
"=",
"next_dimension",
"# Now, handle the final transformer. This is where we need to have different",
"# behavior depending on whether it's a classifier, transformer, or regressor.",
"_",
",",
"last_sk_obj",
",",
"last_sk_m",
"=",
"obj_list",
"[",
"-",
"1",
"]",
"if",
"overall_mode",
"==",
"\"classifier\"",
":",
"supports_output_scores",
"=",
"last_sk_m",
".",
"supports_output_scores",
"(",
"last_sk_obj",
")",
"_internal_output_classes",
"=",
"list",
"(",
"last_sk_m",
".",
"get_output_classes",
"(",
"last_sk_obj",
")",
")",
"if",
"class_labels",
"is",
"None",
":",
"class_labels",
"=",
"_internal_output_classes",
"output_features",
"=",
"_fm",
".",
"process_or_validate_classifier_output_features",
"(",
"output_feature_names",
",",
"class_labels",
",",
"supports_output_scores",
")",
"elif",
"overall_mode",
"==",
"\"regressor\"",
":",
"if",
"output_feature_names",
"is",
"None",
":",
"output_features",
"=",
"[",
"(",
"\"prediction\"",
",",
"datatypes",
".",
"Double",
"(",
")",
")",
"]",
"elif",
"isinstance",
"(",
"output_feature_names",
",",
"_string_types",
")",
":",
"output_features",
"=",
"[",
"(",
"output_feature_names",
",",
"datatypes",
".",
"Double",
"(",
")",
")",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"For a regressor object or regressor pipeline, the \"",
"\"output_features needs to be None or a string for the predicted value.\"",
")",
"else",
":",
"# transformer",
"final_output_dimension",
"=",
"last_sk_m",
".",
"update_dimension",
"(",
"last_sk_obj",
",",
"current_num_dimensions",
")",
"if",
"output_feature_names",
"is",
"None",
":",
"output_features",
"=",
"[",
"(",
"\"transformed_features\"",
",",
"datatypes",
".",
"Array",
"(",
"final_output_dimension",
")",
")",
"]",
"elif",
"isinstance",
"(",
"output_feature_names",
",",
"_string_types",
")",
":",
"output_features",
"=",
"[",
"(",
"output_feature_names",
",",
"datatypes",
".",
"Array",
"(",
"final_output_dimension",
")",
")",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"For a transformer object or transformer pipeline, the \"",
"\"output_features needs to be None or a string for the \"",
"\"name of the transformed value.\"",
")",
"last_spec",
"=",
"last_sk_m",
".",
"convert",
"(",
"last_sk_obj",
",",
"current_input_features",
",",
"output_features",
")",
".",
"_spec",
"pipeline_list",
".",
"append",
"(",
"Output",
"(",
"last_spec",
",",
"current_input_features",
",",
"output_features",
")",
")",
"# Now, create the pipeline and return the spec for it.",
"# If it's just one element, we can return it.",
"if",
"len",
"(",
"pipeline_list",
")",
"==",
"1",
":",
"return",
"pipeline_list",
"[",
"0",
"]",
".",
"spec",
"original_input_features",
"=",
"pipeline_list",
"[",
"0",
"]",
".",
"input_features",
"if",
"overall_mode",
"==",
"'regressor'",
":",
"pipeline",
"=",
"PipelineRegressor",
"(",
"original_input_features",
",",
"output_features",
")",
"elif",
"overall_mode",
"==",
"'classifier'",
":",
"pipeline",
"=",
"PipelineClassifier",
"(",
"original_input_features",
",",
"class_labels",
",",
"output_features",
")",
"else",
":",
"pipeline",
"=",
"Pipeline",
"(",
"original_input_features",
",",
"output_features",
")",
"# Okay, now we can build the pipeline spec.",
"for",
"spec",
",",
"input_features",
",",
"output_features",
"in",
"pipeline_list",
":",
"pipeline",
".",
"add_model",
"(",
"spec",
")",
"return",
"pipeline",
".",
"spec"
] | Converts a generic sklearn pipeline, transformer, classifier, or regressor
into an coreML specification. | [
"Converts",
"a",
"generic",
"sklearn",
"pipeline",
"transformer",
"classifier",
"or",
"regressor",
"into",
"an",
"coreML",
"specification",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_converter_internal.py#L109-L324 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py | TreeEnsembleBase.set_default_prediction_value | def set_default_prediction_value(self, values):
"""
Set the default prediction value(s).
The values given here form the base prediction value that the values
at activated leaves are added to. If values is a scalar, then
the output of the tree must also be 1 dimensional; otherwise, values
must be a list with length matching the dimension of values in the tree.
Parameters
----------
values: [int | double | list[double]]
Default values for predictions.
"""
if type(values) is not list:
values = [float(values)]
self.tree_parameters.numPredictionDimensions = len(values)
for value in values:
self.tree_parameters.basePredictionValue.append(value) | python | def set_default_prediction_value(self, values):
"""
Set the default prediction value(s).
The values given here form the base prediction value that the values
at activated leaves are added to. If values is a scalar, then
the output of the tree must also be 1 dimensional; otherwise, values
must be a list with length matching the dimension of values in the tree.
Parameters
----------
values: [int | double | list[double]]
Default values for predictions.
"""
if type(values) is not list:
values = [float(values)]
self.tree_parameters.numPredictionDimensions = len(values)
for value in values:
self.tree_parameters.basePredictionValue.append(value) | [
"def",
"set_default_prediction_value",
"(",
"self",
",",
"values",
")",
":",
"if",
"type",
"(",
"values",
")",
"is",
"not",
"list",
":",
"values",
"=",
"[",
"float",
"(",
"values",
")",
"]",
"self",
".",
"tree_parameters",
".",
"numPredictionDimensions",
"=",
"len",
"(",
"values",
")",
"for",
"value",
"in",
"values",
":",
"self",
".",
"tree_parameters",
".",
"basePredictionValue",
".",
"append",
"(",
"value",
")"
] | Set the default prediction value(s).
The values given here form the base prediction value that the values
at activated leaves are added to. If values is a scalar, then
the output of the tree must also be 1 dimensional; otherwise, values
must be a list with length matching the dimension of values in the tree.
Parameters
----------
values: [int | double | list[double]]
Default values for predictions. | [
"Set",
"the",
"default",
"prediction",
"value",
"(",
"s",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py#L36-L55 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py | TreeEnsembleBase.set_post_evaluation_transform | def set_post_evaluation_transform(self, value):
r"""
Set the post processing transform applied after the prediction value
from the tree ensemble.
Parameters
----------
value: str
A value denoting the transform applied. Possible values are:
- "NoTransform" (default). Do not apply a transform.
- "Classification_SoftMax".
Apply a softmax function to the outcome to produce normalized,
non-negative scores that sum to 1. The transformation applied to
dimension `i` is equivalent to:
.. math::
\frac{e^{x_i}}{\sum_j e^{x_j}}
Note: This is the output transformation applied by the XGBoost package
with multiclass classification.
- "Regression_Logistic".
Applies a logistic transform the predicted value, specifically:
.. math::
(1 + e^{-v})^{-1}
This is the transformation used in binary classification.
"""
self.tree_spec.postEvaluationTransform = \
_TreeEnsemble_pb2.TreeEnsemblePostEvaluationTransform.Value(value) | python | def set_post_evaluation_transform(self, value):
r"""
Set the post processing transform applied after the prediction value
from the tree ensemble.
Parameters
----------
value: str
A value denoting the transform applied. Possible values are:
- "NoTransform" (default). Do not apply a transform.
- "Classification_SoftMax".
Apply a softmax function to the outcome to produce normalized,
non-negative scores that sum to 1. The transformation applied to
dimension `i` is equivalent to:
.. math::
\frac{e^{x_i}}{\sum_j e^{x_j}}
Note: This is the output transformation applied by the XGBoost package
with multiclass classification.
- "Regression_Logistic".
Applies a logistic transform the predicted value, specifically:
.. math::
(1 + e^{-v})^{-1}
This is the transformation used in binary classification.
"""
self.tree_spec.postEvaluationTransform = \
_TreeEnsemble_pb2.TreeEnsemblePostEvaluationTransform.Value(value) | [
"def",
"set_post_evaluation_transform",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"tree_spec",
".",
"postEvaluationTransform",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsemblePostEvaluationTransform",
".",
"Value",
"(",
"value",
")"
] | r"""
Set the post processing transform applied after the prediction value
from the tree ensemble.
Parameters
----------
value: str
A value denoting the transform applied. Possible values are:
- "NoTransform" (default). Do not apply a transform.
- "Classification_SoftMax".
Apply a softmax function to the outcome to produce normalized,
non-negative scores that sum to 1. The transformation applied to
dimension `i` is equivalent to:
.. math::
\frac{e^{x_i}}{\sum_j e^{x_j}}
Note: This is the output transformation applied by the XGBoost package
with multiclass classification.
- "Regression_Logistic".
Applies a logistic transform the predicted value, specifically:
.. math::
(1 + e^{-v})^{-1}
This is the transformation used in binary classification. | [
"r",
"Set",
"the",
"post",
"processing",
"transform",
"applied",
"after",
"the",
"prediction",
"value",
"from",
"the",
"tree",
"ensemble",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py#L57-L97 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py | TreeEnsembleBase.add_branch_node | def add_branch_node(self, tree_id, node_id, feature_index, feature_value,
branch_mode, true_child_id, false_child_id, relative_hit_rate = None,
missing_value_tracks_true_child = False):
"""
Add a branch node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
feature_index: int
Index of the feature in the input being split on.
feature_value: double or int
The value used in the feature comparison determining the traversal
direction from this node.
branch_mode: str
Branch mode of the node, specifying the condition under which the node
referenced by `true_child_id` is called next.
Must be one of the following:
- `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] <= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
if `input[feature_index] < feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] >= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
if `input[feature_index] > feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueEqual"`. Traverse to node `true_child_id`
if `input[feature_index] == feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
if `input[feature_index] != feature_value`, and `false_child_id`
otherwise.
true_child_id: int
ID of the child under the true condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
false_child_id: int
ID of the child under the false condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
relative_hit_rate: float [optional]
When the model is converted compiled by CoreML, this gives hints to
Core ML about which node is more likely to be hit on evaluation,
allowing for additional optimizations. The values can be on any scale,
with the values between child nodes being compared relative to each
other.
missing_value_tracks_true_child: bool [optional]
If the training data contains NaN values or missing values, then this
flag determines which direction a NaN value traverses.
"""
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.branchFeatureIndex = feature_index
spec_node.branchFeatureValue = feature_value
spec_node.trueChildNodeId = true_child_id
spec_node.falseChildNodeId = false_child_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value(branch_mode)
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
spec_node.missingValueTracksTrueChild = missing_value_tracks_true_child | python | def add_branch_node(self, tree_id, node_id, feature_index, feature_value,
branch_mode, true_child_id, false_child_id, relative_hit_rate = None,
missing_value_tracks_true_child = False):
"""
Add a branch node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
feature_index: int
Index of the feature in the input being split on.
feature_value: double or int
The value used in the feature comparison determining the traversal
direction from this node.
branch_mode: str
Branch mode of the node, specifying the condition under which the node
referenced by `true_child_id` is called next.
Must be one of the following:
- `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] <= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
if `input[feature_index] < feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] >= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
if `input[feature_index] > feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueEqual"`. Traverse to node `true_child_id`
if `input[feature_index] == feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
if `input[feature_index] != feature_value`, and `false_child_id`
otherwise.
true_child_id: int
ID of the child under the true condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
false_child_id: int
ID of the child under the false condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
relative_hit_rate: float [optional]
When the model is converted compiled by CoreML, this gives hints to
Core ML about which node is more likely to be hit on evaluation,
allowing for additional optimizations. The values can be on any scale,
with the values between child nodes being compared relative to each
other.
missing_value_tracks_true_child: bool [optional]
If the training data contains NaN values or missing values, then this
flag determines which direction a NaN value traverses.
"""
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.branchFeatureIndex = feature_index
spec_node.branchFeatureValue = feature_value
spec_node.trueChildNodeId = true_child_id
spec_node.falseChildNodeId = false_child_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value(branch_mode)
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
spec_node.missingValueTracksTrueChild = missing_value_tracks_true_child | [
"def",
"add_branch_node",
"(",
"self",
",",
"tree_id",
",",
"node_id",
",",
"feature_index",
",",
"feature_value",
",",
"branch_mode",
",",
"true_child_id",
",",
"false_child_id",
",",
"relative_hit_rate",
"=",
"None",
",",
"missing_value_tracks_true_child",
"=",
"False",
")",
":",
"spec_node",
"=",
"self",
".",
"tree_parameters",
".",
"nodes",
".",
"add",
"(",
")",
"spec_node",
".",
"treeId",
"=",
"tree_id",
"spec_node",
".",
"nodeId",
"=",
"node_id",
"spec_node",
".",
"branchFeatureIndex",
"=",
"feature_index",
"spec_node",
".",
"branchFeatureValue",
"=",
"feature_value",
"spec_node",
".",
"trueChildNodeId",
"=",
"true_child_id",
"spec_node",
".",
"falseChildNodeId",
"=",
"false_child_id",
"spec_node",
".",
"nodeBehavior",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsembleParameters",
".",
"TreeNode",
".",
"TreeNodeBehavior",
".",
"Value",
"(",
"branch_mode",
")",
"if",
"relative_hit_rate",
"is",
"not",
"None",
":",
"spec_node",
".",
"relativeHitRate",
"=",
"relative_hit_rate",
"spec_node",
".",
"missingValueTracksTrueChild",
"=",
"missing_value_tracks_true_child"
] | Add a branch node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
feature_index: int
Index of the feature in the input being split on.
feature_value: double or int
The value used in the feature comparison determining the traversal
direction from this node.
branch_mode: str
Branch mode of the node, specifying the condition under which the node
referenced by `true_child_id` is called next.
Must be one of the following:
- `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] <= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
if `input[feature_index] < feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] >= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
if `input[feature_index] > feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueEqual"`. Traverse to node `true_child_id`
if `input[feature_index] == feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
if `input[feature_index] != feature_value`, and `false_child_id`
otherwise.
true_child_id: int
ID of the child under the true condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
false_child_id: int
ID of the child under the false condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
relative_hit_rate: float [optional]
When the model is converted compiled by CoreML, this gives hints to
Core ML about which node is more likely to be hit on evaluation,
allowing for additional optimizations. The values can be on any scale,
with the values between child nodes being compared relative to each
other.
missing_value_tracks_true_child: bool [optional]
If the training data contains NaN values or missing values, then this
flag determines which direction a NaN value traverses. | [
"Add",
"a",
"branch",
"node",
"to",
"the",
"tree",
"ensemble",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py#L99-L186 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py | TreeEnsembleBase.add_leaf_node | def add_leaf_node(self, tree_id, node_id, values, relative_hit_rate = None):
"""
Add a leaf node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
values: [float | int | list | dict]
Value(s) at the leaf node to add to the prediction when this node is
activated. If the prediction dimension of the tree is 1, then the
value is specified as a float or integer value.
For multidimensional predictions, the values can be a list of numbers
with length matching the dimension of the predictions or a dictionary
mapping index to value added to that dimension.
Note that the dimension of any tree must match the dimension given
when :py:meth:`set_default_prediction_value` is called.
"""
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode')
if not isinstance(values, _collections.Iterable):
values = [values]
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
if type(values) == dict:
iter = values.items()
else:
iter = enumerate(values)
for index, value in iter:
ev_info = spec_node.evaluationInfo.add()
ev_info.evaluationIndex = index
ev_info.evaluationValue = float(value)
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode') | python | def add_leaf_node(self, tree_id, node_id, values, relative_hit_rate = None):
"""
Add a leaf node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
values: [float | int | list | dict]
Value(s) at the leaf node to add to the prediction when this node is
activated. If the prediction dimension of the tree is 1, then the
value is specified as a float or integer value.
For multidimensional predictions, the values can be a list of numbers
with length matching the dimension of the predictions or a dictionary
mapping index to value added to that dimension.
Note that the dimension of any tree must match the dimension given
when :py:meth:`set_default_prediction_value` is called.
"""
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode')
if not isinstance(values, _collections.Iterable):
values = [values]
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
if type(values) == dict:
iter = values.items()
else:
iter = enumerate(values)
for index, value in iter:
ev_info = spec_node.evaluationInfo.add()
ev_info.evaluationIndex = index
ev_info.evaluationValue = float(value)
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode') | [
"def",
"add_leaf_node",
"(",
"self",
",",
"tree_id",
",",
"node_id",
",",
"values",
",",
"relative_hit_rate",
"=",
"None",
")",
":",
"spec_node",
"=",
"self",
".",
"tree_parameters",
".",
"nodes",
".",
"add",
"(",
")",
"spec_node",
".",
"treeId",
"=",
"tree_id",
"spec_node",
".",
"nodeId",
"=",
"node_id",
"spec_node",
".",
"nodeBehavior",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsembleParameters",
".",
"TreeNode",
".",
"TreeNodeBehavior",
".",
"Value",
"(",
"'LeafNode'",
")",
"if",
"not",
"isinstance",
"(",
"values",
",",
"_collections",
".",
"Iterable",
")",
":",
"values",
"=",
"[",
"values",
"]",
"if",
"relative_hit_rate",
"is",
"not",
"None",
":",
"spec_node",
".",
"relativeHitRate",
"=",
"relative_hit_rate",
"if",
"type",
"(",
"values",
")",
"==",
"dict",
":",
"iter",
"=",
"values",
".",
"items",
"(",
")",
"else",
":",
"iter",
"=",
"enumerate",
"(",
"values",
")",
"for",
"index",
",",
"value",
"in",
"iter",
":",
"ev_info",
"=",
"spec_node",
".",
"evaluationInfo",
".",
"add",
"(",
")",
"ev_info",
".",
"evaluationIndex",
"=",
"index",
"ev_info",
".",
"evaluationValue",
"=",
"float",
"(",
"value",
")",
"spec_node",
".",
"nodeBehavior",
"=",
"_TreeEnsemble_pb2",
".",
"TreeEnsembleParameters",
".",
"TreeNode",
".",
"TreeNodeBehavior",
".",
"Value",
"(",
"'LeafNode'",
")"
] | Add a leaf node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
values: [float | int | list | dict]
Value(s) at the leaf node to add to the prediction when this node is
activated. If the prediction dimension of the tree is 1, then the
value is specified as a float or integer value.
For multidimensional predictions, the values can be a list of numbers
with length matching the dimension of the predictions or a dictionary
mapping index to value added to that dimension.
Note that the dimension of any tree must match the dimension given
when :py:meth:`set_default_prediction_value` is called. | [
"Add",
"a",
"leaf",
"node",
"to",
"the",
"tree",
"ensemble",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/tree_ensemble.py#L188-L235 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | create | def create (raw_properties = []):
""" Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
"""
assert (is_iterable_typed(raw_properties, property.Property)
or is_iterable_typed(raw_properties, basestring))
# FIXME: propagate to callers.
if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
x = raw_properties
else:
x = [property.create_from_string(ps) for ps in raw_properties]
# These two lines of code are optimized to the current state
# of the Property class. Since this function acts as the caching
# frontend to the PropertySet class modifying these two lines
# could have a severe performance penalty. Be careful.
# It would be faster to sort by p.id, but some projects may rely
# on the fact that the properties are ordered alphabetically. So,
# we maintain alphabetical sorting so as to maintain backward compatibility.
x = sorted(set(x), key=lambda p: (p.feature.name, p.value, p.condition))
key = tuple(p.id for p in x)
if key not in __cache:
__cache [key] = PropertySet(x)
return __cache [key] | python | def create (raw_properties = []):
""" Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
"""
assert (is_iterable_typed(raw_properties, property.Property)
or is_iterable_typed(raw_properties, basestring))
# FIXME: propagate to callers.
if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
x = raw_properties
else:
x = [property.create_from_string(ps) for ps in raw_properties]
# These two lines of code are optimized to the current state
# of the Property class. Since this function acts as the caching
# frontend to the PropertySet class modifying these two lines
# could have a severe performance penalty. Be careful.
# It would be faster to sort by p.id, but some projects may rely
# on the fact that the properties are ordered alphabetically. So,
# we maintain alphabetical sorting so as to maintain backward compatibility.
x = sorted(set(x), key=lambda p: (p.feature.name, p.value, p.condition))
key = tuple(p.id for p in x)
if key not in __cache:
__cache [key] = PropertySet(x)
return __cache [key] | [
"def",
"create",
"(",
"raw_properties",
"=",
"[",
"]",
")",
":",
"assert",
"(",
"is_iterable_typed",
"(",
"raw_properties",
",",
"property",
".",
"Property",
")",
"or",
"is_iterable_typed",
"(",
"raw_properties",
",",
"basestring",
")",
")",
"# FIXME: propagate to callers.",
"if",
"len",
"(",
"raw_properties",
")",
">",
"0",
"and",
"isinstance",
"(",
"raw_properties",
"[",
"0",
"]",
",",
"property",
".",
"Property",
")",
":",
"x",
"=",
"raw_properties",
"else",
":",
"x",
"=",
"[",
"property",
".",
"create_from_string",
"(",
"ps",
")",
"for",
"ps",
"in",
"raw_properties",
"]",
"# These two lines of code are optimized to the current state",
"# of the Property class. Since this function acts as the caching",
"# frontend to the PropertySet class modifying these two lines",
"# could have a severe performance penalty. Be careful.",
"# It would be faster to sort by p.id, but some projects may rely",
"# on the fact that the properties are ordered alphabetically. So,",
"# we maintain alphabetical sorting so as to maintain backward compatibility.",
"x",
"=",
"sorted",
"(",
"set",
"(",
"x",
")",
",",
"key",
"=",
"lambda",
"p",
":",
"(",
"p",
".",
"feature",
".",
"name",
",",
"p",
".",
"value",
",",
"p",
".",
"condition",
")",
")",
"key",
"=",
"tuple",
"(",
"p",
".",
"id",
"for",
"p",
"in",
"x",
")",
"if",
"key",
"not",
"in",
"__cache",
":",
"__cache",
"[",
"key",
"]",
"=",
"PropertySet",
"(",
"x",
")",
"return",
"__cache",
"[",
"key",
"]"
] | Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one. | [
"Creates",
"a",
"new",
"PropertySet",
"instance",
"for",
"the",
"given",
"raw",
"properties",
"or",
"returns",
"an",
"already",
"existing",
"one",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L36-L61 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | create_with_validation | def create_with_validation (raw_properties):
""" Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form.
"""
assert is_iterable_typed(raw_properties, basestring)
properties = [property.create_from_string(s) for s in raw_properties]
property.validate(properties)
return create(properties) | python | def create_with_validation (raw_properties):
""" Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form.
"""
assert is_iterable_typed(raw_properties, basestring)
properties = [property.create_from_string(s) for s in raw_properties]
property.validate(properties)
return create(properties) | [
"def",
"create_with_validation",
"(",
"raw_properties",
")",
":",
"assert",
"is_iterable_typed",
"(",
"raw_properties",
",",
"basestring",
")",
"properties",
"=",
"[",
"property",
".",
"create_from_string",
"(",
"s",
")",
"for",
"s",
"in",
"raw_properties",
"]",
"property",
".",
"validate",
"(",
"properties",
")",
"return",
"create",
"(",
"properties",
")"
] | Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form. | [
"Creates",
"new",
"PropertySet",
"instances",
"after",
"checking",
"that",
"all",
"properties",
"are",
"valid",
"and",
"converting",
"implicit",
"properties",
"into",
"gristed",
"form",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L63-L72 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | create_from_user_input | def create_from_user_input(raw_properties, jamfile_module, location):
"""Creates a property-set from the input given by the user, in the
context of 'jamfile-module' at 'location'"""
assert is_iterable_typed(raw_properties, basestring)
assert isinstance(jamfile_module, basestring)
assert isinstance(location, basestring)
properties = property.create_from_strings(raw_properties, True)
properties = property.translate_paths(properties, location)
properties = property.translate_indirect(properties, jamfile_module)
project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None)
if not project_id:
project_id = os.path.abspath(location)
properties = property.translate_dependencies(properties, project_id, location)
properties = property.expand_subfeatures_in_conditions(properties)
return create(properties) | python | def create_from_user_input(raw_properties, jamfile_module, location):
"""Creates a property-set from the input given by the user, in the
context of 'jamfile-module' at 'location'"""
assert is_iterable_typed(raw_properties, basestring)
assert isinstance(jamfile_module, basestring)
assert isinstance(location, basestring)
properties = property.create_from_strings(raw_properties, True)
properties = property.translate_paths(properties, location)
properties = property.translate_indirect(properties, jamfile_module)
project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None)
if not project_id:
project_id = os.path.abspath(location)
properties = property.translate_dependencies(properties, project_id, location)
properties = property.expand_subfeatures_in_conditions(properties)
return create(properties) | [
"def",
"create_from_user_input",
"(",
"raw_properties",
",",
"jamfile_module",
",",
"location",
")",
":",
"assert",
"is_iterable_typed",
"(",
"raw_properties",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"jamfile_module",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"location",
",",
"basestring",
")",
"properties",
"=",
"property",
".",
"create_from_strings",
"(",
"raw_properties",
",",
"True",
")",
"properties",
"=",
"property",
".",
"translate_paths",
"(",
"properties",
",",
"location",
")",
"properties",
"=",
"property",
".",
"translate_indirect",
"(",
"properties",
",",
"jamfile_module",
")",
"project_id",
"=",
"get_manager",
"(",
")",
".",
"projects",
"(",
")",
".",
"attributeDefault",
"(",
"jamfile_module",
",",
"'id'",
",",
"None",
")",
"if",
"not",
"project_id",
":",
"project_id",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"location",
")",
"properties",
"=",
"property",
".",
"translate_dependencies",
"(",
"properties",
",",
"project_id",
",",
"location",
")",
"properties",
"=",
"property",
".",
"expand_subfeatures_in_conditions",
"(",
"properties",
")",
"return",
"create",
"(",
"properties",
")"
] | Creates a property-set from the input given by the user, in the
context of 'jamfile-module' at 'location | [
"Creates",
"a",
"property",
"-",
"set",
"from",
"the",
"input",
"given",
"by",
"the",
"user",
"in",
"the",
"context",
"of",
"jamfile",
"-",
"module",
"at",
"location"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L79-L94 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | refine_from_user_input | def refine_from_user_input(parent_requirements, specification, jamfile_module,
location):
"""Refines requirements with requirements provided by the user.
Specially handles "-<property>value" syntax in specification
to remove given requirements.
- parent-requirements -- property-set object with requirements
to refine
- specification -- string list of requirements provided by the use
- project-module -- the module to which context indirect features
will be bound.
- location -- the path to which path features are relative."""
assert isinstance(parent_requirements, PropertySet)
assert is_iterable_typed(specification, basestring)
assert isinstance(jamfile_module, basestring)
assert isinstance(location, basestring)
if not specification:
return parent_requirements
add_requirements = []
remove_requirements = []
for r in specification:
if r[0] == '-':
remove_requirements.append(r[1:])
else:
add_requirements.append(r)
if remove_requirements:
# Need to create property set, so that path features
# and indirect features are translated just like they
# are in project requirements.
ps = create_from_user_input(remove_requirements,
jamfile_module, location)
parent_requirements = create(difference(parent_requirements.all(),
ps.all()))
specification = add_requirements
requirements = create_from_user_input(specification,
jamfile_module, location)
return parent_requirements.refine(requirements) | python | def refine_from_user_input(parent_requirements, specification, jamfile_module,
location):
"""Refines requirements with requirements provided by the user.
Specially handles "-<property>value" syntax in specification
to remove given requirements.
- parent-requirements -- property-set object with requirements
to refine
- specification -- string list of requirements provided by the use
- project-module -- the module to which context indirect features
will be bound.
- location -- the path to which path features are relative."""
assert isinstance(parent_requirements, PropertySet)
assert is_iterable_typed(specification, basestring)
assert isinstance(jamfile_module, basestring)
assert isinstance(location, basestring)
if not specification:
return parent_requirements
add_requirements = []
remove_requirements = []
for r in specification:
if r[0] == '-':
remove_requirements.append(r[1:])
else:
add_requirements.append(r)
if remove_requirements:
# Need to create property set, so that path features
# and indirect features are translated just like they
# are in project requirements.
ps = create_from_user_input(remove_requirements,
jamfile_module, location)
parent_requirements = create(difference(parent_requirements.all(),
ps.all()))
specification = add_requirements
requirements = create_from_user_input(specification,
jamfile_module, location)
return parent_requirements.refine(requirements) | [
"def",
"refine_from_user_input",
"(",
"parent_requirements",
",",
"specification",
",",
"jamfile_module",
",",
"location",
")",
":",
"assert",
"isinstance",
"(",
"parent_requirements",
",",
"PropertySet",
")",
"assert",
"is_iterable_typed",
"(",
"specification",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"jamfile_module",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"location",
",",
"basestring",
")",
"if",
"not",
"specification",
":",
"return",
"parent_requirements",
"add_requirements",
"=",
"[",
"]",
"remove_requirements",
"=",
"[",
"]",
"for",
"r",
"in",
"specification",
":",
"if",
"r",
"[",
"0",
"]",
"==",
"'-'",
":",
"remove_requirements",
".",
"append",
"(",
"r",
"[",
"1",
":",
"]",
")",
"else",
":",
"add_requirements",
".",
"append",
"(",
"r",
")",
"if",
"remove_requirements",
":",
"# Need to create property set, so that path features",
"# and indirect features are translated just like they",
"# are in project requirements.",
"ps",
"=",
"create_from_user_input",
"(",
"remove_requirements",
",",
"jamfile_module",
",",
"location",
")",
"parent_requirements",
"=",
"create",
"(",
"difference",
"(",
"parent_requirements",
".",
"all",
"(",
")",
",",
"ps",
".",
"all",
"(",
")",
")",
")",
"specification",
"=",
"add_requirements",
"requirements",
"=",
"create_from_user_input",
"(",
"specification",
",",
"jamfile_module",
",",
"location",
")",
"return",
"parent_requirements",
".",
"refine",
"(",
"requirements",
")"
] | Refines requirements with requirements provided by the user.
Specially handles "-<property>value" syntax in specification
to remove given requirements.
- parent-requirements -- property-set object with requirements
to refine
- specification -- string list of requirements provided by the use
- project-module -- the module to which context indirect features
will be bound.
- location -- the path to which path features are relative. | [
"Refines",
"requirements",
"with",
"requirements",
"provided",
"by",
"the",
"user",
".",
"Specially",
"handles",
"-",
"<property",
">",
"value",
"syntax",
"in",
"specification",
"to",
"remove",
"given",
"requirements",
".",
"-",
"parent",
"-",
"requirements",
"--",
"property",
"-",
"set",
"object",
"with",
"requirements",
"to",
"refine",
"-",
"specification",
"--",
"string",
"list",
"of",
"requirements",
"provided",
"by",
"the",
"use",
"-",
"project",
"-",
"module",
"--",
"the",
"module",
"to",
"which",
"context",
"indirect",
"features",
"will",
"be",
"bound",
".",
"-",
"location",
"--",
"the",
"path",
"to",
"which",
"path",
"features",
"are",
"relative",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L97-L140 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.base | def base (self):
""" Returns properties that are neither incidental nor free.
"""
result = [p for p in self.lazy_properties
if not(p.feature.incidental or p.feature.free)]
result.extend(self.base_)
return result | python | def base (self):
""" Returns properties that are neither incidental nor free.
"""
result = [p for p in self.lazy_properties
if not(p.feature.incidental or p.feature.free)]
result.extend(self.base_)
return result | [
"def",
"base",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"not",
"(",
"p",
".",
"feature",
".",
"incidental",
"or",
"p",
".",
"feature",
".",
"free",
")",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"base_",
")",
"return",
"result"
] | Returns properties that are neither incidental nor free. | [
"Returns",
"properties",
"that",
"are",
"neither",
"incidental",
"nor",
"free",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L264-L270 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.free | def free (self):
""" Returns free properties which are not dependency properties.
"""
result = [p for p in self.lazy_properties
if not p.feature.incidental and p.feature.free]
result.extend(self.free_)
return result | python | def free (self):
""" Returns free properties which are not dependency properties.
"""
result = [p for p in self.lazy_properties
if not p.feature.incidental and p.feature.free]
result.extend(self.free_)
return result | [
"def",
"free",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"not",
"p",
".",
"feature",
".",
"incidental",
"and",
"p",
".",
"feature",
".",
"free",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"free_",
")",
"return",
"result"
] | Returns free properties which are not dependency properties. | [
"Returns",
"free",
"properties",
"which",
"are",
"not",
"dependency",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L272-L278 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.dependency | def dependency (self):
""" Returns dependency properties.
"""
result = [p for p in self.lazy_properties if p.feature.dependency]
result.extend(self.dependency_)
return self.dependency_ | python | def dependency (self):
""" Returns dependency properties.
"""
result = [p for p in self.lazy_properties if p.feature.dependency]
result.extend(self.dependency_)
return self.dependency_ | [
"def",
"dependency",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"p",
".",
"feature",
".",
"dependency",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"dependency_",
")",
"return",
"self",
".",
"dependency_"
] | Returns dependency properties. | [
"Returns",
"dependency",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L283-L288 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.non_dependency | def non_dependency (self):
""" Returns properties that are not dependencies.
"""
result = [p for p in self.lazy_properties if not p.feature.dependency]
result.extend(self.non_dependency_)
return result | python | def non_dependency (self):
""" Returns properties that are not dependencies.
"""
result = [p for p in self.lazy_properties if not p.feature.dependency]
result.extend(self.non_dependency_)
return result | [
"def",
"non_dependency",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"not",
"p",
".",
"feature",
".",
"dependency",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"non_dependency_",
")",
"return",
"result"
] | Returns properties that are not dependencies. | [
"Returns",
"properties",
"that",
"are",
"not",
"dependencies",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L290-L295 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.incidental | def incidental (self):
""" Returns incidental properties.
"""
result = [p for p in self.lazy_properties if p.feature.incidental]
result.extend(self.incidental_)
return result | python | def incidental (self):
""" Returns incidental properties.
"""
result = [p for p in self.lazy_properties if p.feature.incidental]
result.extend(self.incidental_)
return result | [
"def",
"incidental",
"(",
"self",
")",
":",
"result",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"lazy_properties",
"if",
"p",
".",
"feature",
".",
"incidental",
"]",
"result",
".",
"extend",
"(",
"self",
".",
"incidental_",
")",
"return",
"result"
] | Returns incidental properties. | [
"Returns",
"incidental",
"properties",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L307-L312 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.refine | def refine (self, requirements):
""" Refines this set's properties using the requirements passed as an argument.
"""
assert isinstance(requirements, PropertySet)
if requirements not in self.refined_:
r = property.refine(self.all_, requirements.all_)
self.refined_[requirements] = create(r)
return self.refined_[requirements] | python | def refine (self, requirements):
""" Refines this set's properties using the requirements passed as an argument.
"""
assert isinstance(requirements, PropertySet)
if requirements not in self.refined_:
r = property.refine(self.all_, requirements.all_)
self.refined_[requirements] = create(r)
return self.refined_[requirements] | [
"def",
"refine",
"(",
"self",
",",
"requirements",
")",
":",
"assert",
"isinstance",
"(",
"requirements",
",",
"PropertySet",
")",
"if",
"requirements",
"not",
"in",
"self",
".",
"refined_",
":",
"r",
"=",
"property",
".",
"refine",
"(",
"self",
".",
"all_",
",",
"requirements",
".",
"all_",
")",
"self",
".",
"refined_",
"[",
"requirements",
"]",
"=",
"create",
"(",
"r",
")",
"return",
"self",
".",
"refined_",
"[",
"requirements",
"]"
] | Refines this set's properties using the requirements passed as an argument. | [
"Refines",
"this",
"set",
"s",
"properties",
"using",
"the",
"requirements",
"passed",
"as",
"an",
"argument",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L314-L323 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.target_path | def target_path (self):
""" Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'.
"""
if not self.target_path_:
# The <location> feature can be used to explicitly
# change the location of generated targets
l = self.get ('<location>')
if l:
computed = l[0]
is_relative = False
else:
p = self.as_path()
if hash_maybe:
p = hash_maybe(p)
# Really, an ugly hack. Boost regression test system requires
# specific target paths, and it seems that changing it to handle
# other directory layout is really hard. For that reason,
# we teach V2 to do the things regression system requires.
# The value o '<location-prefix>' is predended to the path.
prefix = self.get ('<location-prefix>')
if prefix:
if len (prefix) > 1:
raise AlreadyDefined ("Two <location-prefix> properties specified: '%s'" % prefix)
computed = os.path.join(prefix[0], p)
else:
computed = p
if not computed:
computed = "."
is_relative = True
self.target_path_ = (computed, is_relative)
return self.target_path_ | python | def target_path (self):
""" Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'.
"""
if not self.target_path_:
# The <location> feature can be used to explicitly
# change the location of generated targets
l = self.get ('<location>')
if l:
computed = l[0]
is_relative = False
else:
p = self.as_path()
if hash_maybe:
p = hash_maybe(p)
# Really, an ugly hack. Boost regression test system requires
# specific target paths, and it seems that changing it to handle
# other directory layout is really hard. For that reason,
# we teach V2 to do the things regression system requires.
# The value o '<location-prefix>' is predended to the path.
prefix = self.get ('<location-prefix>')
if prefix:
if len (prefix) > 1:
raise AlreadyDefined ("Two <location-prefix> properties specified: '%s'" % prefix)
computed = os.path.join(prefix[0], p)
else:
computed = p
if not computed:
computed = "."
is_relative = True
self.target_path_ = (computed, is_relative)
return self.target_path_ | [
"def",
"target_path",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"target_path_",
":",
"# The <location> feature can be used to explicitly",
"# change the location of generated targets",
"l",
"=",
"self",
".",
"get",
"(",
"'<location>'",
")",
"if",
"l",
":",
"computed",
"=",
"l",
"[",
"0",
"]",
"is_relative",
"=",
"False",
"else",
":",
"p",
"=",
"self",
".",
"as_path",
"(",
")",
"if",
"hash_maybe",
":",
"p",
"=",
"hash_maybe",
"(",
"p",
")",
"# Really, an ugly hack. Boost regression test system requires",
"# specific target paths, and it seems that changing it to handle",
"# other directory layout is really hard. For that reason,",
"# we teach V2 to do the things regression system requires.",
"# The value o '<location-prefix>' is predended to the path.",
"prefix",
"=",
"self",
".",
"get",
"(",
"'<location-prefix>'",
")",
"if",
"prefix",
":",
"if",
"len",
"(",
"prefix",
")",
">",
"1",
":",
"raise",
"AlreadyDefined",
"(",
"\"Two <location-prefix> properties specified: '%s'\"",
"%",
"prefix",
")",
"computed",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
"[",
"0",
"]",
",",
"p",
")",
"else",
":",
"computed",
"=",
"p",
"if",
"not",
"computed",
":",
"computed",
"=",
"\".\"",
"is_relative",
"=",
"True",
"self",
".",
"target_path_",
"=",
"(",
"computed",
",",
"is_relative",
")",
"return",
"self",
".",
"target_path_"
] | Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'. | [
"Computes",
"the",
"target",
"path",
"that",
"should",
"be",
"used",
"for",
"target",
"with",
"these",
"properties",
".",
"Returns",
"a",
"tuple",
"of",
"-",
"the",
"computed",
"path",
"-",
"if",
"the",
"path",
"is",
"relative",
"to",
"build",
"directory",
"a",
"value",
"of",
"true",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L395-L439 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.add | def add (self, ps):
""" Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument.
"""
assert isinstance(ps, PropertySet)
if ps not in self.added_:
self.added_[ps] = create(self.all_ + ps.all())
return self.added_[ps] | python | def add (self, ps):
""" Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument.
"""
assert isinstance(ps, PropertySet)
if ps not in self.added_:
self.added_[ps] = create(self.all_ + ps.all())
return self.added_[ps] | [
"def",
"add",
"(",
"self",
",",
"ps",
")",
":",
"assert",
"isinstance",
"(",
"ps",
",",
"PropertySet",
")",
"if",
"ps",
"not",
"in",
"self",
".",
"added_",
":",
"self",
".",
"added_",
"[",
"ps",
"]",
"=",
"create",
"(",
"self",
".",
"all_",
"+",
"ps",
".",
"all",
"(",
")",
")",
"return",
"self",
".",
"added_",
"[",
"ps",
"]"
] | Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument. | [
"Creates",
"a",
"new",
"property",
"set",
"containing",
"the",
"properties",
"in",
"this",
"one",
"plus",
"the",
"ones",
"of",
"the",
"property",
"set",
"passed",
"as",
"argument",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L441-L448 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.get | def get (self, feature):
""" Returns all values of 'feature'.
"""
if type(feature) == type([]):
feature = feature[0]
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
if self.feature_map_ is None:
self.feature_map_ = {}
for v in self.all_:
if v.feature not in self.feature_map_:
self.feature_map_[v.feature] = []
self.feature_map_[v.feature].append(v.value)
return self.feature_map_.get(feature, []) | python | def get (self, feature):
""" Returns all values of 'feature'.
"""
if type(feature) == type([]):
feature = feature[0]
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
if self.feature_map_ is None:
self.feature_map_ = {}
for v in self.all_:
if v.feature not in self.feature_map_:
self.feature_map_[v.feature] = []
self.feature_map_[v.feature].append(v.value)
return self.feature_map_.get(feature, []) | [
"def",
"get",
"(",
"self",
",",
"feature",
")",
":",
"if",
"type",
"(",
"feature",
")",
"==",
"type",
"(",
"[",
"]",
")",
":",
"feature",
"=",
"feature",
"[",
"0",
"]",
"if",
"not",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
":",
"feature",
"=",
"b2",
".",
"build",
".",
"feature",
".",
"get",
"(",
"feature",
")",
"assert",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
"if",
"self",
".",
"feature_map_",
"is",
"None",
":",
"self",
".",
"feature_map_",
"=",
"{",
"}",
"for",
"v",
"in",
"self",
".",
"all_",
":",
"if",
"v",
".",
"feature",
"not",
"in",
"self",
".",
"feature_map_",
":",
"self",
".",
"feature_map_",
"[",
"v",
".",
"feature",
"]",
"=",
"[",
"]",
"self",
".",
"feature_map_",
"[",
"v",
".",
"feature",
"]",
".",
"append",
"(",
"v",
".",
"value",
")",
"return",
"self",
".",
"feature_map_",
".",
"get",
"(",
"feature",
",",
"[",
"]",
")"
] | Returns all values of 'feature'. | [
"Returns",
"all",
"values",
"of",
"feature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L457-L474 | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/property_set.py | PropertySet.get_properties | def get_properties(self, feature):
"""Returns all contained properties associated with 'feature'"""
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
result = []
for p in self.all_:
if p.feature == feature:
result.append(p)
return result | python | def get_properties(self, feature):
"""Returns all contained properties associated with 'feature'"""
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
result = []
for p in self.all_:
if p.feature == feature:
result.append(p)
return result | [
"def",
"get_properties",
"(",
"self",
",",
"feature",
")",
":",
"if",
"not",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
":",
"feature",
"=",
"b2",
".",
"build",
".",
"feature",
".",
"get",
"(",
"feature",
")",
"assert",
"isinstance",
"(",
"feature",
",",
"b2",
".",
"build",
".",
"feature",
".",
"Feature",
")",
"result",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"all_",
":",
"if",
"p",
".",
"feature",
"==",
"feature",
":",
"result",
".",
"append",
"(",
"p",
")",
"return",
"result"
] | Returns all contained properties associated with 'feature | [
"Returns",
"all",
"contained",
"properties",
"associated",
"with",
"feature"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property_set.py#L477-L487 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _create | def _create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
ranking=True,
verbose=True):
"""
A unified interface for training recommender models. Based on simple
characteristics of the data, a type of model is selected and trained. The
trained model can be used to predict ratings and make recommendations.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
Name of the column in `observation_data` containing ratings given by
users to items, if applicable.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with the
same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with the
same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
ranking : bool, optional
Determine whether or not the goal is to rank items for each user.
verbose : bool, optional
Enables verbose output.
Returns
-------
out : A trained model.
- If a target column is given, then
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`.
- If no target column is given, then
:class:`turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`.
Examples
--------
**Basic usage**
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.recommender.create(sf)
>>> recs = m.recommend()
**Creating a model for ratings data**
This trains a :class:`~turicreate.recommender.factorization_recommender.FactorizationRecommender` that
can predict target ratings:
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.recommender.create(sf2, target="rating", ranking = False)
**Creating specific models**
Specific models allow for a number of additional options during create.
The available recommenders are all in the turicreate.recommender namespace.
For the complete list of acceptable options, please refer to the documentation
for individual models. Such options can be passed to the underlying model
just like any other parameter. For example, the following code creates
an :class:`~turicreate.recommender.ItemSimilarityRecommender` with a space-saving
option called `only_top_k`. The returned model stores only the 2 most
similar items for item:
>>> from turicreate.recommender import item_similarity_recommender
>>> item_similarity_recommender.create(sf, only_top_k=2)
"""
if not (isinstance(observation_data, _SFrame)):
raise TypeError('observation_data input must be a SFrame')
side_data = (user_data is not None) or (item_data is not None)
if user_data is not None:
if not isinstance(user_data, _SFrame):
raise TypeError('Provided user_data must be an SFrame.')
if item_data is not None:
if not isinstance(item_data, _SFrame):
raise TypeError('Provided item_data must be an SFrame.')
if target is None:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'ranking_factorization_recommender'
else:
if side_data:
method = 'factorization_recommender'
else:
method = 'factorization_recommender'
opts = {'observation_data': observation_data,
'user_id': user_id,
'item_id': item_id,
'target': target,
'user_data': user_data,
'item_data': item_data}
if method == "item_similarity":
return _turicreate.recommender.item_similarity_recommender.create(**opts)
elif method == "factorization_recommender":
return _turicreate.recommender.factorization_recommender.create(**opts)
elif method == "ranking_factorization_recommender":
return _turicreate.recommender.ranking_factorization_recommender.create(**opts)
else:
raise RuntimeError("Provided method not recognized.") | python | def _create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
ranking=True,
verbose=True):
"""
A unified interface for training recommender models. Based on simple
characteristics of the data, a type of model is selected and trained. The
trained model can be used to predict ratings and make recommendations.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
Name of the column in `observation_data` containing ratings given by
users to items, if applicable.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with the
same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with the
same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
ranking : bool, optional
Determine whether or not the goal is to rank items for each user.
verbose : bool, optional
Enables verbose output.
Returns
-------
out : A trained model.
- If a target column is given, then
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`.
- If no target column is given, then
:class:`turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`.
Examples
--------
**Basic usage**
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.recommender.create(sf)
>>> recs = m.recommend()
**Creating a model for ratings data**
This trains a :class:`~turicreate.recommender.factorization_recommender.FactorizationRecommender` that
can predict target ratings:
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.recommender.create(sf2, target="rating", ranking = False)
**Creating specific models**
Specific models allow for a number of additional options during create.
The available recommenders are all in the turicreate.recommender namespace.
For the complete list of acceptable options, please refer to the documentation
for individual models. Such options can be passed to the underlying model
just like any other parameter. For example, the following code creates
an :class:`~turicreate.recommender.ItemSimilarityRecommender` with a space-saving
option called `only_top_k`. The returned model stores only the 2 most
similar items for item:
>>> from turicreate.recommender import item_similarity_recommender
>>> item_similarity_recommender.create(sf, only_top_k=2)
"""
if not (isinstance(observation_data, _SFrame)):
raise TypeError('observation_data input must be a SFrame')
side_data = (user_data is not None) or (item_data is not None)
if user_data is not None:
if not isinstance(user_data, _SFrame):
raise TypeError('Provided user_data must be an SFrame.')
if item_data is not None:
if not isinstance(item_data, _SFrame):
raise TypeError('Provided item_data must be an SFrame.')
if target is None:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'ranking_factorization_recommender'
else:
if side_data:
method = 'factorization_recommender'
else:
method = 'factorization_recommender'
opts = {'observation_data': observation_data,
'user_id': user_id,
'item_id': item_id,
'target': target,
'user_data': user_data,
'item_data': item_data}
if method == "item_similarity":
return _turicreate.recommender.item_similarity_recommender.create(**opts)
elif method == "factorization_recommender":
return _turicreate.recommender.factorization_recommender.create(**opts)
elif method == "ranking_factorization_recommender":
return _turicreate.recommender.ranking_factorization_recommender.create(**opts)
else:
raise RuntimeError("Provided method not recognized.") | [
"def",
"_create",
"(",
"observation_data",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"target",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"item_data",
"=",
"None",
",",
"ranking",
"=",
"True",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"observation_data",
",",
"_SFrame",
")",
")",
":",
"raise",
"TypeError",
"(",
"'observation_data input must be a SFrame'",
")",
"side_data",
"=",
"(",
"user_data",
"is",
"not",
"None",
")",
"or",
"(",
"item_data",
"is",
"not",
"None",
")",
"if",
"user_data",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"user_data",
",",
"_SFrame",
")",
":",
"raise",
"TypeError",
"(",
"'Provided user_data must be an SFrame.'",
")",
"if",
"item_data",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"item_data",
",",
"_SFrame",
")",
":",
"raise",
"TypeError",
"(",
"'Provided item_data must be an SFrame.'",
")",
"if",
"target",
"is",
"None",
":",
"if",
"ranking",
":",
"if",
"side_data",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"method",
"=",
"'item_similarity'",
"else",
":",
"if",
"side_data",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"method",
"=",
"'item_similarity'",
"else",
":",
"if",
"ranking",
":",
"if",
"side_data",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"method",
"=",
"'ranking_factorization_recommender'",
"else",
":",
"if",
"side_data",
":",
"method",
"=",
"'factorization_recommender'",
"else",
":",
"method",
"=",
"'factorization_recommender'",
"opts",
"=",
"{",
"'observation_data'",
":",
"observation_data",
",",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'target'",
":",
"target",
",",
"'user_data'",
":",
"user_data",
",",
"'item_data'",
":",
"item_data",
"}",
"if",
"method",
"==",
"\"item_similarity\"",
":",
"return",
"_turicreate",
".",
"recommender",
".",
"item_similarity_recommender",
".",
"create",
"(",
"*",
"*",
"opts",
")",
"elif",
"method",
"==",
"\"factorization_recommender\"",
":",
"return",
"_turicreate",
".",
"recommender",
".",
"factorization_recommender",
".",
"create",
"(",
"*",
"*",
"opts",
")",
"elif",
"method",
"==",
"\"ranking_factorization_recommender\"",
":",
"return",
"_turicreate",
".",
"recommender",
".",
"ranking_factorization_recommender",
".",
"create",
"(",
"*",
"*",
"opts",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Provided method not recognized.\"",
")"
] | A unified interface for training recommender models. Based on simple
characteristics of the data, a type of model is selected and trained. The
trained model can be used to predict ratings and make recommendations.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
Name of the column in `observation_data` containing ratings given by
users to items, if applicable.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with the
same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with the
same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
ranking : bool, optional
Determine whether or not the goal is to rank items for each user.
verbose : bool, optional
Enables verbose output.
Returns
-------
out : A trained model.
- If a target column is given, then
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`.
- If no target column is given, then
:class:`turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`.
Examples
--------
**Basic usage**
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.recommender.create(sf)
>>> recs = m.recommend()
**Creating a model for ratings data**
This trains a :class:`~turicreate.recommender.factorization_recommender.FactorizationRecommender` that
can predict target ratings:
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.recommender.create(sf2, target="rating", ranking = False)
**Creating specific models**
Specific models allow for a number of additional options during create.
The available recommenders are all in the turicreate.recommender namespace.
For the complete list of acceptable options, please refer to the documentation
for individual models. Such options can be passed to the underlying model
just like any other parameter. For example, the following code creates
an :class:`~turicreate.recommender.ItemSimilarityRecommender` with a space-saving
option called `only_top_k`. The returned model stores only the 2 most
similar items for item:
>>> from turicreate.recommender import item_similarity_recommender
>>> item_similarity_recommender.create(sf, only_top_k=2) | [
"A",
"unified",
"interface",
"for",
"training",
"recommender",
"models",
".",
"Based",
"on",
"simple",
"characteristics",
"of",
"the",
"data",
"a",
"type",
"of",
"model",
"is",
"selected",
"and",
"trained",
".",
"The",
"trained",
"model",
"can",
"be",
"used",
"to",
"predict",
"ratings",
"and",
"make",
"recommendations",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L24-L175 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | compare_models | def compare_models(dataset, models, model_names=None, user_sample=1.0,
metric='auto',
target=None,
exclude_known_for_precision_recall=True,
make_plot=False,
verbose=True,
**kwargs):
"""
Compare the prediction or recommendation performance of recommender models
on a common test dataset.
Models that are trained to predict ratings are compared separately from
models that are trained without target ratings. The ratings prediction
models are compared on root-mean-squared error, and the rest are compared on
precision-recall.
Parameters
----------
dataset : SFrame
The dataset to use for model evaluation.
models : list[recommender models]
List of trained recommender models.
model_names : list[str], optional
List of model name strings for display.
user_sample : float, optional
Sampling proportion of unique users to use in estimating model
performance. Defaults to 1.0, i.e. use all users in the dataset.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric for the evaluation. The default automatically splits
models into two groups with their default evaluation metric respectively:
'rmse' for models trained with a target, and 'precision_recall'
otherwise.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same column.
If the model is trained without a target column and `metric='rmse'`,
then this option must be provided by user.
exclude_known_for_precision_recall : bool, optional
A useful option when `metric='precision_recall'`. Recommender models
automatically exclude items seen in the training data from the
final recommendation list. If the input evaluation `dataset` is the
same as the data used for training the models, set this option to False.
verbose : bool, optional
If true, print the progress.
Returns
-------
out : list[SFrame]
A list of results where each one is an sframe of evaluation results of
the respective model on the given dataset
Examples
--------
If you have created two ItemSimilarityRecommenders ``m1`` and ``m2`` and have
an :class:`~turicreate.SFrame` ``test_data``, then you may compare the
performance of the two models on test data using:
>>> import turicreate
>>> train_data = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"]})
>>> test_data = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"]})
>>> m1 = turicreate.item_similarity_recommender.create(train_data)
>>> m2 = turicreate.item_similarity_recommender.create(train_data, only_top_k=1)
>>> turicreate.recommender.util.compare_models(test_data, [m1, m2], model_names=["m1", "m2"])
The evaluation metric is automatically set to 'precision_recall', and the
evaluation will be based on recommendations that exclude items seen in the
training data.
If you want to evaluate on the original training set:
>>> turicreate.recommender.util.compare_models(train_data, [m1, m2],
... exclude_known_for_precision_recall=False)
Suppose you have four models, two trained with a target rating column, and
the other two trained without a target. By default, the models are put into
two different groups with "rmse", and "precision-recall" as the evaluation
metric respectively.
>>> train_data2 = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"],
... 'rating': [1, 3, 4, 5, 3, 4, 2, 5]})
>>> test_data2 = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"],
... 'rating': [3, 5, 4, 4, 3, 5, 2]})
>>> m3 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> m4 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> turicreate.recommender.util.compare_models(test_data2, [m3, m4])
To compare all four models using the same 'precision_recall' metric, you can
do:
>>> turicreate.recommender.util.compare_models(test_data2, [m1, m2, m3, m4],
... metric='precision_recall')
"""
num_models = len(models)
if model_names is None:
model_names = ['M' + str(i) for i in range(len(models))]
if num_models < 1:
raise ValueError("Must pass in at least one recommender model to \
evaluate")
if model_names is not None and len(model_names) != num_models:
raise ValueError("Must pass in the same number of model names as \
models")
# if we are asked to sample the users, come up with a list of unique users
if user_sample < 1.0:
user_id_name = models[0].user_id
if user_id_name is None:
raise ValueError("user_id not set in model(s)")
user_sa = dataset[user_id_name]
unique_users = list(user_sa.unique())
nusers = len(unique_users)
ntake = int(round(user_sample * nusers))
_random.shuffle(unique_users)
users = unique_users[:ntake]
print("compare_models: using", ntake, "users to estimate model performance")
users = frozenset(users)
ix = [u in users for u in dataset[user_id_name]]
dataset_subset = dataset[_SArray(ix) == True]
else:
dataset_subset = dataset
results = []
for (m, mname) in zip(models, model_names):
if verbose:
print('PROGRESS: Evaluate model %s' % mname)
r = m.evaluate(dataset_subset,
metric,
exclude_known_for_precision_recall,
target,
verbose=verbose,
cutoffs=list(range(1,11,1))+list(range(11,50,5)),
**kwargs)
results.append(r)
return results | python | def compare_models(dataset, models, model_names=None, user_sample=1.0,
metric='auto',
target=None,
exclude_known_for_precision_recall=True,
make_plot=False,
verbose=True,
**kwargs):
"""
Compare the prediction or recommendation performance of recommender models
on a common test dataset.
Models that are trained to predict ratings are compared separately from
models that are trained without target ratings. The ratings prediction
models are compared on root-mean-squared error, and the rest are compared on
precision-recall.
Parameters
----------
dataset : SFrame
The dataset to use for model evaluation.
models : list[recommender models]
List of trained recommender models.
model_names : list[str], optional
List of model name strings for display.
user_sample : float, optional
Sampling proportion of unique users to use in estimating model
performance. Defaults to 1.0, i.e. use all users in the dataset.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric for the evaluation. The default automatically splits
models into two groups with their default evaluation metric respectively:
'rmse' for models trained with a target, and 'precision_recall'
otherwise.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same column.
If the model is trained without a target column and `metric='rmse'`,
then this option must be provided by user.
exclude_known_for_precision_recall : bool, optional
A useful option when `metric='precision_recall'`. Recommender models
automatically exclude items seen in the training data from the
final recommendation list. If the input evaluation `dataset` is the
same as the data used for training the models, set this option to False.
verbose : bool, optional
If true, print the progress.
Returns
-------
out : list[SFrame]
A list of results where each one is an sframe of evaluation results of
the respective model on the given dataset
Examples
--------
If you have created two ItemSimilarityRecommenders ``m1`` and ``m2`` and have
an :class:`~turicreate.SFrame` ``test_data``, then you may compare the
performance of the two models on test data using:
>>> import turicreate
>>> train_data = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"]})
>>> test_data = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"]})
>>> m1 = turicreate.item_similarity_recommender.create(train_data)
>>> m2 = turicreate.item_similarity_recommender.create(train_data, only_top_k=1)
>>> turicreate.recommender.util.compare_models(test_data, [m1, m2], model_names=["m1", "m2"])
The evaluation metric is automatically set to 'precision_recall', and the
evaluation will be based on recommendations that exclude items seen in the
training data.
If you want to evaluate on the original training set:
>>> turicreate.recommender.util.compare_models(train_data, [m1, m2],
... exclude_known_for_precision_recall=False)
Suppose you have four models, two trained with a target rating column, and
the other two trained without a target. By default, the models are put into
two different groups with "rmse", and "precision-recall" as the evaluation
metric respectively.
>>> train_data2 = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"],
... 'rating': [1, 3, 4, 5, 3, 4, 2, 5]})
>>> test_data2 = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"],
... 'rating': [3, 5, 4, 4, 3, 5, 2]})
>>> m3 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> m4 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> turicreate.recommender.util.compare_models(test_data2, [m3, m4])
To compare all four models using the same 'precision_recall' metric, you can
do:
>>> turicreate.recommender.util.compare_models(test_data2, [m1, m2, m3, m4],
... metric='precision_recall')
"""
num_models = len(models)
if model_names is None:
model_names = ['M' + str(i) for i in range(len(models))]
if num_models < 1:
raise ValueError("Must pass in at least one recommender model to \
evaluate")
if model_names is not None and len(model_names) != num_models:
raise ValueError("Must pass in the same number of model names as \
models")
# if we are asked to sample the users, come up with a list of unique users
if user_sample < 1.0:
user_id_name = models[0].user_id
if user_id_name is None:
raise ValueError("user_id not set in model(s)")
user_sa = dataset[user_id_name]
unique_users = list(user_sa.unique())
nusers = len(unique_users)
ntake = int(round(user_sample * nusers))
_random.shuffle(unique_users)
users = unique_users[:ntake]
print("compare_models: using", ntake, "users to estimate model performance")
users = frozenset(users)
ix = [u in users for u in dataset[user_id_name]]
dataset_subset = dataset[_SArray(ix) == True]
else:
dataset_subset = dataset
results = []
for (m, mname) in zip(models, model_names):
if verbose:
print('PROGRESS: Evaluate model %s' % mname)
r = m.evaluate(dataset_subset,
metric,
exclude_known_for_precision_recall,
target,
verbose=verbose,
cutoffs=list(range(1,11,1))+list(range(11,50,5)),
**kwargs)
results.append(r)
return results | [
"def",
"compare_models",
"(",
"dataset",
",",
"models",
",",
"model_names",
"=",
"None",
",",
"user_sample",
"=",
"1.0",
",",
"metric",
"=",
"'auto'",
",",
"target",
"=",
"None",
",",
"exclude_known_for_precision_recall",
"=",
"True",
",",
"make_plot",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"num_models",
"=",
"len",
"(",
"models",
")",
"if",
"model_names",
"is",
"None",
":",
"model_names",
"=",
"[",
"'M'",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"models",
")",
")",
"]",
"if",
"num_models",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Must pass in at least one recommender model to \\\n evaluate\"",
")",
"if",
"model_names",
"is",
"not",
"None",
"and",
"len",
"(",
"model_names",
")",
"!=",
"num_models",
":",
"raise",
"ValueError",
"(",
"\"Must pass in the same number of model names as \\\n models\"",
")",
"# if we are asked to sample the users, come up with a list of unique users",
"if",
"user_sample",
"<",
"1.0",
":",
"user_id_name",
"=",
"models",
"[",
"0",
"]",
".",
"user_id",
"if",
"user_id_name",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"user_id not set in model(s)\"",
")",
"user_sa",
"=",
"dataset",
"[",
"user_id_name",
"]",
"unique_users",
"=",
"list",
"(",
"user_sa",
".",
"unique",
"(",
")",
")",
"nusers",
"=",
"len",
"(",
"unique_users",
")",
"ntake",
"=",
"int",
"(",
"round",
"(",
"user_sample",
"*",
"nusers",
")",
")",
"_random",
".",
"shuffle",
"(",
"unique_users",
")",
"users",
"=",
"unique_users",
"[",
":",
"ntake",
"]",
"print",
"(",
"\"compare_models: using\"",
",",
"ntake",
",",
"\"users to estimate model performance\"",
")",
"users",
"=",
"frozenset",
"(",
"users",
")",
"ix",
"=",
"[",
"u",
"in",
"users",
"for",
"u",
"in",
"dataset",
"[",
"user_id_name",
"]",
"]",
"dataset_subset",
"=",
"dataset",
"[",
"_SArray",
"(",
"ix",
")",
"==",
"True",
"]",
"else",
":",
"dataset_subset",
"=",
"dataset",
"results",
"=",
"[",
"]",
"for",
"(",
"m",
",",
"mname",
")",
"in",
"zip",
"(",
"models",
",",
"model_names",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'PROGRESS: Evaluate model %s'",
"%",
"mname",
")",
"r",
"=",
"m",
".",
"evaluate",
"(",
"dataset_subset",
",",
"metric",
",",
"exclude_known_for_precision_recall",
",",
"target",
",",
"verbose",
"=",
"verbose",
",",
"cutoffs",
"=",
"list",
"(",
"range",
"(",
"1",
",",
"11",
",",
"1",
")",
")",
"+",
"list",
"(",
"range",
"(",
"11",
",",
"50",
",",
"5",
")",
")",
",",
"*",
"*",
"kwargs",
")",
"results",
".",
"append",
"(",
"r",
")",
"return",
"results"
] | Compare the prediction or recommendation performance of recommender models
on a common test dataset.
Models that are trained to predict ratings are compared separately from
models that are trained without target ratings. The ratings prediction
models are compared on root-mean-squared error, and the rest are compared on
precision-recall.
Parameters
----------
dataset : SFrame
The dataset to use for model evaluation.
models : list[recommender models]
List of trained recommender models.
model_names : list[str], optional
List of model name strings for display.
user_sample : float, optional
Sampling proportion of unique users to use in estimating model
performance. Defaults to 1.0, i.e. use all users in the dataset.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric for the evaluation. The default automatically splits
models into two groups with their default evaluation metric respectively:
'rmse' for models trained with a target, and 'precision_recall'
otherwise.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same column.
If the model is trained without a target column and `metric='rmse'`,
then this option must be provided by user.
exclude_known_for_precision_recall : bool, optional
A useful option when `metric='precision_recall'`. Recommender models
automatically exclude items seen in the training data from the
final recommendation list. If the input evaluation `dataset` is the
same as the data used for training the models, set this option to False.
verbose : bool, optional
If true, print the progress.
Returns
-------
out : list[SFrame]
A list of results where each one is an sframe of evaluation results of
the respective model on the given dataset
Examples
--------
If you have created two ItemSimilarityRecommenders ``m1`` and ``m2`` and have
an :class:`~turicreate.SFrame` ``test_data``, then you may compare the
performance of the two models on test data using:
>>> import turicreate
>>> train_data = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"]})
>>> test_data = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"]})
>>> m1 = turicreate.item_similarity_recommender.create(train_data)
>>> m2 = turicreate.item_similarity_recommender.create(train_data, only_top_k=1)
>>> turicreate.recommender.util.compare_models(test_data, [m1, m2], model_names=["m1", "m2"])
The evaluation metric is automatically set to 'precision_recall', and the
evaluation will be based on recommendations that exclude items seen in the
training data.
If you want to evaluate on the original training set:
>>> turicreate.recommender.util.compare_models(train_data, [m1, m2],
... exclude_known_for_precision_recall=False)
Suppose you have four models, two trained with a target rating column, and
the other two trained without a target. By default, the models are put into
two different groups with "rmse", and "precision-recall" as the evaluation
metric respectively.
>>> train_data2 = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"],
... 'rating': [1, 3, 4, 5, 3, 4, 2, 5]})
>>> test_data2 = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"],
... 'rating': [3, 5, 4, 4, 3, 5, 2]})
>>> m3 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> m4 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> turicreate.recommender.util.compare_models(test_data2, [m3, m4])
To compare all four models using the same 'precision_recall' metric, you can
do:
>>> turicreate.recommender.util.compare_models(test_data2, [m1, m2, m3, m4],
... metric='precision_recall') | [
"Compare",
"the",
"prediction",
"or",
"recommendation",
"performance",
"of",
"recommender",
"models",
"on",
"a",
"common",
"test",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L177-L328 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | precision_recall_by_user | def precision_recall_by_user(observed_user_items,
recommendations,
cutoffs=[10]):
"""
Compute precision and recall at a given cutoff for each user. In information
retrieval terms, precision represents the ratio of relevant, retrieved items
to the number of relevant items. Recall represents the ratio of relevant,
retrieved items to the number of relevant items.
Let :math:`p_k` be a vector of the first :math:`k` elements in the
recommendations for a particular user, and let :math:`a` be the set of items
in ``observed_user_items`` for that user. The "precision at cutoff k" for
this user is defined as
.. math::
P(k) = \\frac{ | a \cap p_k | }{k},
while "recall at cutoff k" is defined as
.. math::
R(k) = \\frac{ | a \cap p_k | }{|a|}
The order of the elements in the recommendations affects the returned
precision and recall scores.
Parameters
----------
observed_user_items : SFrame
An SFrame containing observed user item pairs, where the first
column contains user ids and the second column contains item ids.
recommendations : SFrame
An SFrame containing columns pertaining to the user id, the item id,
the score given to that pair, and the rank of that item among the
recommendations made for user id. For example, see the output of
recommend() produced by any turicreate.recommender model.
cutoffs : list[int], optional
The cutoffs to use when computing precision and recall.
Returns
-------
out : SFrame
An SFrame containing columns user id, cutoff, precision, recall, and
count where the precision and recall are reported for each user at
each requested cutoff, and count is the number of observations for
that user id.
Notes
-----
The corner cases that involve empty lists were chosen to be consistent
with the feasible set of precision-recall curves, which start at
(precision, recall) = (1,0) and end at (0,1). However, we do not believe
there is a well-known consensus on this choice.
Examples
--------
Given SFrames ``train_data`` and ``test_data`` with columns user_id
and item_id:
>>> from turicreate.toolkits.recommender.util import precision_recall_by_user
>>> m = turicreate.recommender.create(train_data)
>>> recs = m.recommend()
>>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10])
"""
assert type(observed_user_items) == _SFrame
assert type(recommendations) == _SFrame
assert type(cutoffs) == list
assert min(cutoffs) > 0, "All cutoffs must be positive integers."
assert recommendations.num_columns() >= 2
user_id = recommendations.column_names()[0]
item_id = recommendations.column_names()[1]
assert observed_user_items.num_rows() > 0, \
"Evaluating precision and recall requires a non-empty " + \
"observed_user_items."
assert user_id in observed_user_items.column_names(), \
"User column required in observed_user_items."
assert item_id in observed_user_items.column_names(), \
"Item column required in observed_user_items."
assert observed_user_items[user_id].dtype == \
recommendations[user_id].dtype, \
"The user column in the two provided SFrames must have the same type."
assert observed_user_items[item_id].dtype == \
recommendations[item_id].dtype, \
"The user column in the two provided SFrames must have the same type."
cutoffs = _array.array('f', cutoffs)
opts = {'data': observed_user_items,
'recommendations': recommendations,
'cutoffs': cutoffs}
response = _turicreate.toolkits._main.run('evaluation_precision_recall_by_user', opts)
sf = _SFrame(None, _proxy=response['pr'])
return sf.sort([user_id, 'cutoff']) | python | def precision_recall_by_user(observed_user_items,
recommendations,
cutoffs=[10]):
"""
Compute precision and recall at a given cutoff for each user. In information
retrieval terms, precision represents the ratio of relevant, retrieved items
to the number of relevant items. Recall represents the ratio of relevant,
retrieved items to the number of relevant items.
Let :math:`p_k` be a vector of the first :math:`k` elements in the
recommendations for a particular user, and let :math:`a` be the set of items
in ``observed_user_items`` for that user. The "precision at cutoff k" for
this user is defined as
.. math::
P(k) = \\frac{ | a \cap p_k | }{k},
while "recall at cutoff k" is defined as
.. math::
R(k) = \\frac{ | a \cap p_k | }{|a|}
The order of the elements in the recommendations affects the returned
precision and recall scores.
Parameters
----------
observed_user_items : SFrame
An SFrame containing observed user item pairs, where the first
column contains user ids and the second column contains item ids.
recommendations : SFrame
An SFrame containing columns pertaining to the user id, the item id,
the score given to that pair, and the rank of that item among the
recommendations made for user id. For example, see the output of
recommend() produced by any turicreate.recommender model.
cutoffs : list[int], optional
The cutoffs to use when computing precision and recall.
Returns
-------
out : SFrame
An SFrame containing columns user id, cutoff, precision, recall, and
count where the precision and recall are reported for each user at
each requested cutoff, and count is the number of observations for
that user id.
Notes
-----
The corner cases that involve empty lists were chosen to be consistent
with the feasible set of precision-recall curves, which start at
(precision, recall) = (1,0) and end at (0,1). However, we do not believe
there is a well-known consensus on this choice.
Examples
--------
Given SFrames ``train_data`` and ``test_data`` with columns user_id
and item_id:
>>> from turicreate.toolkits.recommender.util import precision_recall_by_user
>>> m = turicreate.recommender.create(train_data)
>>> recs = m.recommend()
>>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10])
"""
assert type(observed_user_items) == _SFrame
assert type(recommendations) == _SFrame
assert type(cutoffs) == list
assert min(cutoffs) > 0, "All cutoffs must be positive integers."
assert recommendations.num_columns() >= 2
user_id = recommendations.column_names()[0]
item_id = recommendations.column_names()[1]
assert observed_user_items.num_rows() > 0, \
"Evaluating precision and recall requires a non-empty " + \
"observed_user_items."
assert user_id in observed_user_items.column_names(), \
"User column required in observed_user_items."
assert item_id in observed_user_items.column_names(), \
"Item column required in observed_user_items."
assert observed_user_items[user_id].dtype == \
recommendations[user_id].dtype, \
"The user column in the two provided SFrames must have the same type."
assert observed_user_items[item_id].dtype == \
recommendations[item_id].dtype, \
"The user column in the two provided SFrames must have the same type."
cutoffs = _array.array('f', cutoffs)
opts = {'data': observed_user_items,
'recommendations': recommendations,
'cutoffs': cutoffs}
response = _turicreate.toolkits._main.run('evaluation_precision_recall_by_user', opts)
sf = _SFrame(None, _proxy=response['pr'])
return sf.sort([user_id, 'cutoff']) | [
"def",
"precision_recall_by_user",
"(",
"observed_user_items",
",",
"recommendations",
",",
"cutoffs",
"=",
"[",
"10",
"]",
")",
":",
"assert",
"type",
"(",
"observed_user_items",
")",
"==",
"_SFrame",
"assert",
"type",
"(",
"recommendations",
")",
"==",
"_SFrame",
"assert",
"type",
"(",
"cutoffs",
")",
"==",
"list",
"assert",
"min",
"(",
"cutoffs",
")",
">",
"0",
",",
"\"All cutoffs must be positive integers.\"",
"assert",
"recommendations",
".",
"num_columns",
"(",
")",
">=",
"2",
"user_id",
"=",
"recommendations",
".",
"column_names",
"(",
")",
"[",
"0",
"]",
"item_id",
"=",
"recommendations",
".",
"column_names",
"(",
")",
"[",
"1",
"]",
"assert",
"observed_user_items",
".",
"num_rows",
"(",
")",
">",
"0",
",",
"\"Evaluating precision and recall requires a non-empty \"",
"+",
"\"observed_user_items.\"",
"assert",
"user_id",
"in",
"observed_user_items",
".",
"column_names",
"(",
")",
",",
"\"User column required in observed_user_items.\"",
"assert",
"item_id",
"in",
"observed_user_items",
".",
"column_names",
"(",
")",
",",
"\"Item column required in observed_user_items.\"",
"assert",
"observed_user_items",
"[",
"user_id",
"]",
".",
"dtype",
"==",
"recommendations",
"[",
"user_id",
"]",
".",
"dtype",
",",
"\"The user column in the two provided SFrames must have the same type.\"",
"assert",
"observed_user_items",
"[",
"item_id",
"]",
".",
"dtype",
"==",
"recommendations",
"[",
"item_id",
"]",
".",
"dtype",
",",
"\"The user column in the two provided SFrames must have the same type.\"",
"cutoffs",
"=",
"_array",
".",
"array",
"(",
"'f'",
",",
"cutoffs",
")",
"opts",
"=",
"{",
"'data'",
":",
"observed_user_items",
",",
"'recommendations'",
":",
"recommendations",
",",
"'cutoffs'",
":",
"cutoffs",
"}",
"response",
"=",
"_turicreate",
".",
"toolkits",
".",
"_main",
".",
"run",
"(",
"'evaluation_precision_recall_by_user'",
",",
"opts",
")",
"sf",
"=",
"_SFrame",
"(",
"None",
",",
"_proxy",
"=",
"response",
"[",
"'pr'",
"]",
")",
"return",
"sf",
".",
"sort",
"(",
"[",
"user_id",
",",
"'cutoff'",
"]",
")"
] | Compute precision and recall at a given cutoff for each user. In information
retrieval terms, precision represents the ratio of relevant, retrieved items
to the number of relevant items. Recall represents the ratio of relevant,
retrieved items to the number of relevant items.
Let :math:`p_k` be a vector of the first :math:`k` elements in the
recommendations for a particular user, and let :math:`a` be the set of items
in ``observed_user_items`` for that user. The "precision at cutoff k" for
this user is defined as
.. math::
P(k) = \\frac{ | a \cap p_k | }{k},
while "recall at cutoff k" is defined as
.. math::
R(k) = \\frac{ | a \cap p_k | }{|a|}
The order of the elements in the recommendations affects the returned
precision and recall scores.
Parameters
----------
observed_user_items : SFrame
An SFrame containing observed user item pairs, where the first
column contains user ids and the second column contains item ids.
recommendations : SFrame
An SFrame containing columns pertaining to the user id, the item id,
the score given to that pair, and the rank of that item among the
recommendations made for user id. For example, see the output of
recommend() produced by any turicreate.recommender model.
cutoffs : list[int], optional
The cutoffs to use when computing precision and recall.
Returns
-------
out : SFrame
An SFrame containing columns user id, cutoff, precision, recall, and
count where the precision and recall are reported for each user at
each requested cutoff, and count is the number of observations for
that user id.
Notes
-----
The corner cases that involve empty lists were chosen to be consistent
with the feasible set of precision-recall curves, which start at
(precision, recall) = (1,0) and end at (0,1). However, we do not believe
there is a well-known consensus on this choice.
Examples
--------
Given SFrames ``train_data`` and ``test_data`` with columns user_id
and item_id:
>>> from turicreate.toolkits.recommender.util import precision_recall_by_user
>>> m = turicreate.recommender.create(train_data)
>>> recs = m.recommend()
>>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10]) | [
"Compute",
"precision",
"and",
"recall",
"at",
"a",
"given",
"cutoff",
"for",
"each",
"user",
".",
"In",
"information",
"retrieval",
"terms",
"precision",
"represents",
"the",
"ratio",
"of",
"relevant",
"retrieved",
"items",
"to",
"the",
"number",
"of",
"relevant",
"items",
".",
"Recall",
"represents",
"the",
"ratio",
"of",
"relevant",
"retrieved",
"items",
"to",
"the",
"number",
"of",
"relevant",
"items",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L331-L427 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | random_split_by_user | def random_split_by_user(dataset,
user_id='user_id',
item_id='item_id',
max_num_users=1000,
item_test_proportion=.2,
random_seed=0):
"""Create a recommender-friendly train-test split of the provided data set.
The test dataset is generated by first choosing `max_num_users` out of the
total number of users in `dataset`. Then, for each of the chosen test users,
a portion of the user's items (determined by `item_test_proportion`) is
randomly chosen to be included in the test set. This split allows the
training data to retain enough information about the users in the testset,
so that adequate recommendations can be made. The total number of users
in the test set may be fewer than `max_num_users` if a user was chosen for
the test set but none of their items are selected.
Parameters
----------
dataset : SFrame
An SFrame containing (user, item) pairs.
user_id : str, optional
The name of the column in ``dataset`` that contains user ids.
item_id : str, optional
The name of the column in ``dataset`` that contains item ids.
max_num_users : int, optional
The maximum number of users to use to construct the test set. If
set to 'None', then use all available users.
item_test_proportion : float, optional
The desired probability that a test user's item will be chosen
for the test set.
random_seed : int, optional The random seed to use for
randomization. If None, then the random seed is different
every time; if numeric, then subsequent calls with the same
dataset and random seed with have the same split.
Returns
-------
train, test : SFrame
A tuple with two datasets to be used for training and testing.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf, max_num_users=100)
"""
assert user_id in dataset.column_names(), \
'Provided user column "{0}" not found in data set.'.format(user_id)
assert item_id in dataset.column_names(), \
'Provided item column "{0}" not found in data set.'.format(item_id)
if max_num_users == 'all':
max_num_users = None
if random_seed is None:
import time
random_seed = int(hash("%20f" % time.time()) % 2**63)
opts = {'dataset': dataset,
'user_id': user_id,
'item_id': item_id,
'max_num_users': max_num_users,
'item_test_proportion': item_test_proportion,
'random_seed': random_seed}
response = _turicreate.extensions._recsys.train_test_split(dataset, user_id, item_id,
max_num_users, item_test_proportion, random_seed)
train = response['train']
test = response['test']
def random_split_by_user(dataset,
                         user_id='user_id',
                         item_id='item_id',
                         max_num_users=1000,
                         item_test_proportion=.2,
                         random_seed=0):
    """Create a recommender-friendly train-test split of the provided data set.

    The test dataset is generated by first choosing `max_num_users` out of the
    total number of users in `dataset`. Then, for each of the chosen test
    users, a portion of the user's items (determined by
    `item_test_proportion`) is randomly chosen to be included in the test set.
    This split allows the training data to retain enough information about the
    users in the test set, so that adequate recommendations can be made. The
    total number of users in the test set may be fewer than `max_num_users` if
    a user was chosen for the test set but none of their items are selected.

    Parameters
    ----------
    dataset : SFrame
        An SFrame containing (user, item) pairs.

    user_id : str, optional
        The name of the column in ``dataset`` that contains user ids.

    item_id : str, optional
        The name of the column in ``dataset`` that contains item ids.

    max_num_users : int, optional
        The maximum number of users to use to construct the test set. If
        set to 'None', then use all available users.

    item_test_proportion : float, optional
        The desired probability that a test user's item will be chosen
        for the test set.

    random_seed : int, optional
        The random seed to use for randomization. If None, then the random
        seed is different every time; if numeric, then subsequent calls with
        the same dataset and random seed will have the same split.

    Returns
    -------
    train, test : SFrame
        A tuple with two datasets to be used for training and testing.

    Examples
    --------
    >>> import turicreate as tc
    >>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
    >>> train, test = tc.recommender.util.random_split_by_user(sf, max_num_users=100)
    """
    # NOTE: asserts are kept (rather than raising ValueError) for backward
    # compatibility with any caller that catches AssertionError.
    assert user_id in dataset.column_names(), \
        'Provided user column "{0}" not found in data set.'.format(user_id)
    assert item_id in dataset.column_names(), \
        'Provided item column "{0}" not found in data set.'.format(item_id)

    # 'all' is accepted as a synonym for "use every available user".
    if max_num_users == 'all':
        max_num_users = None

    if random_seed is None:
        # Derive an arbitrary time-based seed so repeated calls differ.
        import time
        random_seed = int(hash("%20f" % time.time()) % 2**63)

    # Fixed: the previous implementation also built an `opts` dictionary of
    # these same arguments that was never used; that dead code is removed.
    response = _turicreate.extensions._recsys.train_test_split(
        dataset, user_id, item_id, max_num_users,
        item_test_proportion, random_seed)

    return response['train'], response['test']
"def",
"random_split_by_user",
"(",
"dataset",
",",
"user_id",
"=",
"'user_id'",
",",
"item_id",
"=",
"'item_id'",
",",
"max_num_users",
"=",
"1000",
",",
"item_test_proportion",
"=",
".2",
",",
"random_seed",
"=",
"0",
")",
":",
"assert",
"user_id",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided user column \"{0}\" not found in data set.'",
".",
"format",
"(",
"user_id",
")",
"assert",
"item_id",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided item column \"{0}\" not found in data set.'",
".",
"format",
"(",
"item_id",
")",
"if",
"max_num_users",
"==",
"'all'",
":",
"max_num_users",
"=",
"None",
"if",
"random_seed",
"is",
"None",
":",
"import",
"time",
"random_seed",
"=",
"int",
"(",
"hash",
"(",
"\"%20f\"",
"%",
"time",
".",
"time",
"(",
")",
")",
"%",
"2",
"**",
"63",
")",
"opts",
"=",
"{",
"'dataset'",
":",
"dataset",
",",
"'user_id'",
":",
"user_id",
",",
"'item_id'",
":",
"item_id",
",",
"'max_num_users'",
":",
"max_num_users",
",",
"'item_test_proportion'",
":",
"item_test_proportion",
",",
"'random_seed'",
":",
"random_seed",
"}",
"response",
"=",
"_turicreate",
".",
"extensions",
".",
"_recsys",
".",
"train_test_split",
"(",
"dataset",
",",
"user_id",
",",
"item_id",
",",
"max_num_users",
",",
"item_test_proportion",
",",
"random_seed",
")",
"train",
"=",
"response",
"[",
"'train'",
"]",
"test",
"=",
"response",
"[",
"'test'",
"]",
"return",
"train",
",",
"test"
] | Create a recommender-friendly train-test split of the provided data set.
The test dataset is generated by first choosing `max_num_users` out of the
total number of users in `dataset`. Then, for each of the chosen test users,
a portion of the user's items (determined by `item_test_proportion`) is
randomly chosen to be included in the test set. This split allows the
training data to retain enough information about the users in the test set,
so that adequate recommendations can be made. The total number of users
in the test set may be fewer than `max_num_users` if a user was chosen for
the test set but none of their items are selected.
Parameters
----------
dataset : SFrame
An SFrame containing (user, item) pairs.
user_id : str, optional
The name of the column in ``dataset`` that contains user ids.
item_id : str, optional
The name of the column in ``dataset`` that contains item ids.
max_num_users : int, optional
The maximum number of users to use to construct the test set. If
set to 'None', then use all available users.
item_test_proportion : float, optional
The desired probability that a test user's item will be chosen
for the test set.
random_seed : int, optional The random seed to use for
randomization. If None, then the random seed is different
every time; if numeric, then subsequent calls with the same
dataset and random seed will have the same split.
Returns
-------
train, test : SFrame
A tuple with two datasets to be used for training and testing.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf, max_num_users=100) | [
"Create",
"a",
"recommender",
"-",
"friendly",
"train",
"-",
"test",
"split",
"of",
"the",
"provided",
"data",
"set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L430-L508 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._list_fields | def _list_fields(self):
"""
Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method.
"""
response = self.__proxy__.list_fields()
def _list_fields(self):
    """
    List the queryable fields of this model.

    The available keys depend on the type of model.

    Returns
    -------
    out : list
        A list of fields that can be queried using the ``get`` method.
    """
    # Field names beginning with an underscore are internal; hide them.
    reply = self.__proxy__.list_fields()
    public_fields = []
    for field_name in reply['value']:
        if not field_name.startswith("_"):
            public_fields.append(field_name)
    return public_fields
"def",
"_list_fields",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"__proxy__",
".",
"list_fields",
"(",
")",
"return",
"[",
"s",
"for",
"s",
"in",
"response",
"[",
"'value'",
"]",
"if",
"not",
"s",
".",
"startswith",
"(",
"\"_\"",
")",
"]"
] | Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method. | [
"Get",
"the",
"current",
"settings",
"of",
"the",
"model",
".",
"The",
"keys",
"depend",
"on",
"the",
"type",
"of",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L543-L555 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._get_summary_struct | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section names.
The order matches that of the 'sections' object.
"""
stats = self._list_fields()
options = self._get_current_options()
section_titles = []
sections = []
observation_columns = set(self.observation_data_column_names)
not_needed = set([self.user_id,
self.item_id,
self.target])
num_obs_fields = len(observation_columns.difference(not_needed))
user_features = self.user_side_data_column_names
item_features = self.item_side_data_column_names
section_titles.append("Schema")
schema_fields = [
('User ID', 'user_id'),
('Item ID', 'item_id'),
('Target', 'target'),
('Additional observation features', _precomputed_field(num_obs_fields)),
('User side features', _precomputed_field(user_features)),
('Item side features', _precomputed_field(item_features))]
sections.append(schema_fields)
data_fields = [
('Number of observations', 'num_observations'),
('Number of users', 'num_users'),
('Number of items', 'num_items')]
section_titles.append("Statistics")
sections.append(data_fields)
training_fields = [
('Training time', 'training_time')]
if 'data_load_elapsed_time' in stats:
training_fields.append(('Data load time',
'data_load_elapsed_time'))
if 'validation_metrics_elapsed_time' in stats:
training_fields.append(('Validation metrics time',
'validation_metrics_elapsed_time'))
section_titles.append("Training summary")
sections.append(training_fields)
# Remove any options that should not be shown under "Settings"
to_ignore = ['random_seed',
'user_id',
'item_id',
'target']
for k in to_ignore:
if k in options:
del options[k]
def add_ordered_options(name, ordered_options, additional = []):
option_fields = []
for k, v in additional:
option_fields.append((k, _precomputed_field(v)))
for k in ordered_options:
if k in options:
option_fields.append((k, _precomputed_field(options[k])))
del options[k]
if option_fields:
section_titles.append(name)
sections.append(option_fields)
# Put in a number of things in order, if applicable.
# Model parameters
model_parameter_options = [
"only_top_k",
"threshold",
"num_factors",
"binary_target",
"side_data_factorization",
"solver",
"nmf",
"max_iterations",
"similarity_type",
"training_method"]
add_ordered_options("Model Parameters", model_parameter_options,
[("Model class", self.__class__.__name__)])
# Regularization type options
regularization_options = [
"regularization",
"regularization_type",
"linear_regularization",
"ranking_regularization",
"unobserved_rating_value",
"num_sampled_negative_examples",
"ials_confidence_scaling_type",
"ials_confidence_scaling_factor"]
add_ordered_options("Regularization Settings", regularization_options)
# Optimization stuff
optimization_settings = [
"init_random_sigma",
"sgd_convergence_interval",
"sgd_convergence_threshold",
"sgd_max_trial_iterations",
"sgd_sampling_block_size",
"sgd_step_adjustment_interval",
"sgd_step_size",
"sgd_trial_sample_minimum_size",
"sgd_trial_sample_proportion",
"step_size_decrease_rate",
"additional_iterations_if_unhealthy",
"adagrad_momentum_weighting",
"num_tempering_iterations",
"tempering_regularization_start_value",
"track_exact_loss"]
add_ordered_options("Optimization Settings", optimization_settings)
# clean up
option_fields = []
for k, v in _six.iteritems(options):
option_fields.append((k, _precomputed_field(v)))
if option_fields:
section_titles.append("Other Settings")
sections.append(option_fields)
return (sections, section_titles) | python | def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section names.
The order matches that of the 'sections' object.
"""
# Pull the queryable field names and current option settings from the model.
stats = self._list_fields()
options = self._get_current_options()
# sections[i] holds the (label, field) rows shown under section_titles[i].
section_titles = []
sections = []
# Count observation columns beyond the user/item/target triplet.
observation_columns = set(self.observation_data_column_names)
not_needed = set([self.user_id,
self.item_id,
self.target])
num_obs_fields = len(observation_columns.difference(not_needed))
user_features = self.user_side_data_column_names
item_features = self.item_side_data_column_names
# Section 1: the schema of the training data.
section_titles.append("Schema")
schema_fields = [
('User ID', 'user_id'),
('Item ID', 'item_id'),
('Target', 'target'),
('Additional observation features', _precomputed_field(num_obs_fields)),
('User side features', _precomputed_field(user_features)),
('Item side features', _precomputed_field(item_features))]
sections.append(schema_fields)
# Section 2: basic dataset statistics.
data_fields = [
('Number of observations', 'num_observations'),
('Number of users', 'num_users'),
('Number of items', 'num_items')]
section_titles.append("Statistics")
sections.append(data_fields)
# Section 3: training timings; optional timing fields are included only
# when present among the model's queryable fields.
training_fields = [
('Training time', 'training_time')]
if 'data_load_elapsed_time' in stats:
training_fields.append(('Data load time',
'data_load_elapsed_time'))
if 'validation_metrics_elapsed_time' in stats:
training_fields.append(('Validation metrics time',
'validation_metrics_elapsed_time'))
section_titles.append("Training summary")
sections.append(training_fields)
# Remove any options that should not be shown under "Settings"
to_ignore = ['random_seed',
'user_id',
'item_id',
'target']
for k in to_ignore:
if k in options:
del options[k]
# Closure: moves any of `ordered_options` found in `options` into a new
# section named `name` (prefixed by the precomputed `additional` rows).
# It consumes entries by deleting them from `options`, so later calls and
# the final "Other Settings" pass only see leftover options.
# NOTE(review): the mutable default `additional = []` is shared across
# calls, but it is only iterated and never mutated, so it is harmless here.
def add_ordered_options(name, ordered_options, additional = []):
option_fields = []
for k, v in additional:
option_fields.append((k, _precomputed_field(v)))
for k in ordered_options:
if k in options:
option_fields.append((k, _precomputed_field(options[k])))
del options[k]
if option_fields:
section_titles.append(name)
sections.append(option_fields)
# Put in a number of things in order, if applicable.
# Model parameters
model_parameter_options = [
"only_top_k",
"threshold",
"num_factors",
"binary_target",
"side_data_factorization",
"solver",
"nmf",
"max_iterations",
"similarity_type",
"training_method"]
add_ordered_options("Model Parameters", model_parameter_options,
[("Model class", self.__class__.__name__)])
# Regularization type options
regularization_options = [
"regularization",
"regularization_type",
"linear_regularization",
"ranking_regularization",
"unobserved_rating_value",
"num_sampled_negative_examples",
"ials_confidence_scaling_type",
"ials_confidence_scaling_factor"]
add_ordered_options("Regularization Settings", regularization_options)
# Optimization stuff
optimization_settings = [
"init_random_sigma",
"sgd_convergence_interval",
"sgd_convergence_threshold",
"sgd_max_trial_iterations",
"sgd_sampling_block_size",
"sgd_step_adjustment_interval",
"sgd_step_size",
"sgd_trial_sample_minimum_size",
"sgd_trial_sample_proportion",
"step_size_decrease_rate",
"additional_iterations_if_unhealthy",
"adagrad_momentum_weighting",
"num_tempering_iterations",
"tempering_regularization_start_value",
"track_exact_loss"]
add_ordered_options("Optimization Settings", optimization_settings)
# clean up
# Anything remaining in `options` was not claimed by an ordered section.
option_fields = []
for k, v in _six.iteritems(options):
option_fields.append((k, _precomputed_field(v)))
if option_fields:
section_titles.append("Other Settings")
sections.append(option_fields)
return (sections, section_titles) | [
"def",
"_get_summary_struct",
"(",
"self",
")",
":",
"stats",
"=",
"self",
".",
"_list_fields",
"(",
")",
"options",
"=",
"self",
".",
"_get_current_options",
"(",
")",
"section_titles",
"=",
"[",
"]",
"sections",
"=",
"[",
"]",
"observation_columns",
"=",
"set",
"(",
"self",
".",
"observation_data_column_names",
")",
"not_needed",
"=",
"set",
"(",
"[",
"self",
".",
"user_id",
",",
"self",
".",
"item_id",
",",
"self",
".",
"target",
"]",
")",
"num_obs_fields",
"=",
"len",
"(",
"observation_columns",
".",
"difference",
"(",
"not_needed",
")",
")",
"user_features",
"=",
"self",
".",
"user_side_data_column_names",
"item_features",
"=",
"self",
".",
"item_side_data_column_names",
"section_titles",
".",
"append",
"(",
"\"Schema\"",
")",
"schema_fields",
"=",
"[",
"(",
"'User ID'",
",",
"'user_id'",
")",
",",
"(",
"'Item ID'",
",",
"'item_id'",
")",
",",
"(",
"'Target'",
",",
"'target'",
")",
",",
"(",
"'Additional observation features'",
",",
"_precomputed_field",
"(",
"num_obs_fields",
")",
")",
",",
"(",
"'User side features'",
",",
"_precomputed_field",
"(",
"user_features",
")",
")",
",",
"(",
"'Item side features'",
",",
"_precomputed_field",
"(",
"item_features",
")",
")",
"]",
"sections",
".",
"append",
"(",
"schema_fields",
")",
"data_fields",
"=",
"[",
"(",
"'Number of observations'",
",",
"'num_observations'",
")",
",",
"(",
"'Number of users'",
",",
"'num_users'",
")",
",",
"(",
"'Number of items'",
",",
"'num_items'",
")",
"]",
"section_titles",
".",
"append",
"(",
"\"Statistics\"",
")",
"sections",
".",
"append",
"(",
"data_fields",
")",
"training_fields",
"=",
"[",
"(",
"'Training time'",
",",
"'training_time'",
")",
"]",
"if",
"'data_load_elapsed_time'",
"in",
"stats",
":",
"training_fields",
".",
"append",
"(",
"(",
"'Data load time'",
",",
"'data_load_elapsed_time'",
")",
")",
"if",
"'validation_metrics_elapsed_time'",
"in",
"stats",
":",
"training_fields",
".",
"append",
"(",
"(",
"'Validation metrics time'",
",",
"'validation_metrics_elapsed_time'",
")",
")",
"section_titles",
".",
"append",
"(",
"\"Training summary\"",
")",
"sections",
".",
"append",
"(",
"training_fields",
")",
"# Remove any options that should not be shown under \"Settings\"",
"to_ignore",
"=",
"[",
"'random_seed'",
",",
"'user_id'",
",",
"'item_id'",
",",
"'target'",
"]",
"for",
"k",
"in",
"to_ignore",
":",
"if",
"k",
"in",
"options",
":",
"del",
"options",
"[",
"k",
"]",
"def",
"add_ordered_options",
"(",
"name",
",",
"ordered_options",
",",
"additional",
"=",
"[",
"]",
")",
":",
"option_fields",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"additional",
":",
"option_fields",
".",
"append",
"(",
"(",
"k",
",",
"_precomputed_field",
"(",
"v",
")",
")",
")",
"for",
"k",
"in",
"ordered_options",
":",
"if",
"k",
"in",
"options",
":",
"option_fields",
".",
"append",
"(",
"(",
"k",
",",
"_precomputed_field",
"(",
"options",
"[",
"k",
"]",
")",
")",
")",
"del",
"options",
"[",
"k",
"]",
"if",
"option_fields",
":",
"section_titles",
".",
"append",
"(",
"name",
")",
"sections",
".",
"append",
"(",
"option_fields",
")",
"# Put in a number of things in order, if applicable.",
"# Model parameters",
"model_parameter_options",
"=",
"[",
"\"only_top_k\"",
",",
"\"threshold\"",
",",
"\"num_factors\"",
",",
"\"binary_target\"",
",",
"\"side_data_factorization\"",
",",
"\"solver\"",
",",
"\"nmf\"",
",",
"\"max_iterations\"",
",",
"\"similarity_type\"",
",",
"\"training_method\"",
"]",
"add_ordered_options",
"(",
"\"Model Parameters\"",
",",
"model_parameter_options",
",",
"[",
"(",
"\"Model class\"",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
"]",
")",
"# Regularization type options",
"regularization_options",
"=",
"[",
"\"regularization\"",
",",
"\"regularization_type\"",
",",
"\"linear_regularization\"",
",",
"\"ranking_regularization\"",
",",
"\"unobserved_rating_value\"",
",",
"\"num_sampled_negative_examples\"",
",",
"\"ials_confidence_scaling_type\"",
",",
"\"ials_confidence_scaling_factor\"",
"]",
"add_ordered_options",
"(",
"\"Regularization Settings\"",
",",
"regularization_options",
")",
"# Optimization stuff",
"optimization_settings",
"=",
"[",
"\"init_random_sigma\"",
",",
"\"sgd_convergence_interval\"",
",",
"\"sgd_convergence_threshold\"",
",",
"\"sgd_max_trial_iterations\"",
",",
"\"sgd_sampling_block_size\"",
",",
"\"sgd_step_adjustment_interval\"",
",",
"\"sgd_step_size\"",
",",
"\"sgd_trial_sample_minimum_size\"",
",",
"\"sgd_trial_sample_proportion\"",
",",
"\"step_size_decrease_rate\"",
",",
"\"additional_iterations_if_unhealthy\"",
",",
"\"adagrad_momentum_weighting\"",
",",
"\"num_tempering_iterations\"",
",",
"\"tempering_regularization_start_value\"",
",",
"\"track_exact_loss\"",
"]",
"add_ordered_options",
"(",
"\"Optimization Settings\"",
",",
"optimization_settings",
")",
"# clean up",
"option_fields",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"_six",
".",
"iteritems",
"(",
"options",
")",
":",
"option_fields",
".",
"append",
"(",
"(",
"k",
",",
"_precomputed_field",
"(",
"v",
")",
")",
")",
"if",
"option_fields",
":",
"section_titles",
".",
"append",
"(",
"\"Other Settings\"",
")",
"sections",
".",
"append",
"(",
"option_fields",
")",
"return",
"(",
"sections",
",",
"section_titles",
")"
] | Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section names.
The order matches that of the 'sections' object. | [
"Returns",
"a",
"structured",
"description",
"of",
"the",
"model",
"including",
"(",
"where",
"relevant",
")",
"the",
"schema",
"of",
"the",
"training",
"data",
"description",
"of",
"the",
"training",
"data",
"training",
"statistics",
"and",
"model",
"hyperparameters",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L634-L782 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._set_current_options | def _set_current_options(self, options):
"""
Set current options for a model.
Parameters
----------
options : dict
A dictionary of the desired option settings. The key should be the name
of the option and each value is the desired value of the option.
"""
opts = self._get_current_options()
opts.update(options)
response = self.__proxy__.set_current_options(opts)
def _set_current_options(self, options):
    """
    Set current options for a model.

    Parameters
    ----------
    options : dict
        A dictionary of the desired option settings. The key should be the
        name of the option and each value is the desired value of the option.
    """
    # Start from the model's current settings and overlay the requested
    # values so that unspecified options keep their existing values.
    merged = self._get_current_options()
    merged.update(options)
    return self.__proxy__.set_current_options(merged)
"def",
"_set_current_options",
"(",
"self",
",",
"options",
")",
":",
"opts",
"=",
"self",
".",
"_get_current_options",
"(",
")",
"opts",
".",
"update",
"(",
"options",
")",
"response",
"=",
"self",
".",
"__proxy__",
".",
"set_current_options",
"(",
"opts",
")",
"return",
"response"
] | Set current options for a model.
Parameters
----------
options : dict
A dictionary of the desired option settings. The key should be the name
of the option and each value is the desired value of the option. | [
"Set",
"current",
"options",
"for",
"a",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L804-L818 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.__prepare_dataset_parameter | def __prepare_dataset_parameter(self, dataset):
"""
Processes the dataset parameter for type correctness.
Returns it as an SFrame.
"""
# Translate the dataset argument into the proper type
if not isinstance(dataset, _SFrame):
def raise_dataset_type_exception():
raise TypeError("The dataset parameter must be either an SFrame, "
"or a dictionary of (str : list) or (str : value).")
if type(dataset) is dict:
if not all(type(k) is str for k in _six.iterkeys(dataset)):
raise_dataset_type_exception()
if all(type(v) in (list, tuple, _array.array) for v in _six.itervalues(dataset)):
dataset = _SFrame(dataset)
else:
dataset = _SFrame({k : [v] for k, v in _six.iteritems(dataset)})
else:
raise_dataset_type_exception()
def __prepare_dataset_parameter(self, dataset):
    """
    Processes the dataset parameter for type correctness.
    Returns it as an SFrame.

    Raises
    ------
    TypeError
        If ``dataset`` is neither an SFrame nor a dictionary of
        (str : list) or (str : value).
    """
    # SFrames pass straight through.
    if isinstance(dataset, _SFrame):
        return dataset

    def _bad_type():
        raise TypeError("The dataset parameter must be either an SFrame, "
                        "or a dictionary of (str : list) or (str : value).")

    if type(dataset) is not dict:
        _bad_type()

    # Every key of a dict dataset must be a column name (str).
    if not all(type(key) is str for key in _six.iterkeys(dataset)):
        _bad_type()

    # A column-oriented dict (str -> sequence) converts directly; a dict of
    # single values becomes a one-row SFrame.
    if all(type(val) in (list, tuple, _array.array)
           for val in _six.itervalues(dataset)):
        return _SFrame(dataset)
    return _SFrame({key: [val] for key, val in _six.iteritems(dataset)})
"def",
"__prepare_dataset_parameter",
"(",
"self",
",",
"dataset",
")",
":",
"# Translate the dataset argument into the proper type",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"_SFrame",
")",
":",
"def",
"raise_dataset_type_exception",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"The dataset parameter must be either an SFrame, \"",
"\"or a dictionary of (str : list) or (str : value).\"",
")",
"if",
"type",
"(",
"dataset",
")",
"is",
"dict",
":",
"if",
"not",
"all",
"(",
"type",
"(",
"k",
")",
"is",
"str",
"for",
"k",
"in",
"_six",
".",
"iterkeys",
"(",
"dataset",
")",
")",
":",
"raise_dataset_type_exception",
"(",
")",
"if",
"all",
"(",
"type",
"(",
"v",
")",
"in",
"(",
"list",
",",
"tuple",
",",
"_array",
".",
"array",
")",
"for",
"v",
"in",
"_six",
".",
"itervalues",
"(",
"dataset",
")",
")",
":",
"dataset",
"=",
"_SFrame",
"(",
"dataset",
")",
"else",
":",
"dataset",
"=",
"_SFrame",
"(",
"{",
"k",
":",
"[",
"v",
"]",
"for",
"k",
",",
"v",
"in",
"_six",
".",
"iteritems",
"(",
"dataset",
")",
"}",
")",
"else",
":",
"raise_dataset_type_exception",
"(",
")",
"return",
"dataset"
] | Processes the dataset parameter for type correctness.
Returns it as an SFrame. | [
"Processes",
"the",
"dataset",
"parameter",
"for",
"type",
"correctness",
".",
"Returns",
"it",
"as",
"an",
"SFrame",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L820-L843 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._get_data_schema | def _get_data_schema(self):
"""
Returns a dictionary of (column : type) for the data used in the
model.
"""
if not hasattr(self, "_data_schema"):
response = self.__proxy__.get_data_schema()
self._data_schema = {k : _turicreate._cython.cy_flexible_type.pytype_from_type_name(v)
for k, v in response["schema"].items()}
def _get_data_schema(self):
    """
    Returns a dictionary of (column : type) for the data used in the
    model.

    The schema is fetched from the backend once, converted from type
    names to Python types, and cached on the instance.
    """
    try:
        # Use the cached schema when a prior call already built it.
        return self._data_schema
    except AttributeError:
        pass
    raw_schema = self.__proxy__.get_data_schema()["schema"]
    self._data_schema = {
        col: _turicreate._cython.cy_flexible_type.pytype_from_type_name(tname)
        for col, tname in raw_schema.items()}
    return self._data_schema
"def",
"_get_data_schema",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_data_schema\"",
")",
":",
"response",
"=",
"self",
".",
"__proxy__",
".",
"get_data_schema",
"(",
")",
"self",
".",
"_data_schema",
"=",
"{",
"k",
":",
"_turicreate",
".",
"_cython",
".",
"cy_flexible_type",
".",
"pytype_from_type_name",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"response",
"[",
"\"schema\"",
"]",
".",
"items",
"(",
")",
"}",
"return",
"self",
".",
"_data_schema"
] | Returns a dictionary of (column : type) for the data used in the
model. | [
"Returns",
"a",
"dictionary",
"of",
"(",
"column",
":",
"type",
")",
"for",
"the",
"data",
"used",
"in",
"the",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L845-L857 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.predict | def predict(self, dataset,
new_observation_data=None, new_user_data=None, new_item_data=None):
"""
Return a score prediction for the user ids and item ids in the provided
data set.
Parameters
----------
dataset : SFrame
Dataset in the same form used for training.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score accuracy. Must be in the same format as the
observation data passed to ``create``. How this data is
used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
Returns
-------
out : SArray
An SArray with predicted scores for each given observation
predicted by the model.
See Also
--------
recommend, evaluate
"""
if new_observation_data is None:
new_observation_data = _SFrame()
if new_user_data is None:
new_user_data = _SFrame()
if new_item_data is None:
new_item_data = _SFrame()
dataset = self.__prepare_dataset_parameter(dataset)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
response = self.__proxy__.predict(dataset, new_user_data, new_item_data)
return response['prediction'] | python | def predict(self, dataset,
new_observation_data=None, new_user_data=None, new_item_data=None):
"""
Return a score prediction for the user ids and item ids in the provided
data set.
Parameters
----------
dataset : SFrame
Dataset in the same form used for training.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score accuracy. Must be in the same format as the
observation data passed to ``create``. How this data is
used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
Returns
-------
out : SArray
An SArray with predicted scores for each given observation
predicted by the model.
See Also
--------
recommend, evaluate
"""
# Normalize optional side-data arguments: treat None as an empty SFrame.
if new_observation_data is None:
new_observation_data = _SFrame()
if new_user_data is None:
new_user_data = _SFrame()
if new_item_data is None:
new_item_data = _SFrame()
# Accept dict inputs as well as SFrames (converted by the helper).
dataset = self.__prepare_dataset_parameter(dataset)
# Local validator: raises TypeError naming the offending parameter.
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
# NOTE(review): `new_observation_data` is validated above but is NOT
# forwarded to the backend predict call below — confirm this is intentional.
response = self.__proxy__.predict(dataset, new_user_data, new_item_data)
return response['prediction'] | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"new_observation_data",
"=",
"None",
",",
"new_user_data",
"=",
"None",
",",
"new_item_data",
"=",
"None",
")",
":",
"if",
"new_observation_data",
"is",
"None",
":",
"new_observation_data",
"=",
"_SFrame",
"(",
")",
"if",
"new_user_data",
"is",
"None",
":",
"new_user_data",
"=",
"_SFrame",
"(",
")",
"if",
"new_item_data",
"is",
"None",
":",
"new_item_data",
"=",
"_SFrame",
"(",
")",
"dataset",
"=",
"self",
".",
"__prepare_dataset_parameter",
"(",
"dataset",
")",
"def",
"check_type",
"(",
"arg",
",",
"arg_name",
",",
"required_type",
",",
"allowed_types",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"required_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter \"",
"+",
"arg_name",
"+",
"\" must be of type(s) \"",
"+",
"(",
"\", \"",
".",
"join",
"(",
"allowed_types",
")",
")",
"+",
"\"; Type '\"",
"+",
"str",
"(",
"type",
"(",
"arg",
")",
")",
"+",
"\"' not recognized.\"",
")",
"check_type",
"(",
"new_observation_data",
",",
"\"new_observation_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"check_type",
"(",
"new_user_data",
",",
"\"new_user_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"check_type",
"(",
"new_item_data",
",",
"\"new_item_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"response",
"=",
"self",
".",
"__proxy__",
".",
"predict",
"(",
"dataset",
",",
"new_user_data",
",",
"new_item_data",
")",
"return",
"response",
"[",
"'prediction'",
"]"
] | Return a score prediction for the user ids and item ids in the provided
data set.
Parameters
----------
dataset : SFrame
Dataset in the same form used for training.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score accuracy. Must be in the same format as the
observation data passed to ``create``. How this data is
used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
Returns
-------
out : SArray
An SArray with predicted scores for each given observation
predicted by the model.
See Also
--------
recommend, evaluate | [
"Return",
"a",
"score",
"prediction",
"for",
"the",
"user",
"ids",
"and",
"item",
"ids",
"in",
"the",
"provided",
"data",
"set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L859-L925 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.get_similar_items | def get_similar_items(self, items=None, k=10, verbose=False):
"""
Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
"""
if items is None:
get_all_items = True
items = _SArray()
else:
get_all_items = False
if isinstance(items, list):
items = _SArray(items)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(items, "items", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
return self.__proxy__.get_similar_items(items, k, verbose, get_all_items) | python | def get_similar_items(self, items=None, k=10, verbose=False):
"""
Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
"""
if items is None:
get_all_items = True
items = _SArray()
else:
get_all_items = False
if isinstance(items, list):
items = _SArray(items)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(items, "items", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
return self.__proxy__.get_similar_items(items, k, verbose, get_all_items) | [
"def",
"get_similar_items",
"(",
"self",
",",
"items",
"=",
"None",
",",
"k",
"=",
"10",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"items",
"is",
"None",
":",
"get_all_items",
"=",
"True",
"items",
"=",
"_SArray",
"(",
")",
"else",
":",
"get_all_items",
"=",
"False",
"if",
"isinstance",
"(",
"items",
",",
"list",
")",
":",
"items",
"=",
"_SArray",
"(",
"items",
")",
"def",
"check_type",
"(",
"arg",
",",
"arg_name",
",",
"required_type",
",",
"allowed_types",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"required_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter \"",
"+",
"arg_name",
"+",
"\" must be of type(s) \"",
"+",
"(",
"\", \"",
".",
"join",
"(",
"allowed_types",
")",
")",
"+",
"\"; Type '\"",
"+",
"str",
"(",
"type",
"(",
"arg",
")",
")",
"+",
"\"' not recognized.\"",
")",
"check_type",
"(",
"items",
",",
"\"items\"",
",",
"_SArray",
",",
"[",
"\"SArray\"",
",",
"\"list\"",
"]",
")",
"check_type",
"(",
"k",
",",
"\"k\"",
",",
"int",
",",
"[",
"\"int\"",
"]",
")",
"return",
"self",
".",
"__proxy__",
".",
"get_similar_items",
"(",
"items",
",",
"k",
",",
"verbose",
",",
"get_all_items",
")"
] | Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
Progress printing is shown.
Returns
-------
out : SFrame
A SFrame with the top ranked similar items for each item. The
columns `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items() | [
"Get",
"the",
"k",
"most",
"similar",
"items",
"for",
"each",
"item",
"in",
"items",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L927-L988 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.get_similar_users | def get_similar_users(self, users=None, k=10):
"""Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
A SFrame with the top ranked similar users for each user. The
columns `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users()
"""
if users is None:
get_all_users = True
users = _SArray()
else:
get_all_users = False
if isinstance(users, list):
users = _SArray(users)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(users, "users", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
opt = {'model': self.__proxy__,
'users': users,
'get_all_users' : get_all_users,
'k': k}
response = self.__proxy__.get_similar_users(users, k, get_all_users)
return response | python | def get_similar_users(self, users=None, k=10):
"""Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
A SFrame with the top ranked similar users for each user. The
columns `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users()
"""
if users is None:
get_all_users = True
users = _SArray()
else:
get_all_users = False
if isinstance(users, list):
users = _SArray(users)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(users, "users", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
opt = {'model': self.__proxy__,
'users': users,
'get_all_users' : get_all_users,
'k': k}
response = self.__proxy__.get_similar_users(users, k, get_all_users)
return response | [
"def",
"get_similar_users",
"(",
"self",
",",
"users",
"=",
"None",
",",
"k",
"=",
"10",
")",
":",
"if",
"users",
"is",
"None",
":",
"get_all_users",
"=",
"True",
"users",
"=",
"_SArray",
"(",
")",
"else",
":",
"get_all_users",
"=",
"False",
"if",
"isinstance",
"(",
"users",
",",
"list",
")",
":",
"users",
"=",
"_SArray",
"(",
"users",
")",
"def",
"check_type",
"(",
"arg",
",",
"arg_name",
",",
"required_type",
",",
"allowed_types",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"required_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter \"",
"+",
"arg_name",
"+",
"\" must be of type(s) \"",
"+",
"(",
"\", \"",
".",
"join",
"(",
"allowed_types",
")",
")",
"+",
"\"; Type '\"",
"+",
"str",
"(",
"type",
"(",
"arg",
")",
")",
"+",
"\"' not recognized.\"",
")",
"check_type",
"(",
"users",
",",
"\"users\"",
",",
"_SArray",
",",
"[",
"\"SArray\"",
",",
"\"list\"",
"]",
")",
"check_type",
"(",
"k",
",",
"\"k\"",
",",
"int",
",",
"[",
"\"int\"",
"]",
")",
"opt",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'users'",
":",
"users",
",",
"'get_all_users'",
":",
"get_all_users",
",",
"'k'",
":",
"k",
"}",
"response",
"=",
"self",
".",
"__proxy__",
".",
"get_similar_users",
"(",
"users",
",",
"k",
",",
"get_all_users",
")",
"return",
"response"
] | Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
A SFrame with the top ranked similar users for each user. The
columns `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users() | [
"Get",
"the",
"k",
"most",
"similar",
"users",
"for",
"each",
"entry",
"in",
"users",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L990-L1053 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.recommend | def recommend(self, users=None, k=10, exclude=None, items=None,
new_observation_data=None, new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items for each user.
Parameters
----------
users : SArray, SFrame, or list, optional
Users or observation queries for which to make recommendations.
For list, SArray, and single-column inputs, this is simply a set
of user IDs. By default, recommendations are returned for all
users present when the model was trained. However, if the
recommender model was created with additional features in the
``observation_data`` SFrame, then a corresponding SFrame of
observation queries -- observation data without item or target
columns -- can be passed to this method. For example, a model
trained with user ID, item ID, time, and rating columns may be
queried using an SFrame with user ID and time columns. In this
case, the user ID column must be present, and all column names
should match those in the ``observation_data`` SFrame passed to
``create.``
k : int, optional
The number of recommendations to generate for each user.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be made. If
``items`` is an SArray, list, or SFrame with a single column,
only items from the given set will be recommended. This can be
used, for example, to restrict the recommendations to items
within a particular category or genre. If ``items`` is an
SFrame with user ID and item ID columns, then the item
restriction is specialized to each user. For example, if
``items`` contains 3 rows with user U1 -- (U1, I1), (U1, I2),
and (U1, I3) -- then the recommendations for user U1 are
chosen from items I1, I2, and I3. By default, recommendations
are made from all items present when the model was trained.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score and recommendation accuracy. Must be in the same
format as the observation data passed to ``create``. How
this data is used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of user / item pairs. The
column names must be equal to the user and item columns of
the main data, and it provides the model with user/item
pairs to exclude from the recommendations. These
user-item-pairs are always excluded from the predictions,
even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``user_id``, ``item_id``, *score*,
and *rank*, where ``user_id`` and ``item_id``
match the user and item column names specified at training
time. The rank column is between 1 and ``k`` and gives
the relative score of that item. The value of score
depends on the method used for recommendations.
See Also
--------
recommend_from_interactions
predict
evaluate
"""
from turicreate._cython.cy_server import QuietProgress
assert type(k) == int
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
__null_sframe = _SFrame()
if users is None:
users = __null_sframe
if exclude is None:
exclude = __null_sframe
if items is None:
items = __null_sframe
if new_observation_data is None:
new_observation_data = __null_sframe
if new_user_data is None:
new_user_data = __null_sframe
if new_item_data is None:
new_item_data = __null_sframe
if isinstance(users, list) or (_HAS_NUMPY and isinstance(users, _numpy.ndarray)):
users = _SArray(users)
# allow to take a list of dictionaries of the form [{'user_id':1,'time':10}] etc.
if users.dtype == dict:
users = users.unpack(column_name_prefix='')
if isinstance(users, _SArray):
users = _SFrame({user_id: users})
if isinstance(items, list) or (_HAS_NUMPY and isinstance(items, _numpy.ndarray)):
items = _SArray(items, dtype = item_type)
if isinstance(items, _SArray):
items = _SFrame({item_id: items})
# Check type of incoming data.
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(users, "users", _SFrame, ["SArray", "list", "SFrame", "numpy.ndarray"])
check_type(exclude, "exclude", _SFrame, ["SFrame"])
check_type(items, "items", _SFrame, ["SFrame", "SArray", "list", "numpy.ndarray"])
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
# See if we are in the situation where there are no users
# originally. In this case, the default type of the user
# column is string, so we have to be mindful of that when
# making recommendations and possibly cast it to string if
# needed.
# The only case where we need to deal with the user id is when
# it's used to link with rated items in new_observation_data,
# thus handle that case explicitly and error out in others.
cast_user_to_string_type = False
if self.num_users == 0:
cast_user_to_string_type = True
if users.num_rows() != 0:
# In this case, the user column has actually been set to a
# string type, so we need to make sure that we cast
# everything back and forth to that to preserve type.
if new_observation_data.num_rows() == 0:
raise ValueError("When users are not specified with the model, "
"new_observation_data must be set in order to make recommendations.")
new_observation_data[user_id] = new_observation_data[user_id].astype(user_type)
else:
print("WARNING: No users specified to model at creation time, so "
"calling recommend() for all users returns empty SFrame.")
# Cast to the appropriate type if necessary.
if users.num_rows() != 0:
try:
user_column = users[user_id]
except RuntimeError:
raise _ToolkitError("User column '%s' not present in input user data." % user_id)
if cast_user_to_string_type:
assert new_observation_data.num_rows() != 0
original_user_type = user_column.dtype
users[user_id] = user_column.astype(str)
user_type=str
elif user_column.dtype != user_type:
users[user_id] = user_column.astype(user_type)
# Cast user specified in exclude to the appropriate type if necessary.
if user_id in exclude.column_names() and exclude[user_id].dtype!=user_type:
exclude[user_id] = exclude[user_id].astype(user_type)
try:
diversity = float(diversity)
except Exception:
raise TypeError("Parameter diversity must be a floating point value equal to or larger than 0.")
if diversity < 0:
raise TypeError("Parameter diversity must be a floating point value equal to or larger than 0.")
if random_seed is None:
random_seed = hash("%.20f" % _time.time())
else:
try:
random_seed = int(random_seed)
except TypeError:
raise TypeError("random_seed must be integer.")
opt = {'model': self.__proxy__,
'query': users,
'top_k': k,
'exclude': exclude,
'restrictions': items,
'new_data': new_observation_data,
'new_user_data': new_user_data,
'new_item_data': new_item_data,
'exclude_known': exclude_known,
'diversity' : diversity,
'random_seed' : random_seed
}
with QuietProgress(verbose):
recs = self.__proxy__.recommend(users, exclude, items, new_observation_data, new_user_data,
new_item_data, exclude_known, k, diversity, random_seed)
if cast_user_to_string_type:
recs[user_id] = recs[user_id].astype(original_user_type)
return recs | python | def recommend(self, users=None, k=10, exclude=None, items=None,
new_observation_data=None, new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items for each user.
Parameters
----------
users : SArray, SFrame, or list, optional
Users or observation queries for which to make recommendations.
For list, SArray, and single-column inputs, this is simply a set
of user IDs. By default, recommendations are returned for all
users present when the model was trained. However, if the
recommender model was created with additional features in the
``observation_data`` SFrame, then a corresponding SFrame of
observation queries -- observation data without item or target
columns -- can be passed to this method. For example, a model
trained with user ID, item ID, time, and rating columns may be
queried using an SFrame with user ID and time columns. In this
case, the user ID column must be present, and all column names
should match those in the ``observation_data`` SFrame passed to
``create.``
k : int, optional
The number of recommendations to generate for each user.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be made. If
``items`` is an SArray, list, or SFrame with a single column,
only items from the given set will be recommended. This can be
used, for example, to restrict the recommendations to items
within a particular category or genre. If ``items`` is an
SFrame with user ID and item ID columns, then the item
restriction is specialized to each user. For example, if
``items`` contains 3 rows with user U1 -- (U1, I1), (U1, I2),
and (U1, I3) -- then the recommendations for user U1 are
chosen from items I1, I2, and I3. By default, recommendations
are made from all items present when the model was trained.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score and recommendation accuracy. Must be in the same
format as the observation data passed to ``create``. How
this data is used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of user / item pairs. The
column names must be equal to the user and item columns of
the main data, and it provides the model with user/item
pairs to exclude from the recommendations. These
user-item-pairs are always excluded from the predictions,
even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``user_id``, ``item_id``, *score*,
and *rank*, where ``user_id`` and ``item_id``
match the user and item column names specified at training
time. The rank column is between 1 and ``k`` and gives
the relative score of that item. The value of score
depends on the method used for recommendations.
See Also
--------
recommend_from_interactions
predict
evaluate
"""
from turicreate._cython.cy_server import QuietProgress
assert type(k) == int
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
__null_sframe = _SFrame()
if users is None:
users = __null_sframe
if exclude is None:
exclude = __null_sframe
if items is None:
items = __null_sframe
if new_observation_data is None:
new_observation_data = __null_sframe
if new_user_data is None:
new_user_data = __null_sframe
if new_item_data is None:
new_item_data = __null_sframe
if isinstance(users, list) or (_HAS_NUMPY and isinstance(users, _numpy.ndarray)):
users = _SArray(users)
# allow to take a list of dictionaries of the form [{'user_id':1,'time':10}] etc.
if users.dtype == dict:
users = users.unpack(column_name_prefix='')
if isinstance(users, _SArray):
users = _SFrame({user_id: users})
if isinstance(items, list) or (_HAS_NUMPY and isinstance(items, _numpy.ndarray)):
items = _SArray(items, dtype = item_type)
if isinstance(items, _SArray):
items = _SFrame({item_id: items})
# Check type of incoming data.
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(users, "users", _SFrame, ["SArray", "list", "SFrame", "numpy.ndarray"])
check_type(exclude, "exclude", _SFrame, ["SFrame"])
check_type(items, "items", _SFrame, ["SFrame", "SArray", "list", "numpy.ndarray"])
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
# See if we are in the situation where there are no users
# originally. In this case, the default type of the user
# column is string, so we have to be mindful of that when
# making recommendations and possibly cast it to string if
# needed.
# The only case where we need to deal with the user id is when
# it's used to link with rated items in new_observation_data,
# thus handle that case explicitly and error out in others.
cast_user_to_string_type = False
if self.num_users == 0:
cast_user_to_string_type = True
if users.num_rows() != 0:
# In this case, the user column has actually been set to a
# string type, so we need to make sure that we cast
# everything back and forth to that to preserve type.
if new_observation_data.num_rows() == 0:
raise ValueError("When users are not specified with the model, "
"new_observation_data must be set in order to make recommendations.")
new_observation_data[user_id] = new_observation_data[user_id].astype(user_type)
else:
print("WARNING: No users specified to model at creation time, so "
"calling recommend() for all users returns empty SFrame.")
# Cast to the appropriate type if necessary.
if users.num_rows() != 0:
try:
user_column = users[user_id]
except RuntimeError:
raise _ToolkitError("User column '%s' not present in input user data." % user_id)
if cast_user_to_string_type:
assert new_observation_data.num_rows() != 0
original_user_type = user_column.dtype
users[user_id] = user_column.astype(str)
user_type=str
elif user_column.dtype != user_type:
users[user_id] = user_column.astype(user_type)
# Cast user specified in exclude to the appropriate type if necessary.
if user_id in exclude.column_names() and exclude[user_id].dtype!=user_type:
exclude[user_id] = exclude[user_id].astype(user_type)
try:
diversity = float(diversity)
except Exception:
raise TypeError("Parameter diversity must be a floating point value equal to or larger than 0.")
if diversity < 0:
raise TypeError("Parameter diversity must be a floating point value equal to or larger than 0.")
if random_seed is None:
random_seed = hash("%.20f" % _time.time())
else:
try:
random_seed = int(random_seed)
except TypeError:
raise TypeError("random_seed must be integer.")
opt = {'model': self.__proxy__,
'query': users,
'top_k': k,
'exclude': exclude,
'restrictions': items,
'new_data': new_observation_data,
'new_user_data': new_user_data,
'new_item_data': new_item_data,
'exclude_known': exclude_known,
'diversity' : diversity,
'random_seed' : random_seed
}
with QuietProgress(verbose):
recs = self.__proxy__.recommend(users, exclude, items, new_observation_data, new_user_data,
new_item_data, exclude_known, k, diversity, random_seed)
if cast_user_to_string_type:
recs[user_id] = recs[user_id].astype(original_user_type)
return recs | [
"def",
"recommend",
"(",
"self",
",",
"users",
"=",
"None",
",",
"k",
"=",
"10",
",",
"exclude",
"=",
"None",
",",
"items",
"=",
"None",
",",
"new_observation_data",
"=",
"None",
",",
"new_user_data",
"=",
"None",
",",
"new_item_data",
"=",
"None",
",",
"exclude_known",
"=",
"True",
",",
"diversity",
"=",
"0",
",",
"random_seed",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"turicreate",
".",
"_cython",
".",
"cy_server",
"import",
"QuietProgress",
"assert",
"type",
"(",
"k",
")",
"==",
"int",
"column_types",
"=",
"self",
".",
"_get_data_schema",
"(",
")",
"user_id",
"=",
"self",
".",
"user_id",
"item_id",
"=",
"self",
".",
"item_id",
"user_type",
"=",
"column_types",
"[",
"user_id",
"]",
"item_type",
"=",
"column_types",
"[",
"item_id",
"]",
"__null_sframe",
"=",
"_SFrame",
"(",
")",
"if",
"users",
"is",
"None",
":",
"users",
"=",
"__null_sframe",
"if",
"exclude",
"is",
"None",
":",
"exclude",
"=",
"__null_sframe",
"if",
"items",
"is",
"None",
":",
"items",
"=",
"__null_sframe",
"if",
"new_observation_data",
"is",
"None",
":",
"new_observation_data",
"=",
"__null_sframe",
"if",
"new_user_data",
"is",
"None",
":",
"new_user_data",
"=",
"__null_sframe",
"if",
"new_item_data",
"is",
"None",
":",
"new_item_data",
"=",
"__null_sframe",
"if",
"isinstance",
"(",
"users",
",",
"list",
")",
"or",
"(",
"_HAS_NUMPY",
"and",
"isinstance",
"(",
"users",
",",
"_numpy",
".",
"ndarray",
")",
")",
":",
"users",
"=",
"_SArray",
"(",
"users",
")",
"# allow to take a list of dictionaries of the form [{'user_id':1,'time':10}] etc.",
"if",
"users",
".",
"dtype",
"==",
"dict",
":",
"users",
"=",
"users",
".",
"unpack",
"(",
"column_name_prefix",
"=",
"''",
")",
"if",
"isinstance",
"(",
"users",
",",
"_SArray",
")",
":",
"users",
"=",
"_SFrame",
"(",
"{",
"user_id",
":",
"users",
"}",
")",
"if",
"isinstance",
"(",
"items",
",",
"list",
")",
"or",
"(",
"_HAS_NUMPY",
"and",
"isinstance",
"(",
"items",
",",
"_numpy",
".",
"ndarray",
")",
")",
":",
"items",
"=",
"_SArray",
"(",
"items",
",",
"dtype",
"=",
"item_type",
")",
"if",
"isinstance",
"(",
"items",
",",
"_SArray",
")",
":",
"items",
"=",
"_SFrame",
"(",
"{",
"item_id",
":",
"items",
"}",
")",
"# Check type of incoming data.",
"def",
"check_type",
"(",
"arg",
",",
"arg_name",
",",
"required_type",
",",
"allowed_types",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"required_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter \"",
"+",
"arg_name",
"+",
"\" must be of type(s) \"",
"+",
"(",
"\", \"",
".",
"join",
"(",
"allowed_types",
")",
")",
"+",
"\"; Type '\"",
"+",
"str",
"(",
"type",
"(",
"arg",
")",
")",
"+",
"\"' not recognized.\"",
")",
"check_type",
"(",
"users",
",",
"\"users\"",
",",
"_SFrame",
",",
"[",
"\"SArray\"",
",",
"\"list\"",
",",
"\"SFrame\"",
",",
"\"numpy.ndarray\"",
"]",
")",
"check_type",
"(",
"exclude",
",",
"\"exclude\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"check_type",
"(",
"items",
",",
"\"items\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
",",
"\"SArray\"",
",",
"\"list\"",
",",
"\"numpy.ndarray\"",
"]",
")",
"check_type",
"(",
"new_observation_data",
",",
"\"new_observation_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"check_type",
"(",
"new_user_data",
",",
"\"new_user_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"check_type",
"(",
"new_item_data",
",",
"\"new_item_data\"",
",",
"_SFrame",
",",
"[",
"\"SFrame\"",
"]",
")",
"# See if we are in the situation where there are no users",
"# originally. In this case, the default type of the user",
"# column is string, so we have to be mindful of that when",
"# making recommendations and possibly cast it to string if",
"# needed.",
"# The only case where we need to deal with the user id is when",
"# it's used to link with rated items in new_observation_data,",
"# thus handle that case explicitly and error out in others.",
"cast_user_to_string_type",
"=",
"False",
"if",
"self",
".",
"num_users",
"==",
"0",
":",
"cast_user_to_string_type",
"=",
"True",
"if",
"users",
".",
"num_rows",
"(",
")",
"!=",
"0",
":",
"# In this case, the user column has actually been set to a",
"# string type, so we need to make sure that we cast",
"# everything back and forth to that to preserve type.",
"if",
"new_observation_data",
".",
"num_rows",
"(",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"When users are not specified with the model, \"",
"\"new_observation_data must be set in order to make recommendations.\"",
")",
"new_observation_data",
"[",
"user_id",
"]",
"=",
"new_observation_data",
"[",
"user_id",
"]",
".",
"astype",
"(",
"user_type",
")",
"else",
":",
"print",
"(",
"\"WARNING: No users specified to model at creation time, so \"",
"\"calling recommend() for all users returns empty SFrame.\"",
")",
"# Cast to the appropriate type if necessary.",
"if",
"users",
".",
"num_rows",
"(",
")",
"!=",
"0",
":",
"try",
":",
"user_column",
"=",
"users",
"[",
"user_id",
"]",
"except",
"RuntimeError",
":",
"raise",
"_ToolkitError",
"(",
"\"User column '%s' not present in input user data.\"",
"%",
"user_id",
")",
"if",
"cast_user_to_string_type",
":",
"assert",
"new_observation_data",
".",
"num_rows",
"(",
")",
"!=",
"0",
"original_user_type",
"=",
"user_column",
".",
"dtype",
"users",
"[",
"user_id",
"]",
"=",
"user_column",
".",
"astype",
"(",
"str",
")",
"user_type",
"=",
"str",
"elif",
"user_column",
".",
"dtype",
"!=",
"user_type",
":",
"users",
"[",
"user_id",
"]",
"=",
"user_column",
".",
"astype",
"(",
"user_type",
")",
"# Cast user specified in exclude to the appropriate type if necessary.",
"if",
"user_id",
"in",
"exclude",
".",
"column_names",
"(",
")",
"and",
"exclude",
"[",
"user_id",
"]",
".",
"dtype",
"!=",
"user_type",
":",
"exclude",
"[",
"user_id",
"]",
"=",
"exclude",
"[",
"user_id",
"]",
".",
"astype",
"(",
"user_type",
")",
"try",
":",
"diversity",
"=",
"float",
"(",
"diversity",
")",
"except",
"Exception",
":",
"raise",
"TypeError",
"(",
"\"Parameter diversity must be a floating point value equal to or larger than 0.\"",
")",
"if",
"diversity",
"<",
"0",
":",
"raise",
"TypeError",
"(",
"\"Parameter diversity must be a floating point value equal to or larger than 0.\"",
")",
"if",
"random_seed",
"is",
"None",
":",
"random_seed",
"=",
"hash",
"(",
"\"%.20f\"",
"%",
"_time",
".",
"time",
"(",
")",
")",
"else",
":",
"try",
":",
"random_seed",
"=",
"int",
"(",
"random_seed",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"random_seed must be integer.\"",
")",
"opt",
"=",
"{",
"'model'",
":",
"self",
".",
"__proxy__",
",",
"'query'",
":",
"users",
",",
"'top_k'",
":",
"k",
",",
"'exclude'",
":",
"exclude",
",",
"'restrictions'",
":",
"items",
",",
"'new_data'",
":",
"new_observation_data",
",",
"'new_user_data'",
":",
"new_user_data",
",",
"'new_item_data'",
":",
"new_item_data",
",",
"'exclude_known'",
":",
"exclude_known",
",",
"'diversity'",
":",
"diversity",
",",
"'random_seed'",
":",
"random_seed",
"}",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"recs",
"=",
"self",
".",
"__proxy__",
".",
"recommend",
"(",
"users",
",",
"exclude",
",",
"items",
",",
"new_observation_data",
",",
"new_user_data",
",",
"new_item_data",
",",
"exclude_known",
",",
"k",
",",
"diversity",
",",
"random_seed",
")",
"if",
"cast_user_to_string_type",
":",
"recs",
"[",
"user_id",
"]",
"=",
"recs",
"[",
"user_id",
"]",
".",
"astype",
"(",
"original_user_type",
")",
"return",
"recs"
] | Recommend the ``k`` highest scored items for each user.
Parameters
----------
users : SArray, SFrame, or list, optional
Users or observation queries for which to make recommendations.
For list, SArray, and single-column inputs, this is simply a set
of user IDs. By default, recommendations are returned for all
users present when the model was trained. However, if the
recommender model was created with additional features in the
``observation_data`` SFrame, then a corresponding SFrame of
observation queries -- observation data without item or target
columns -- can be passed to this method. For example, a model
trained with user ID, item ID, time, and rating columns may be
queried using an SFrame with user ID and time columns. In this
case, the user ID column must be present, and all column names
should match those in the ``observation_data`` SFrame passed to
``create.``
k : int, optional
The number of recommendations to generate for each user.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be made. If
``items`` is an SArray, list, or SFrame with a single column,
only items from the given set will be recommended. This can be
used, for example, to restrict the recommendations to items
within a particular category or genre. If ``items`` is an
SFrame with user ID and item ID columns, then the item
restriction is specialized to each user. For example, if
``items`` contains 3 rows with user U1 -- (U1, I1), (U1, I2),
and (U1, I3) -- then the recommendations for user U1 are
chosen from items I1, I2, and I3. By default, recommendations
are made from all items present when the model was trained.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score and recommendation accuracy. Must be in the same
format as the observation data passed to ``create``. How
this data is used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of user / item pairs. The
column names must be equal to the user and item columns of
the main data, and it provides the model with user/item
pairs to exclude from the recommendations. These
user-item-pairs are always excluded from the predictions,
even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``user_id``, ``item_id``, *score*,
and *rank*, where ``user_id`` and ``item_id``
match the user and item column names specified at training
time. The rank column is between 1 and ``k`` and gives
the relative score of that item. The value of score
depends on the method used for recommendations.
See Also
--------
recommend_from_interactions
predict
evaluate | [
"Recommend",
"the",
"k",
"highest",
"scored",
"items",
"for",
"each",
"user",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1056-L1308 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.recommend_from_interactions | def recommend_from_interactions(
self, observed_items, k=10, exclude=None, items=None,
new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items based on the
interactions given in `observed_items.`
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame
"""
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
if not hasattr(self, "_implicit_user_name"):
import hashlib
import time
self._implicit_user_name = None #("implicit-user-%s"
# % hashlib.md5("%0.20f" % time.time()).hexdigest()[:12])
if isinstance(observed_items, list):
observed_items = _SArray(observed_items, dtype = item_type)
if isinstance(observed_items, _SArray):
observed_items = _SFrame({self.item_id : observed_items})
if not isinstance(observed_items, _SFrame):
raise TypeError("observed_items must be a list or SArray of items, or an SFrame of items "
"and optionally ratings or other interaction information.")
# Don't modify the user's argument (if it's an SFrame).
observed_items = observed_items.copy()
# If a user id is present, then use that as the query user id
# (making sure there is only one present). If not, then use
# the local fake user id.
if user_id in observed_items.column_names():
main_user_value = observed_items[user_id][0]
if (observed_items[user_id] != main_user_value).any():
raise ValueError("To recommend items for more than one user, use `recommend()` and "
"supply new interactions using new_observation_data.")
users = _SArray([main_user_value], dtype = user_type)
else:
users = _SArray([self._implicit_user_name], dtype = user_type)
observed_items[user_id] = self._implicit_user_name
if observed_items[user_id].dtype != user_type:
observed_items[user_id] = observed_items[user_id].astype(user_type)
# Check the rest of the arguments.
if exclude is not None:
if isinstance(exclude, list):
exclude = _SArray(exclude, dtype = item_type)
if isinstance(exclude, _SArray):
exclude = _SFrame({item_id : exclude})
if user_id not in exclude.column_names():
exclude[user_id] = self._implicit_user_name
exclude[user_id] = exclude[user_id].astype(user_type)
recommendations = self.recommend(
users = users,
new_observation_data = observed_items,
k = k,
items = items,
new_user_data = new_user_data,
new_item_data = new_item_data,
exclude_known = exclude_known,
diversity = diversity,
random_seed = random_seed,
verbose = verbose)
del recommendations[user_id]
return recommendations | python | def recommend_from_interactions(
self, observed_items, k=10, exclude=None, items=None,
new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items based on the
interactions given in `observed_items.`
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame
"""
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
if not hasattr(self, "_implicit_user_name"):
import hashlib
import time
self._implicit_user_name = None #("implicit-user-%s"
# % hashlib.md5("%0.20f" % time.time()).hexdigest()[:12])
if isinstance(observed_items, list):
observed_items = _SArray(observed_items, dtype = item_type)
if isinstance(observed_items, _SArray):
observed_items = _SFrame({self.item_id : observed_items})
if not isinstance(observed_items, _SFrame):
raise TypeError("observed_items must be a list or SArray of items, or an SFrame of items "
"and optionally ratings or other interaction information.")
# Don't modify the user's argument (if it's an SFrame).
observed_items = observed_items.copy()
# If a user id is present, then use that as the query user id
# (making sure there is only one present). If not, then use
# the local fake user id.
if user_id in observed_items.column_names():
main_user_value = observed_items[user_id][0]
if (observed_items[user_id] != main_user_value).any():
raise ValueError("To recommend items for more than one user, use `recommend()` and "
"supply new interactions using new_observation_data.")
users = _SArray([main_user_value], dtype = user_type)
else:
users = _SArray([self._implicit_user_name], dtype = user_type)
observed_items[user_id] = self._implicit_user_name
if observed_items[user_id].dtype != user_type:
observed_items[user_id] = observed_items[user_id].astype(user_type)
# Check the rest of the arguments.
if exclude is not None:
if isinstance(exclude, list):
exclude = _SArray(exclude, dtype = item_type)
if isinstance(exclude, _SArray):
exclude = _SFrame({item_id : exclude})
if user_id not in exclude.column_names():
exclude[user_id] = self._implicit_user_name
exclude[user_id] = exclude[user_id].astype(user_type)
recommendations = self.recommend(
users = users,
new_observation_data = observed_items,
k = k,
items = items,
new_user_data = new_user_data,
new_item_data = new_item_data,
exclude_known = exclude_known,
diversity = diversity,
random_seed = random_seed,
verbose = verbose)
del recommendations[user_id]
return recommendations | [
"def",
"recommend_from_interactions",
"(",
"self",
",",
"observed_items",
",",
"k",
"=",
"10",
",",
"exclude",
"=",
"None",
",",
"items",
"=",
"None",
",",
"new_user_data",
"=",
"None",
",",
"new_item_data",
"=",
"None",
",",
"exclude_known",
"=",
"True",
",",
"diversity",
"=",
"0",
",",
"random_seed",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"column_types",
"=",
"self",
".",
"_get_data_schema",
"(",
")",
"user_id",
"=",
"self",
".",
"user_id",
"item_id",
"=",
"self",
".",
"item_id",
"user_type",
"=",
"column_types",
"[",
"user_id",
"]",
"item_type",
"=",
"column_types",
"[",
"item_id",
"]",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_implicit_user_name\"",
")",
":",
"import",
"hashlib",
"import",
"time",
"self",
".",
"_implicit_user_name",
"=",
"None",
"#(\"implicit-user-%s\"",
"# % hashlib.md5(\"%0.20f\" % time.time()).hexdigest()[:12])",
"if",
"isinstance",
"(",
"observed_items",
",",
"list",
")",
":",
"observed_items",
"=",
"_SArray",
"(",
"observed_items",
",",
"dtype",
"=",
"item_type",
")",
"if",
"isinstance",
"(",
"observed_items",
",",
"_SArray",
")",
":",
"observed_items",
"=",
"_SFrame",
"(",
"{",
"self",
".",
"item_id",
":",
"observed_items",
"}",
")",
"if",
"not",
"isinstance",
"(",
"observed_items",
",",
"_SFrame",
")",
":",
"raise",
"TypeError",
"(",
"\"observed_items must be a list or SArray of items, or an SFrame of items \"",
"\"and optionally ratings or other interaction information.\"",
")",
"# Don't modify the user's argument (if it's an SFrame).",
"observed_items",
"=",
"observed_items",
".",
"copy",
"(",
")",
"# If a user id is present, then use that as the query user id",
"# (making sure there is only one present). If not, then use",
"# the local fake user id.",
"if",
"user_id",
"in",
"observed_items",
".",
"column_names",
"(",
")",
":",
"main_user_value",
"=",
"observed_items",
"[",
"user_id",
"]",
"[",
"0",
"]",
"if",
"(",
"observed_items",
"[",
"user_id",
"]",
"!=",
"main_user_value",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"To recommend items for more than one user, use `recommend()` and \"",
"\"supply new interactions using new_observation_data.\"",
")",
"users",
"=",
"_SArray",
"(",
"[",
"main_user_value",
"]",
",",
"dtype",
"=",
"user_type",
")",
"else",
":",
"users",
"=",
"_SArray",
"(",
"[",
"self",
".",
"_implicit_user_name",
"]",
",",
"dtype",
"=",
"user_type",
")",
"observed_items",
"[",
"user_id",
"]",
"=",
"self",
".",
"_implicit_user_name",
"if",
"observed_items",
"[",
"user_id",
"]",
".",
"dtype",
"!=",
"user_type",
":",
"observed_items",
"[",
"user_id",
"]",
"=",
"observed_items",
"[",
"user_id",
"]",
".",
"astype",
"(",
"user_type",
")",
"# Check the rest of the arguments.",
"if",
"exclude",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"exclude",
",",
"list",
")",
":",
"exclude",
"=",
"_SArray",
"(",
"exclude",
",",
"dtype",
"=",
"item_type",
")",
"if",
"isinstance",
"(",
"exclude",
",",
"_SArray",
")",
":",
"exclude",
"=",
"_SFrame",
"(",
"{",
"item_id",
":",
"exclude",
"}",
")",
"if",
"user_id",
"not",
"in",
"exclude",
".",
"column_names",
"(",
")",
":",
"exclude",
"[",
"user_id",
"]",
"=",
"self",
".",
"_implicit_user_name",
"exclude",
"[",
"user_id",
"]",
"=",
"exclude",
"[",
"user_id",
"]",
".",
"astype",
"(",
"user_type",
")",
"recommendations",
"=",
"self",
".",
"recommend",
"(",
"users",
"=",
"users",
",",
"new_observation_data",
"=",
"observed_items",
",",
"k",
"=",
"k",
",",
"items",
"=",
"items",
",",
"new_user_data",
"=",
"new_user_data",
",",
"new_item_data",
"=",
"new_item_data",
",",
"exclude_known",
"=",
"exclude_known",
",",
"diversity",
"=",
"diversity",
",",
"random_seed",
"=",
"random_seed",
",",
"verbose",
"=",
"verbose",
")",
"del",
"recommendations",
"[",
"user_id",
"]",
"return",
"recommendations"
] | Recommend the ``k`` highest scored items based on the
interactions given in `observed_items.`
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
new_observation_data.., are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts chooses a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
A SFrame with the top ranked items for each user. The
columns are: ``item_id``, *score*, and *rank*, where
``user_id`` and ``item_id`` match the user and item column
names specified at training time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
observed_items: list, SArray, or SFrame | [
"Recommend",
"the",
"k",
"highest",
"scored",
"items",
"based",
"on",
"the",
"interactions",
"given",
"in",
"observed_items",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1310-L1470 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.evaluate_precision_recall | def evaluate_precision_recall(self, dataset, cutoffs=list(range(1,11,1))+list(range(11,50,5)),
skip_set=None, exclude_known=True,
verbose=True, **kwargs):
"""
Compute a model's precision and recall scores for a particular dataset.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
This will be compared to the model's recommendations, which exclude
the (user, item) pairs seen at training time.
cutoffs : list, optional
A list of cutoff values for which one wants to evaluate precision
and recall, i.e. the value of k in "precision at k".
skip_set : SFrame, optional
Passed to :meth:`recommend` as ``exclude``.
exclude_known : bool, optional
Passed to :meth:`recommend` as ``exclude_known``. If True, exclude
training item from recommendation.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
Additional keyword arguments are passed to the recommend
function, whose returned recommendations are used for evaluating
precision and recall of the model.
Returns
-------
out : dict
Contains the precision and recall at each cutoff value and each
user in ``dataset``.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train)
>>> m.evaluate_precision_recall(test)
See Also
--------
turicreate.recommender.util.precision_recall_by_user
"""
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
dataset = self.__prepare_dataset_parameter(dataset)
users = dataset[self.user_id].unique()
dataset = dataset[[self.user_id, self.item_id]]
recs = self.recommend(users=users, k=max(cutoffs), exclude=skip_set,
exclude_known=exclude_known,
verbose=verbose,
**kwargs)
precision_recall_by_user = self.__proxy__.precision_recall_by_user(dataset, recs, cutoffs)
ret = {'precision_recall_by_user': precision_recall_by_user}
pr_agg = precision_recall_by_user.groupby(
'cutoff',
operations={'precision' : _Aggregate.MEAN('precision'),
'recall' : _Aggregate.MEAN('recall')})
pr_agg = pr_agg[['cutoff', 'precision', 'recall']]
ret["precision_recall_overall"] = pr_agg.sort("cutoff")
return ret | python | def evaluate_precision_recall(self, dataset, cutoffs=list(range(1,11,1))+list(range(11,50,5)),
skip_set=None, exclude_known=True,
verbose=True, **kwargs):
"""
Compute a model's precision and recall scores for a particular dataset.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
This will be compared to the model's recommendations, which exclude
the (user, item) pairs seen at training time.
cutoffs : list, optional
A list of cutoff values for which one wants to evaluate precision
and recall, i.e. the value of k in "precision at k".
skip_set : SFrame, optional
Passed to :meth:`recommend` as ``exclude``.
exclude_known : bool, optional
Passed to :meth:`recommend` as ``exclude_known``. If True, exclude
training item from recommendation.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
Additional keyword arguments are passed to the recommend
function, whose returned recommendations are used for evaluating
precision and recall of the model.
Returns
-------
out : dict
Contains the precision and recall at each cutoff value and each
user in ``dataset``.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train)
>>> m.evaluate_precision_recall(test)
See Also
--------
turicreate.recommender.util.precision_recall_by_user
"""
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
dataset = self.__prepare_dataset_parameter(dataset)
users = dataset[self.user_id].unique()
dataset = dataset[[self.user_id, self.item_id]]
recs = self.recommend(users=users, k=max(cutoffs), exclude=skip_set,
exclude_known=exclude_known,
verbose=verbose,
**kwargs)
precision_recall_by_user = self.__proxy__.precision_recall_by_user(dataset, recs, cutoffs)
ret = {'precision_recall_by_user': precision_recall_by_user}
pr_agg = precision_recall_by_user.groupby(
'cutoff',
operations={'precision' : _Aggregate.MEAN('precision'),
'recall' : _Aggregate.MEAN('recall')})
pr_agg = pr_agg[['cutoff', 'precision', 'recall']]
ret["precision_recall_overall"] = pr_agg.sort("cutoff")
return ret | [
"def",
"evaluate_precision_recall",
"(",
"self",
",",
"dataset",
",",
"cutoffs",
"=",
"list",
"(",
"range",
"(",
"1",
",",
"11",
",",
"1",
")",
")",
"+",
"list",
"(",
"range",
"(",
"11",
",",
"50",
",",
"5",
")",
")",
",",
"skip_set",
"=",
"None",
",",
"exclude_known",
"=",
"True",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"user_column",
"=",
"self",
".",
"user_id",
"item_column",
"=",
"self",
".",
"item_id",
"assert",
"user_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
"and",
"item_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided data set must have a column pertaining to user ids and \\\n item ids, similar to what we had during training.'",
"dataset",
"=",
"self",
".",
"__prepare_dataset_parameter",
"(",
"dataset",
")",
"users",
"=",
"dataset",
"[",
"self",
".",
"user_id",
"]",
".",
"unique",
"(",
")",
"dataset",
"=",
"dataset",
"[",
"[",
"self",
".",
"user_id",
",",
"self",
".",
"item_id",
"]",
"]",
"recs",
"=",
"self",
".",
"recommend",
"(",
"users",
"=",
"users",
",",
"k",
"=",
"max",
"(",
"cutoffs",
")",
",",
"exclude",
"=",
"skip_set",
",",
"exclude_known",
"=",
"exclude_known",
",",
"verbose",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"precision_recall_by_user",
"=",
"self",
".",
"__proxy__",
".",
"precision_recall_by_user",
"(",
"dataset",
",",
"recs",
",",
"cutoffs",
")",
"ret",
"=",
"{",
"'precision_recall_by_user'",
":",
"precision_recall_by_user",
"}",
"pr_agg",
"=",
"precision_recall_by_user",
".",
"groupby",
"(",
"'cutoff'",
",",
"operations",
"=",
"{",
"'precision'",
":",
"_Aggregate",
".",
"MEAN",
"(",
"'precision'",
")",
",",
"'recall'",
":",
"_Aggregate",
".",
"MEAN",
"(",
"'recall'",
")",
"}",
")",
"pr_agg",
"=",
"pr_agg",
"[",
"[",
"'cutoff'",
",",
"'precision'",
",",
"'recall'",
"]",
"]",
"ret",
"[",
"\"precision_recall_overall\"",
"]",
"=",
"pr_agg",
".",
"sort",
"(",
"\"cutoff\"",
")",
"return",
"ret"
] | Compute a model's precision and recall scores for a particular dataset.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
This will be compared to the model's recommendations, which exclude
the (user, item) pairs seen at training time.
cutoffs : list, optional
A list of cutoff values for which one wants to evaluate precision
and recall, i.e. the value of k in "precision at k".
skip_set : SFrame, optional
Passed to :meth:`recommend` as ``exclude``.
exclude_known : bool, optional
Passed to :meth:`recommend` as ``exclude_known``. If True, exclude
training item from recommendation.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
Additional keyword arguments are passed to the recommend
function, whose returned recommendations are used for evaluating
precision and recall of the model.
Returns
-------
out : dict
Contains the precision and recall at each cutoff value and each
user in ``dataset``.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train)
>>> m.evaluate_precision_recall(test)
See Also
--------
turicreate.recommender.util.precision_recall_by_user | [
"Compute",
"a",
"model",
"s",
"precision",
"and",
"recall",
"scores",
"for",
"a",
"particular",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1492-L1574 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.evaluate_rmse | def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} | python | def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} | [
"def",
"evaluate_rmse",
"(",
"self",
",",
"dataset",
",",
"target",
")",
":",
"assert",
"target",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided dataset must contain a target column with the same \\\n name as the target used during training.'",
"y",
"=",
"dataset",
"[",
"target",
"]",
"yhat",
"=",
"self",
".",
"predict",
"(",
"dataset",
")",
"user_column",
"=",
"self",
".",
"user_id",
"item_column",
"=",
"self",
".",
"item_id",
"assert",
"user_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
"and",
"item_column",
"in",
"dataset",
".",
"column_names",
"(",
")",
",",
"'Provided data set must have a column pertaining to user ids and \\\n item ids, similar to what we had during training.'",
"result",
"=",
"dataset",
"[",
"[",
"user_column",
",",
"item_column",
"]",
"]",
"result",
"[",
"'sq_error'",
"]",
"=",
"(",
"y",
"-",
"yhat",
")",
"*",
"(",
"y",
"-",
"yhat",
")",
"rmse_by_user",
"=",
"result",
".",
"groupby",
"(",
"user_column",
",",
"{",
"'rmse'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'sq_error'",
")",
",",
"'count'",
":",
"_turicreate",
".",
"aggregate",
".",
"COUNT",
"}",
")",
"rmse_by_user",
"[",
"'rmse'",
"]",
"=",
"rmse_by_user",
"[",
"'rmse'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"**",
".5",
")",
"rmse_by_item",
"=",
"result",
".",
"groupby",
"(",
"item_column",
",",
"{",
"'rmse'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'sq_error'",
")",
",",
"'count'",
":",
"_turicreate",
".",
"aggregate",
".",
"COUNT",
"}",
")",
"rmse_by_item",
"[",
"'rmse'",
"]",
"=",
"rmse_by_item",
"[",
"'rmse'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"**",
".5",
")",
"overall_rmse",
"=",
"result",
"[",
"'sq_error'",
"]",
".",
"mean",
"(",
")",
"**",
".5",
"return",
"{",
"'rmse_by_user'",
":",
"rmse_by_user",
",",
"'rmse_by_item'",
":",
"rmse_by_item",
",",
"'rmse_overall'",
":",
"overall_rmse",
"}"
] | Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse | [
"Evaluate",
"the",
"prediction",
"error",
"for",
"each",
"user",
"-",
"item",
"pair",
"in",
"the",
"given",
"data",
"set",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1576-L1635 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.evaluate | def evaluate(self, dataset, metric='auto',
exclude_known_for_precision_recall=True,
target=None,
verbose=True, **kwargs):
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
"""
ret = {}
dataset = self.__prepare_dataset_parameter(dataset)
# If the model does not have a target column, compute prec-recall.
if metric in ['precision_recall', 'auto']:
results = self.evaluate_precision_recall(dataset,
exclude_known=exclude_known_for_precision_recall,
verbose=verbose,
**kwargs)
ret.update(results)
if verbose:
print("\nPrecision and recall summary statistics by cutoff")
print(results['precision_recall_by_user'].groupby('cutoff', \
{'mean_precision': _turicreate.aggregate.AVG('precision'),
'mean_recall': _turicreate.aggregate.AVG('recall')}).topk('cutoff', reverse=True))
if metric in ['rmse', 'auto']:
if target is None:
target = self.target
if target is None or target == "":
_logging.warning("Model trained without a target. Skipping RMSE computation.")
else:
results = self.evaluate_rmse(dataset, target)
ret.update(results)
if verbose:
print("\nOverall RMSE:", results['rmse_overall'])
print("\nPer User RMSE (best)")
print(results['rmse_by_user'].topk('rmse', 1, reverse=True))
print("\nPer User RMSE (worst)")
print(results['rmse_by_user'].topk('rmse', 1))
print("\nPer Item RMSE (best)")
print(results['rmse_by_item'].topk('rmse', 1, reverse=True))
print("\nPer Item RMSE (worst)")
print(results['rmse_by_item'].topk('rmse', 1))
if metric not in ['rmse', 'precision_recall', 'auto']:
raise ValueError('Unknown evaluation metric %s, supported metrics are [\"rmse\", \"precision_recall\"]' % metric)
return ret | python | def evaluate(self, dataset, metric='auto',
exclude_known_for_precision_recall=True,
target=None,
verbose=True, **kwargs):
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
"""
ret = {}
dataset = self.__prepare_dataset_parameter(dataset)
# If the model does not have a target column, compute prec-recall.
if metric in ['precision_recall', 'auto']:
results = self.evaluate_precision_recall(dataset,
exclude_known=exclude_known_for_precision_recall,
verbose=verbose,
**kwargs)
ret.update(results)
if verbose:
print("\nPrecision and recall summary statistics by cutoff")
print(results['precision_recall_by_user'].groupby('cutoff', \
{'mean_precision': _turicreate.aggregate.AVG('precision'),
'mean_recall': _turicreate.aggregate.AVG('recall')}).topk('cutoff', reverse=True))
if metric in ['rmse', 'auto']:
if target is None:
target = self.target
if target is None or target == "":
_logging.warning("Model trained without a target. Skipping RMSE computation.")
else:
results = self.evaluate_rmse(dataset, target)
ret.update(results)
if verbose:
print("\nOverall RMSE:", results['rmse_overall'])
print("\nPer User RMSE (best)")
print(results['rmse_by_user'].topk('rmse', 1, reverse=True))
print("\nPer User RMSE (worst)")
print(results['rmse_by_user'].topk('rmse', 1))
print("\nPer Item RMSE (best)")
print(results['rmse_by_item'].topk('rmse', 1, reverse=True))
print("\nPer Item RMSE (worst)")
print(results['rmse_by_item'].topk('rmse', 1))
if metric not in ['rmse', 'precision_recall', 'auto']:
raise ValueError('Unknown evaluation metric %s, supported metrics are [\"rmse\", \"precision_recall\"]' % metric)
return ret | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"exclude_known_for_precision_recall",
"=",
"True",
",",
"target",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"}",
"dataset",
"=",
"self",
".",
"__prepare_dataset_parameter",
"(",
"dataset",
")",
"# If the model does not have a target column, compute prec-recall.",
"if",
"metric",
"in",
"[",
"'precision_recall'",
",",
"'auto'",
"]",
":",
"results",
"=",
"self",
".",
"evaluate_precision_recall",
"(",
"dataset",
",",
"exclude_known",
"=",
"exclude_known_for_precision_recall",
",",
"verbose",
"=",
"verbose",
",",
"*",
"*",
"kwargs",
")",
"ret",
".",
"update",
"(",
"results",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\\nPrecision and recall summary statistics by cutoff\"",
")",
"print",
"(",
"results",
"[",
"'precision_recall_by_user'",
"]",
".",
"groupby",
"(",
"'cutoff'",
",",
"{",
"'mean_precision'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'precision'",
")",
",",
"'mean_recall'",
":",
"_turicreate",
".",
"aggregate",
".",
"AVG",
"(",
"'recall'",
")",
"}",
")",
".",
"topk",
"(",
"'cutoff'",
",",
"reverse",
"=",
"True",
")",
")",
"if",
"metric",
"in",
"[",
"'rmse'",
",",
"'auto'",
"]",
":",
"if",
"target",
"is",
"None",
":",
"target",
"=",
"self",
".",
"target",
"if",
"target",
"is",
"None",
"or",
"target",
"==",
"\"\"",
":",
"_logging",
".",
"warning",
"(",
"\"Model trained without a target. Skipping RMSE computation.\"",
")",
"else",
":",
"results",
"=",
"self",
".",
"evaluate_rmse",
"(",
"dataset",
",",
"target",
")",
"ret",
".",
"update",
"(",
"results",
")",
"if",
"verbose",
":",
"print",
"(",
"\"\\nOverall RMSE:\"",
",",
"results",
"[",
"'rmse_overall'",
"]",
")",
"print",
"(",
"\"\\nPer User RMSE (best)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_user'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
",",
"reverse",
"=",
"True",
")",
")",
"print",
"(",
"\"\\nPer User RMSE (worst)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_user'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
")",
")",
"print",
"(",
"\"\\nPer Item RMSE (best)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_item'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
",",
"reverse",
"=",
"True",
")",
")",
"print",
"(",
"\"\\nPer Item RMSE (worst)\"",
")",
"print",
"(",
"results",
"[",
"'rmse_by_item'",
"]",
".",
"topk",
"(",
"'rmse'",
",",
"1",
")",
")",
"if",
"metric",
"not",
"in",
"[",
"'rmse'",
",",
"'precision_recall'",
",",
"'auto'",
"]",
":",
"raise",
"ValueError",
"(",
"'Unknown evaluation metric %s, supported metrics are [\\\"rmse\\\", \\\"precision_recall\\\"]'",
"%",
"metric",
")",
"return",
"ret"
] | r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user | [
"r",
"Evaluate",
"the",
"model",
"s",
"ability",
"to",
"make",
"rating",
"predictions",
"or",
"recommendations",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1637-L1761 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._get_popularity_baseline | def _get_popularity_baseline(self):
"""
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
"""
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) | python | def _get_popularity_baseline(self):
"""
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
"""
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) | [
"def",
"_get_popularity_baseline",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"__proxy__",
".",
"get_popularity_baseline",
"(",
")",
"from",
".",
"popularity_recommender",
"import",
"PopularityRecommender",
"return",
"PopularityRecommender",
"(",
"response",
")"
] | Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes. | [
"Returns",
"a",
"new",
"popularity",
"model",
"matching",
"the",
"data",
"set",
"this",
"model",
"was",
"trained",
"with",
".",
"Can",
"be",
"used",
"for",
"comparison",
"purposes",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1763-L1772 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._get_item_intersection_info | def _get_item_intersection_info(self, item_pairs):
"""
For a collection of item -> item pairs, returns information about the
users in that intersection.
Parameters
----------
item_pairs : 2-column SFrame of two item columns, or a list of
(item_1, item_2) tuples.
Returns
-------
out : SFrame
A SFrame with the two item columns given above, the number of
users that rated each, and a dictionary mapping the user to a
pair of the ratings, with the first rating being the rating of
the first item and the second being the rating of the second item.
If no ratings are provided, these values are always 1.0.
"""
if type(item_pairs) is list:
if not all(type(t) in [list, tuple] and len(t) == 2 for t in item_pairs):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
item_name = self.item_id
item_pairs = _turicreate.SFrame({item_name + "_1" : [v1 for v1, v2 in item_pairs],
item_name + "_2" : [v2 for v1, v2 in item_pairs]})
if not isinstance(item_pairs, _turicreate.SFrame):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
response = self.__proxy__.get_item_intersection_info(item_pairs)
return response | python | def _get_item_intersection_info(self, item_pairs):
"""
For a collection of item -> item pairs, returns information about the
users in that intersection.
Parameters
----------
item_pairs : 2-column SFrame of two item columns, or a list of
(item_1, item_2) tuples.
Returns
-------
out : SFrame
A SFrame with the two item columns given above, the number of
users that rated each, and a dictionary mapping the user to a
pair of the ratings, with the first rating being the rating of
the first item and the second being the rating of the second item.
If no ratings are provided, these values are always 1.0.
"""
if type(item_pairs) is list:
if not all(type(t) in [list, tuple] and len(t) == 2 for t in item_pairs):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
item_name = self.item_id
item_pairs = _turicreate.SFrame({item_name + "_1" : [v1 for v1, v2 in item_pairs],
item_name + "_2" : [v2 for v1, v2 in item_pairs]})
if not isinstance(item_pairs, _turicreate.SFrame):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
response = self.__proxy__.get_item_intersection_info(item_pairs)
return response | [
"def",
"_get_item_intersection_info",
"(",
"self",
",",
"item_pairs",
")",
":",
"if",
"type",
"(",
"item_pairs",
")",
"is",
"list",
":",
"if",
"not",
"all",
"(",
"type",
"(",
"t",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
"and",
"len",
"(",
"t",
")",
"==",
"2",
"for",
"t",
"in",
"item_pairs",
")",
":",
"raise",
"TypeError",
"(",
"\"item_pairs must be 2-column SFrame of two item \"",
"\"columns, or a list of (item_1, item_2) tuples. \"",
")",
"item_name",
"=",
"self",
".",
"item_id",
"item_pairs",
"=",
"_turicreate",
".",
"SFrame",
"(",
"{",
"item_name",
"+",
"\"_1\"",
":",
"[",
"v1",
"for",
"v1",
",",
"v2",
"in",
"item_pairs",
"]",
",",
"item_name",
"+",
"\"_2\"",
":",
"[",
"v2",
"for",
"v1",
",",
"v2",
"in",
"item_pairs",
"]",
"}",
")",
"if",
"not",
"isinstance",
"(",
"item_pairs",
",",
"_turicreate",
".",
"SFrame",
")",
":",
"raise",
"TypeError",
"(",
"\"item_pairs must be 2-column SFrame of two item \"",
"\"columns, or a list of (item_1, item_2) tuples. \"",
")",
"response",
"=",
"self",
".",
"__proxy__",
".",
"get_item_intersection_info",
"(",
"item_pairs",
")",
"return",
"response"
] | For a collection of item -> item pairs, returns information about the
users in that intersection.
Parameters
----------
item_pairs : 2-column SFrame of two item columns, or a list of
(item_1, item_2) tuples.
Returns
-------
out : SFrame
A SFrame with the two item columns given above, the number of
users that rated each, and a dictionary mapping the user to a
pair of the ratings, with the first rating being the rating of
the first item and the second being the rating of the second item.
If no ratings are provided, these values are always 1.0. | [
"For",
"a",
"collection",
"of",
"item",
"-",
">",
"item",
"pairs",
"returns",
"information",
"about",
"the",
"users",
"in",
"that",
"intersection",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1774-L1809 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.export_coreml | def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml('myModel.mlmodel')
"""
print('This model is exported as a custom Core ML model. In order to use it in your\n'
'application, you must also include "libRecommender.dylib". For additional\n'
'details see:\n'
'https://apple.github.io/turicreate/docs/userguide/recommender/coreml-deployment.html')
import turicreate as tc
self.__proxy__.export_to_coreml(filename) | python | def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml('myModel.mlmodel')
"""
print('This model is exported as a custom Core ML model. In order to use it in your\n'
'application, you must also include "libRecommender.dylib". For additional\n'
'details see:\n'
'https://apple.github.io/turicreate/docs/userguide/recommender/coreml-deployment.html')
import turicreate as tc
self.__proxy__.export_to_coreml(filename) | [
"def",
"export_coreml",
"(",
"self",
",",
"filename",
")",
":",
"print",
"(",
"'This model is exported as a custom Core ML model. In order to use it in your\\n'",
"'application, you must also include \"libRecommender.dylib\". For additional\\n'",
"'details see:\\n'",
"'https://apple.github.io/turicreate/docs/userguide/recommender/coreml-deployment.html'",
")",
"import",
"turicreate",
"as",
"tc",
"self",
".",
"__proxy__",
".",
"export_to_coreml",
"(",
"filename",
")"
] | Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml('myModel.mlmodel') | [
"Export",
"the",
"model",
"in",
"Core",
"ML",
"format",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1811-L1830 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/random_forest_regression.py | RandomForestRegression.evaluate | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
'rmse' : Rooted mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(RandomForestRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | python | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
'rmse' : Rooted mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
"""
_raise_error_evaluation_metric_is_valid(
metric, ['auto', 'rmse', 'max_error'])
return super(RandomForestRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"_raise_error_evaluation_metric_is_valid",
"(",
"metric",
",",
"[",
"'auto'",
",",
"'rmse'",
",",
"'max_error'",
"]",
")",
"return",
"super",
"(",
"RandomForestRegression",
",",
"self",
")",
".",
"evaluate",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
",",
"metric",
"=",
"metric",
")"
] | Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
'rmse' : Rooted mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse') | [
"Evaluate",
"the",
"model",
"on",
"the",
"given",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/random_forest_regression.py#L179-L228 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/regression/random_forest_regression.py | RandomForestRegression.predict | def predict(self, dataset, missing_value_action='auto'):
"""
Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.random_forest_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata)
"""
return super(RandomForestRegression, self).predict(dataset,
output_type='margin',
missing_value_action=missing_value_action) | python | def predict(self, dataset, missing_value_action='auto'):
"""
Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.random_forest_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata)
"""
return super(RandomForestRegression, self).predict(dataset,
output_type='margin',
missing_value_action=missing_value_action) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"return",
"super",
"(",
"RandomForestRegression",
",",
"self",
")",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"'margin'",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] | Predict the target column of the given dataset.
The target column is provided during
:func:`~turicreate.random_forest_regression.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, predict
Examples
--------
>>> m.predict(testdata) | [
"Predict",
"the",
"target",
"column",
"of",
"the",
"given",
"dataset",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/random_forest_regression.py#L230-L272 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/feature_vectorizer.py | create_feature_vectorizer | def create_feature_vectorizer(input_features, output_feature_name,
known_size_map = {}):
"""
Creates a feature vectorizer from input features, return the spec for
a feature vectorizer that puts everything into a single array of length
equal to the total size of all the input features. Returns a 2-tuple
`(spec, num_dimension)`
Parameters
----------
input_features: [list of 2-tuples]
Name(s) of the input features, given as a list of `('name', datatype)`
tuples. The datatypes entry is one of the data types defined in the
:ref:`datatypes` module. Allowed datatypes are :ref:`datatype.Int64`,
:ref:`datatype.Double`, :ref:`datatypes.Dictionary`,
or :ref:`datatype.Array`.
If the feature is a dictionary type, then the dictionary must have integer
keys, and the number of dimensions to expand it into must be given by
`known_size_map`.
Feature indices in the final array are counted sequentially from the
from 0 through the total number of features.
output_feature_name: str
The name of the output feature. The type is an Array
List of output feature of the network.
known_size_map:
A dictionary mapping the feature name to the expanded size in the final
array. This is most useful for specifying the size of sparse vectors
given as dictionaries of index to value.
"""
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
input_features = process_or_validate_features(input_features)
feature_vectorizer = spec.featureVectorizer
num_output_dimensions = 0
for n, ft in input_features:
if n in known_size_map:
dim = known_size_map[n]
if ft.num_elements is not None:
if dim != ft.num_elements:
raise ValueError(("In feature %s, override size (%d) not "
"compatible with inherent value size (%d).")
% (n, dim, ft.num_elements))
else:
if ft.num_elements is None:
raise ValueError("In feature %s, inherent size unknown so must be manually supplied.")
dim = ft.num_elements
num_output_dimensions += dim
new_feature = feature_vectorizer.inputList.add()
new_feature.inputColumn = n
new_feature.inputDimensions = dim
if not isinstance(output_feature_name, _string_types):
if (is_valid_feature_list(output_feature_name)
and len(output_feature_name) == 1
and output_feature_name[0][1] == datatypes.Array(num_output_dimensions)):
output_feature_name = output_feature_name[0][0]
else:
raise TypeError("Output feature must be specified as a "
"feature name or correct output feature list.")
output_features = [(output_feature_name, datatypes.Array(num_output_dimensions))]
set_transform_interface_params(spec, input_features, output_features)
return spec, num_output_dimensions | python | def create_feature_vectorizer(input_features, output_feature_name,
known_size_map = {}):
"""
Creates a feature vectorizer from input features, return the spec for
a feature vectorizer that puts everything into a single array of length
equal to the total size of all the input features. Returns a 2-tuple
`(spec, num_dimension)`
Parameters
----------
input_features: [list of 2-tuples]
Name(s) of the input features, given as a list of `('name', datatype)`
tuples. The datatypes entry is one of the data types defined in the
:ref:`datatypes` module. Allowed datatypes are :ref:`datatype.Int64`,
:ref:`datatype.Double`, :ref:`datatypes.Dictionary`,
or :ref:`datatype.Array`.
If the feature is a dictionary type, then the dictionary must have integer
keys, and the number of dimensions to expand it into must be given by
`known_size_map`.
Feature indices in the final array are counted sequentially from the
from 0 through the total number of features.
output_feature_name: str
The name of the output feature. The type is an Array
List of output feature of the network.
known_size_map:
A dictionary mapping the feature name to the expanded size in the final
array. This is most useful for specifying the size of sparse vectors
given as dictionaries of index to value.
"""
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
input_features = process_or_validate_features(input_features)
feature_vectorizer = spec.featureVectorizer
num_output_dimensions = 0
for n, ft in input_features:
if n in known_size_map:
dim = known_size_map[n]
if ft.num_elements is not None:
if dim != ft.num_elements:
raise ValueError(("In feature %s, override size (%d) not "
"compatible with inherent value size (%d).")
% (n, dim, ft.num_elements))
else:
if ft.num_elements is None:
raise ValueError("In feature %s, inherent size unknown so must be manually supplied.")
dim = ft.num_elements
num_output_dimensions += dim
new_feature = feature_vectorizer.inputList.add()
new_feature.inputColumn = n
new_feature.inputDimensions = dim
if not isinstance(output_feature_name, _string_types):
if (is_valid_feature_list(output_feature_name)
and len(output_feature_name) == 1
and output_feature_name[0][1] == datatypes.Array(num_output_dimensions)):
output_feature_name = output_feature_name[0][0]
else:
raise TypeError("Output feature must be specified as a "
"feature name or correct output feature list.")
output_features = [(output_feature_name, datatypes.Array(num_output_dimensions))]
set_transform_interface_params(spec, input_features, output_features)
return spec, num_output_dimensions | [
"def",
"create_feature_vectorizer",
"(",
"input_features",
",",
"output_feature_name",
",",
"known_size_map",
"=",
"{",
"}",
")",
":",
"spec",
"=",
"_Model_pb2",
".",
"Model",
"(",
")",
"spec",
".",
"specificationVersion",
"=",
"SPECIFICATION_VERSION",
"input_features",
"=",
"process_or_validate_features",
"(",
"input_features",
")",
"feature_vectorizer",
"=",
"spec",
".",
"featureVectorizer",
"num_output_dimensions",
"=",
"0",
"for",
"n",
",",
"ft",
"in",
"input_features",
":",
"if",
"n",
"in",
"known_size_map",
":",
"dim",
"=",
"known_size_map",
"[",
"n",
"]",
"if",
"ft",
".",
"num_elements",
"is",
"not",
"None",
":",
"if",
"dim",
"!=",
"ft",
".",
"num_elements",
":",
"raise",
"ValueError",
"(",
"(",
"\"In feature %s, override size (%d) not \"",
"\"compatible with inherent value size (%d).\"",
")",
"%",
"(",
"n",
",",
"dim",
",",
"ft",
".",
"num_elements",
")",
")",
"else",
":",
"if",
"ft",
".",
"num_elements",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"In feature %s, inherent size unknown so must be manually supplied.\"",
")",
"dim",
"=",
"ft",
".",
"num_elements",
"num_output_dimensions",
"+=",
"dim",
"new_feature",
"=",
"feature_vectorizer",
".",
"inputList",
".",
"add",
"(",
")",
"new_feature",
".",
"inputColumn",
"=",
"n",
"new_feature",
".",
"inputDimensions",
"=",
"dim",
"if",
"not",
"isinstance",
"(",
"output_feature_name",
",",
"_string_types",
")",
":",
"if",
"(",
"is_valid_feature_list",
"(",
"output_feature_name",
")",
"and",
"len",
"(",
"output_feature_name",
")",
"==",
"1",
"and",
"output_feature_name",
"[",
"0",
"]",
"[",
"1",
"]",
"==",
"datatypes",
".",
"Array",
"(",
"num_output_dimensions",
")",
")",
":",
"output_feature_name",
"=",
"output_feature_name",
"[",
"0",
"]",
"[",
"0",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"Output feature must be specified as a \"",
"\"feature name or correct output feature list.\"",
")",
"output_features",
"=",
"[",
"(",
"output_feature_name",
",",
"datatypes",
".",
"Array",
"(",
"num_output_dimensions",
")",
")",
"]",
"set_transform_interface_params",
"(",
"spec",
",",
"input_features",
",",
"output_features",
")",
"return",
"spec",
",",
"num_output_dimensions"
] | Creates a feature vectorizer from input features, return the spec for
a feature vectorizer that puts everything into a single array of length
equal to the total size of all the input features. Returns a 2-tuple
`(spec, num_dimension)`
Parameters
----------
input_features: [list of 2-tuples]
Name(s) of the input features, given as a list of `('name', datatype)`
tuples. The datatypes entry is one of the data types defined in the
:ref:`datatypes` module. Allowed datatypes are :ref:`datatype.Int64`,
:ref:`datatype.Double`, :ref:`datatypes.Dictionary`,
or :ref:`datatype.Array`.
If the feature is a dictionary type, then the dictionary must have integer
keys, and the number of dimensions to expand it into must be given by
`known_size_map`.
Feature indices in the final array are counted sequentially from the
from 0 through the total number of features.
output_feature_name: str
The name of the output feature. The type is an Array
List of output feature of the network.
known_size_map:
A dictionary mapping the feature name to the expanded size in the final
array. This is most useful for specifying the size of sparse vectors
given as dictionaries of index to value. | [
"Creates",
"a",
"feature",
"vectorizer",
"from",
"input",
"features",
"return",
"the",
"spec",
"for",
"a",
"feature",
"vectorizer",
"that",
"puts",
"everything",
"into",
"a",
"single",
"array",
"of",
"length",
"equal",
"to",
"the",
"total",
"size",
"of",
"all",
"the",
"input",
"features",
".",
"Returns",
"a",
"2",
"-",
"tuple",
"(",
"spec",
"num_dimension",
")"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/feature_vectorizer.py#L15-L94 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/common.py | utils.query_boost_version | def query_boost_version(boost_root):
'''
Read in the Boost version from a given boost_root.
'''
boost_version = None
if os.path.exists(os.path.join(boost_root,'Jamroot')):
with codecs.open(os.path.join(boost_root,'Jamroot'), 'r', 'utf-8') as f:
for line in f.readlines():
parts = line.split()
if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
boost_version = parts[3]
break
if not boost_version:
boost_version = 'default'
return boost_version | python | def query_boost_version(boost_root):
'''
Read in the Boost version from a given boost_root.
'''
boost_version = None
if os.path.exists(os.path.join(boost_root,'Jamroot')):
with codecs.open(os.path.join(boost_root,'Jamroot'), 'r', 'utf-8') as f:
for line in f.readlines():
parts = line.split()
if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
boost_version = parts[3]
break
if not boost_version:
boost_version = 'default'
return boost_version | [
"def",
"query_boost_version",
"(",
"boost_root",
")",
":",
"boost_version",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"boost_root",
",",
"'Jamroot'",
")",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"boost_root",
",",
"'Jamroot'",
")",
",",
"'r'",
",",
"'utf-8'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"parts",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"parts",
")",
">=",
"5",
"and",
"parts",
"[",
"1",
"]",
"==",
"'BOOST_VERSION'",
":",
"boost_version",
"=",
"parts",
"[",
"3",
"]",
"break",
"if",
"not",
"boost_version",
":",
"boost_version",
"=",
"'default'",
"return",
"boost_version"
] | Read in the Boost version from a given boost_root. | [
"Read",
"in",
"the",
"Boost",
"version",
"from",
"a",
"given",
"boost_root",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/common.py#L421-L435 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/common.py | utils.git_clone | def git_clone(sub_repo, branch, commit = None, cwd = None, no_submodules = False):
'''
This clone mimicks the way Travis-CI clones a project's repo. So far
Travis-CI is the most limiting in the sense of only fetching partial
history of the repo.
'''
if not cwd:
cwd = cwd = os.getcwd()
root_dir = os.path.join(cwd,'boostorg',sub_repo)
if not os.path.exists(os.path.join(root_dir,'.git')):
utils.check_call("git","clone",
"--depth=1",
"--branch=%s"%(branch),
"https://github.com/boostorg/%s.git"%(sub_repo),
root_dir)
os.chdir(root_dir)
else:
os.chdir(root_dir)
utils.check_call("git","pull",
# "--depth=1", # Can't do depth as we get merge errors.
"--quiet","--no-recurse-submodules")
if commit:
utils.check_call("git","checkout","-qf",commit)
if os.path.exists(os.path.join('.git','modules')):
if sys.platform == 'win32':
utils.check_call('dir',os.path.join('.git','modules'))
else:
utils.check_call('ls','-la',os.path.join('.git','modules'))
if not no_submodules:
utils.check_call("git","submodule","--quiet","update",
"--quiet","--init","--recursive",
)
utils.check_call("git","submodule","--quiet","foreach","git","fetch")
return root_dir | python | def git_clone(sub_repo, branch, commit = None, cwd = None, no_submodules = False):
'''
This clone mimicks the way Travis-CI clones a project's repo. So far
Travis-CI is the most limiting in the sense of only fetching partial
history of the repo.
'''
if not cwd:
cwd = cwd = os.getcwd()
root_dir = os.path.join(cwd,'boostorg',sub_repo)
if not os.path.exists(os.path.join(root_dir,'.git')):
utils.check_call("git","clone",
"--depth=1",
"--branch=%s"%(branch),
"https://github.com/boostorg/%s.git"%(sub_repo),
root_dir)
os.chdir(root_dir)
else:
os.chdir(root_dir)
utils.check_call("git","pull",
# "--depth=1", # Can't do depth as we get merge errors.
"--quiet","--no-recurse-submodules")
if commit:
utils.check_call("git","checkout","-qf",commit)
if os.path.exists(os.path.join('.git','modules')):
if sys.platform == 'win32':
utils.check_call('dir',os.path.join('.git','modules'))
else:
utils.check_call('ls','-la',os.path.join('.git','modules'))
if not no_submodules:
utils.check_call("git","submodule","--quiet","update",
"--quiet","--init","--recursive",
)
utils.check_call("git","submodule","--quiet","foreach","git","fetch")
return root_dir | [
"def",
"git_clone",
"(",
"sub_repo",
",",
"branch",
",",
"commit",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"no_submodules",
"=",
"False",
")",
":",
"if",
"not",
"cwd",
":",
"cwd",
"=",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"root_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cwd",
",",
"'boostorg'",
",",
"sub_repo",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'.git'",
")",
")",
":",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"clone\"",
",",
"\"--depth=1\"",
",",
"\"--branch=%s\"",
"%",
"(",
"branch",
")",
",",
"\"https://github.com/boostorg/%s.git\"",
"%",
"(",
"sub_repo",
")",
",",
"root_dir",
")",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"else",
":",
"os",
".",
"chdir",
"(",
"root_dir",
")",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"pull\"",
",",
"# \"--depth=1\", # Can't do depth as we get merge errors.",
"\"--quiet\"",
",",
"\"--no-recurse-submodules\"",
")",
"if",
"commit",
":",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"checkout\"",
",",
"\"-qf\"",
",",
"commit",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'.git'",
",",
"'modules'",
")",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"utils",
".",
"check_call",
"(",
"'dir'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'.git'",
",",
"'modules'",
")",
")",
"else",
":",
"utils",
".",
"check_call",
"(",
"'ls'",
",",
"'-la'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'.git'",
",",
"'modules'",
")",
")",
"if",
"not",
"no_submodules",
":",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"submodule\"",
",",
"\"--quiet\"",
",",
"\"update\"",
",",
"\"--quiet\"",
",",
"\"--init\"",
",",
"\"--recursive\"",
",",
")",
"utils",
".",
"check_call",
"(",
"\"git\"",
",",
"\"submodule\"",
",",
"\"--quiet\"",
",",
"\"foreach\"",
",",
"\"git\"",
",",
"\"fetch\"",
")",
"return",
"root_dir"
] | This clone mimicks the way Travis-CI clones a project's repo. So far
Travis-CI is the most limiting in the sense of only fetching partial
history of the repo. | [
"This",
"clone",
"mimicks",
"the",
"way",
"Travis",
"-",
"CI",
"clones",
"a",
"project",
"s",
"repo",
".",
"So",
"far",
"Travis",
"-",
"CI",
"is",
"the",
"most",
"limiting",
"in",
"the",
"sense",
"of",
"only",
"fetching",
"partial",
"history",
"of",
"the",
"repo",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/common.py#L438-L471 | train |
apple/turicreate | deps/src/boost_1_68_0/libs/predef/tools/ci/common.py | ci_travis.install_toolset | def install_toolset(self, toolset):
'''
Installs specific toolset on CI system.
'''
info = toolset_info[toolset]
if sys.platform.startswith('linux'):
os.chdir(self.work_dir)
if 'ppa' in info:
for ppa in info['ppa']:
utils.check_call(
'sudo','add-apt-repository','--yes',ppa)
if 'deb' in info:
utils.make_file('sources.list',
"deb %s"%(' '.join(info['deb'])),
"deb-src %s"%(' '.join(info['deb'])))
utils.check_call('sudo','bash','-c','cat sources.list >> /etc/apt/sources.list')
if 'apt-key' in info:
for key in info['apt-key']:
utils.check_call('wget',key,'-O','apt.key')
utils.check_call('sudo','apt-key','add','apt.key')
utils.check_call(
'sudo','apt-get','update','-qq')
utils.check_call(
'sudo','apt-get','install','-qq',info['package'])
if 'debugpackage' in info and info['debugpackage']:
utils.check_call(
'sudo','apt-get','install','-qq',info['debugpackage']) | python | def install_toolset(self, toolset):
'''
Installs specific toolset on CI system.
'''
info = toolset_info[toolset]
if sys.platform.startswith('linux'):
os.chdir(self.work_dir)
if 'ppa' in info:
for ppa in info['ppa']:
utils.check_call(
'sudo','add-apt-repository','--yes',ppa)
if 'deb' in info:
utils.make_file('sources.list',
"deb %s"%(' '.join(info['deb'])),
"deb-src %s"%(' '.join(info['deb'])))
utils.check_call('sudo','bash','-c','cat sources.list >> /etc/apt/sources.list')
if 'apt-key' in info:
for key in info['apt-key']:
utils.check_call('wget',key,'-O','apt.key')
utils.check_call('sudo','apt-key','add','apt.key')
utils.check_call(
'sudo','apt-get','update','-qq')
utils.check_call(
'sudo','apt-get','install','-qq',info['package'])
if 'debugpackage' in info and info['debugpackage']:
utils.check_call(
'sudo','apt-get','install','-qq',info['debugpackage']) | [
"def",
"install_toolset",
"(",
"self",
",",
"toolset",
")",
":",
"info",
"=",
"toolset_info",
"[",
"toolset",
"]",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'linux'",
")",
":",
"os",
".",
"chdir",
"(",
"self",
".",
"work_dir",
")",
"if",
"'ppa'",
"in",
"info",
":",
"for",
"ppa",
"in",
"info",
"[",
"'ppa'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'add-apt-repository'",
",",
"'--yes'",
",",
"ppa",
")",
"if",
"'deb'",
"in",
"info",
":",
"utils",
".",
"make_file",
"(",
"'sources.list'",
",",
"\"deb %s\"",
"%",
"(",
"' '",
".",
"join",
"(",
"info",
"[",
"'deb'",
"]",
")",
")",
",",
"\"deb-src %s\"",
"%",
"(",
"' '",
".",
"join",
"(",
"info",
"[",
"'deb'",
"]",
")",
")",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'bash'",
",",
"'-c'",
",",
"'cat sources.list >> /etc/apt/sources.list'",
")",
"if",
"'apt-key'",
"in",
"info",
":",
"for",
"key",
"in",
"info",
"[",
"'apt-key'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'wget'",
",",
"key",
",",
"'-O'",
",",
"'apt.key'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-key'",
",",
"'add'",
",",
"'apt.key'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'update'",
",",
"'-qq'",
")",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'install'",
",",
"'-qq'",
",",
"info",
"[",
"'package'",
"]",
")",
"if",
"'debugpackage'",
"in",
"info",
"and",
"info",
"[",
"'debugpackage'",
"]",
":",
"utils",
".",
"check_call",
"(",
"'sudo'",
",",
"'apt-get'",
",",
"'install'",
",",
"'-qq'",
",",
"info",
"[",
"'debugpackage'",
"]",
")"
] | Installs specific toolset on CI system. | [
"Installs",
"specific",
"toolset",
"on",
"CI",
"system",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/common.py#L683-L709 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/svm_classifier.py | create | def create(dataset, target, features=None,
penalty=1.0, solver='auto',
feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
class_weights = None,
validation_set = 'auto',
verbose=True):
"""
Create a :class:`~turicreate.svm_classifier.SVMClassifier` to predict the class of a binary
target variable based on a model of which side of a hyperplane the example
falls on. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
This loss function for the SVM model is the sum of an L1 mis-classification
loss (multiplied by the 'penalty' term) and a l2-norm on the weight vectors.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable
values. For example, a target variable with 'cat' and 'dog' as possible
values is mapped to 0 and 1 respectively with 0 being the base class
and 1 being the reference class.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert them to array in
case all entries in the list are of numeric types and separate them
out into different columns if they are of mixed type.
penalty : float, optional
Penalty term on the mis-classification loss of the model. The larger
this weight, the more the model coefficients shrink toward 0. The
larger the penalty, the lower is the emphasis placed on misclassified
examples, and the classifier would spend more time maximizing the
margin for correctly classified examples. The default value is 1.0;
this parameter must be set to a value of at least 1e-10.
solver : string, optional
Name of the solver to be used to solve the problem. See the
references for more detail on each solver. Available solvers are:
- *auto (default)*: automatically chooses the best solver (from the ones
listed below) for the data and model parameters.
- *lbfgs*: lLimited memory BFGS (``lbfgs``) is a robust solver for wide
datasets(i.e datasets with many coefficients).
The solvers are all automatically tuned and the default options should
function well. See the solver options guide for setting additional
parameters for each of the solvers.
feature_rescaling : bool, default = true
Feature rescaling is an important pre-processing step that ensures
that all features are on the same scale. An l2-norm rescaling is
performed to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that
are used to represent them. The coefficients are returned in original
scale of the problem.
convergence_threshold :
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are supposed to have weight one. The
`auto` mode set the class weight to be inversely proportional to number of
examples in the training data with the given class.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : SVMClassifier
A trained model of type
:class:`~turicreate.svm_classifier.SVMClassifier`.
See Also
--------
SVMClassifier
Notes
-----
- Categorical variables are encoded by creating dummy variables. For
a variable with :math:`K` categories, the encoding creates :math:`K-1`
dummy variables, while the first category encountered in the data is used
as the baseline.
- For prediction and evaluation of SVM models with sparse dictionary
inputs, new keys/columns that were not seen during training are silently
ignored.
- The penalty parameter is analogous to the 'C' term in the C-SVM. See the
reference on training SVMs for more details.
- Any 'None' values in the data will result in an error being thrown.
- A constant term of '1' is automatically added for the model intercept to
model the bias term.
- Note that the hinge loss is approximated by the scaled logistic loss
function. (See user guide for details)
References
----------
- `Wikipedia - Support Vector Machines
<http://en.wikipedia.org/wiki/svm>`_
- Zhang et al. - Modified Logistic Regression: An Approximation to
SVM and its Applications in Large-Scale Text Categorization (ICML 2003)
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf``, a list of feature columns
[``feature_1`` ... ``feature_K``], and a target column ``target`` with 0 and
1 values, create a
:class:`~turicreate.svm.SVMClassifier` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.svm_classifier.create(data, 'is_expensive')
"""
# Regression model names.
model_name = "classifier_svm"
solver = solver.lower()
model = _sl.create(dataset, target, model_name, features=features,
validation_set = validation_set, verbose = verbose,
penalty = penalty,
feature_rescaling = feature_rescaling,
convergence_threshold = convergence_threshold,
lbfgs_memory_level = lbfgs_memory_level,
max_iterations = max_iterations,
class_weights = class_weights)
return SVMClassifier(model.__proxy__) | python | def create(dataset, target, features=None,
penalty=1.0, solver='auto',
feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
class_weights = None,
validation_set = 'auto',
verbose=True):
"""
Create a :class:`~turicreate.svm_classifier.SVMClassifier` to predict the class of a binary
target variable based on a model of which side of a hyperplane the example
falls on. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
This loss function for the SVM model is the sum of an L1 mis-classification
loss (multiplied by the 'penalty' term) and a l2-norm on the weight vectors.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable
values. For example, a target variable with 'cat' and 'dog' as possible
values is mapped to 0 and 1 respectively with 0 being the base class
and 1 being the reference class.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert them to array in
case all entries in the list are of numeric types and separate them
out into different columns if they are of mixed type.
penalty : float, optional
Penalty term on the mis-classification loss of the model. The larger
this weight, the more the model coefficients shrink toward 0. The
larger the penalty, the lower is the emphasis placed on misclassified
examples, and the classifier would spend more time maximizing the
margin for correctly classified examples. The default value is 1.0;
this parameter must be set to a value of at least 1e-10.
solver : string, optional
Name of the solver to be used to solve the problem. See the
references for more detail on each solver. Available solvers are:
- *auto (default)*: automatically chooses the best solver (from the ones
listed below) for the data and model parameters.
- *lbfgs*: lLimited memory BFGS (``lbfgs``) is a robust solver for wide
datasets(i.e datasets with many coefficients).
The solvers are all automatically tuned and the default options should
function well. See the solver options guide for setting additional
parameters for each of the solvers.
feature_rescaling : bool, default = true
Feature rescaling is an important pre-processing step that ensures
that all features are on the same scale. An l2-norm rescaling is
performed to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that
are used to represent them. The coefficients are returned in original
scale of the problem.
convergence_threshold :
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are supposed to have weight one. The
`auto` mode set the class weight to be inversely proportional to number of
examples in the training data with the given class.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : SVMClassifier
A trained model of type
:class:`~turicreate.svm_classifier.SVMClassifier`.
See Also
--------
SVMClassifier
Notes
-----
- Categorical variables are encoded by creating dummy variables. For
a variable with :math:`K` categories, the encoding creates :math:`K-1`
dummy variables, while the first category encountered in the data is used
as the baseline.
- For prediction and evaluation of SVM models with sparse dictionary
inputs, new keys/columns that were not seen during training are silently
ignored.
- The penalty parameter is analogous to the 'C' term in the C-SVM. See the
reference on training SVMs for more details.
- Any 'None' values in the data will result in an error being thrown.
- A constant term of '1' is automatically added for the model intercept to
model the bias term.
- Note that the hinge loss is approximated by the scaled logistic loss
function. (See user guide for details)
References
----------
- `Wikipedia - Support Vector Machines
<http://en.wikipedia.org/wiki/svm>`_
- Zhang et al. - Modified Logistic Regression: An Approximation to
SVM and its Applications in Large-Scale Text Categorization (ICML 2003)
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf``, a list of feature columns
[``feature_1`` ... ``feature_K``], and a target column ``target`` with 0 and
1 values, create a
:class:`~turicreate.svm.SVMClassifier` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.svm_classifier.create(data, 'is_expensive')
"""
# Regression model names.
model_name = "classifier_svm"
solver = solver.lower()
model = _sl.create(dataset, target, model_name, features=features,
validation_set = validation_set, verbose = verbose,
penalty = penalty,
feature_rescaling = feature_rescaling,
convergence_threshold = convergence_threshold,
lbfgs_memory_level = lbfgs_memory_level,
max_iterations = max_iterations,
class_weights = class_weights)
return SVMClassifier(model.__proxy__) | [
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"penalty",
"=",
"1.0",
",",
"solver",
"=",
"'auto'",
",",
"feature_rescaling",
"=",
"True",
",",
"convergence_threshold",
"=",
"_DEFAULT_SOLVER_OPTIONS",
"[",
"'convergence_threshold'",
"]",
",",
"lbfgs_memory_level",
"=",
"_DEFAULT_SOLVER_OPTIONS",
"[",
"'lbfgs_memory_level'",
"]",
",",
"max_iterations",
"=",
"_DEFAULT_SOLVER_OPTIONS",
"[",
"'max_iterations'",
"]",
",",
"class_weights",
"=",
"None",
",",
"validation_set",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
")",
":",
"# Regression model names.",
"model_name",
"=",
"\"classifier_svm\"",
"solver",
"=",
"solver",
".",
"lower",
"(",
")",
"model",
"=",
"_sl",
".",
"create",
"(",
"dataset",
",",
"target",
",",
"model_name",
",",
"features",
"=",
"features",
",",
"validation_set",
"=",
"validation_set",
",",
"verbose",
"=",
"verbose",
",",
"penalty",
"=",
"penalty",
",",
"feature_rescaling",
"=",
"feature_rescaling",
",",
"convergence_threshold",
"=",
"convergence_threshold",
",",
"lbfgs_memory_level",
"=",
"lbfgs_memory_level",
",",
"max_iterations",
"=",
"max_iterations",
",",
"class_weights",
"=",
"class_weights",
")",
"return",
"SVMClassifier",
"(",
"model",
".",
"__proxy__",
")"
] | Create a :class:`~turicreate.svm_classifier.SVMClassifier` to predict the class of a binary
target variable based on a model of which side of a hyperplane the example
falls on. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
This loss function for the SVM model is the sum of an L1 mis-classification
loss (multiplied by the 'penalty' term) and a l2-norm on the weight vectors.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable
values. For example, a target variable with 'cat' and 'dog' as possible
values is mapped to 0 and 1 respectively with 0 being the base class
and 1 being the reference class.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert them to array in
case all entries in the list are of numeric types and separate them
out into different columns if they are of mixed type.
penalty : float, optional
Penalty term on the mis-classification loss of the model. The larger
this weight, the more the model coefficients shrink toward 0. The
larger the penalty, the lower is the emphasis placed on misclassified
examples, and the classifier would spend more time maximizing the
margin for correctly classified examples. The default value is 1.0;
this parameter must be set to a value of at least 1e-10.
solver : string, optional
Name of the solver to be used to solve the problem. See the
references for more detail on each solver. Available solvers are:
- *auto (default)*: automatically chooses the best solver (from the ones
listed below) for the data and model parameters.
- *lbfgs*: lLimited memory BFGS (``lbfgs``) is a robust solver for wide
datasets(i.e datasets with many coefficients).
The solvers are all automatically tuned and the default options should
function well. See the solver options guide for setting additional
parameters for each of the solvers.
feature_rescaling : bool, default = true
Feature rescaling is an important pre-processing step that ensures
that all features are on the same scale. An l2-norm rescaling is
performed to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that
are used to represent them. The coefficients are returned in original
scale of the problem.
convergence_threshold :
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are supposed to have weight one. The
`auto` mode set the class weight to be inversely proportional to number of
examples in the training data with the given class.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : SVMClassifier
A trained model of type
:class:`~turicreate.svm_classifier.SVMClassifier`.
See Also
--------
SVMClassifier
Notes
-----
- Categorical variables are encoded by creating dummy variables. For
a variable with :math:`K` categories, the encoding creates :math:`K-1`
dummy variables, while the first category encountered in the data is used
as the baseline.
- For prediction and evaluation of SVM models with sparse dictionary
inputs, new keys/columns that were not seen during training are silently
ignored.
- The penalty parameter is analogous to the 'C' term in the C-SVM. See the
reference on training SVMs for more details.
- Any 'None' values in the data will result in an error being thrown.
- A constant term of '1' is automatically added for the model intercept to
model the bias term.
- Note that the hinge loss is approximated by the scaled logistic loss
function. (See user guide for details)
References
----------
- `Wikipedia - Support Vector Machines
<http://en.wikipedia.org/wiki/svm>`_
- Zhang et al. - Modified Logistic Regression: An Approximation to
SVM and its Applications in Large-Scale Text Categorization (ICML 2003)
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf``, a list of feature columns
[``feature_1`` ... ``feature_K``], and a target column ``target`` with 0 and
1 values, create a
:class:`~turicreate.svm.SVMClassifier` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.svm_classifier.create(data, 'is_expensive') | [
"Create",
"a",
":",
"class",
":",
"~turicreate",
".",
"svm_classifier",
".",
"SVMClassifier",
"to",
"predict",
"the",
"class",
"of",
"a",
"binary",
"target",
"variable",
"based",
"on",
"a",
"model",
"of",
"which",
"side",
"of",
"a",
"hyperplane",
"the",
"example",
"falls",
"on",
".",
"In",
"addition",
"to",
"standard",
"numeric",
"and",
"categorical",
"types",
"features",
"can",
"also",
"be",
"extracted",
"automatically",
"from",
"list",
"-",
"or",
"dictionary",
"-",
"type",
"SFrame",
"columns",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/svm_classifier.py#L27-L226 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/svm_classifier.py | SVMClassifier.classify | def classify(self, dataset, missing_value_action='auto'):
"""
Return a classification, for each example in the ``dataset``, using the
trained SVM model. The output SFrame contains predictions
as class labels (0 or 1) associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.svm_classifier.create(data, target='is_expensive',
features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
"""
return super(SVMClassifier, self).classify(dataset, missing_value_action=missing_value_action) | python | def classify(self, dataset, missing_value_action='auto'):
"""
Return a classification, for each example in the ``dataset``, using the
trained SVM model. The output SFrame contains predictions
as class labels (0 or 1) associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.svm_classifier.create(data, target='is_expensive',
features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
"""
return super(SVMClassifier, self).classify(dataset, missing_value_action=missing_value_action) | [
"def",
"classify",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"return",
"super",
"(",
"SVMClassifier",
",",
"self",
")",
".",
"classify",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] | Return a classification, for each example in the ``dataset``, using the
trained SVM model. The output SFrame contains predictions
as class labels (0 or 1) associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.svm_classifier.create(data, target='is_expensive',
features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data) | [
"Return",
"a",
"classification",
"for",
"each",
"example",
"in",
"the",
"dataset",
"using",
"the",
"trained",
"SVM",
"model",
".",
"The",
"output",
"SFrame",
"contains",
"predictions",
"as",
"class",
"labels",
"(",
"0",
"or",
"1",
")",
"associated",
"with",
"the",
"the",
"example",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/svm_classifier.py#L521-L566 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py | _get_layer_converter_fn | def _get_layer_converter_fn(layer, add_custom_layers = False):
"""Get the right converter function for Keras
"""
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
convert_func = _KERAS_LAYER_REGISTRY[layer_type]
if convert_func is _layers2.convert_activation:
act_name = _layers2._get_activation_name_from_keras_layer(layer)
if act_name == 'CUSTOM':
return None
return convert_func
elif add_custom_layers:
return None
else:
raise TypeError("Keras layer of type %s is not supported." % type(layer)) | python | def _get_layer_converter_fn(layer, add_custom_layers = False):
"""Get the right converter function for Keras
"""
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
convert_func = _KERAS_LAYER_REGISTRY[layer_type]
if convert_func is _layers2.convert_activation:
act_name = _layers2._get_activation_name_from_keras_layer(layer)
if act_name == 'CUSTOM':
return None
return convert_func
elif add_custom_layers:
return None
else:
raise TypeError("Keras layer of type %s is not supported." % type(layer)) | [
"def",
"_get_layer_converter_fn",
"(",
"layer",
",",
"add_custom_layers",
"=",
"False",
")",
":",
"layer_type",
"=",
"type",
"(",
"layer",
")",
"if",
"layer_type",
"in",
"_KERAS_LAYER_REGISTRY",
":",
"convert_func",
"=",
"_KERAS_LAYER_REGISTRY",
"[",
"layer_type",
"]",
"if",
"convert_func",
"is",
"_layers2",
".",
"convert_activation",
":",
"act_name",
"=",
"_layers2",
".",
"_get_activation_name_from_keras_layer",
"(",
"layer",
")",
"if",
"act_name",
"==",
"'CUSTOM'",
":",
"return",
"None",
"return",
"convert_func",
"elif",
"add_custom_layers",
":",
"return",
"None",
"else",
":",
"raise",
"TypeError",
"(",
"\"Keras layer of type %s is not supported.\"",
"%",
"type",
"(",
"layer",
")",
")"
] | Get the right converter function for Keras | [
"Get",
"the",
"right",
"converter",
"function",
"for",
"Keras"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py#L117-L131 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py | _load_keras_model | def _load_keras_model(model_network_path, model_weight_path, custom_objects=None):
"""Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model
"""
from keras.models import model_from_json
import json
# Load the model network
json_file = open(model_network_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
if not custom_objects:
custom_objects = {}
# Load the model weights
loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objects)
loaded_model.load_weights(model_weight_path)
return loaded_model | python | def _load_keras_model(model_network_path, model_weight_path, custom_objects=None):
"""Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model
"""
from keras.models import model_from_json
import json
# Load the model network
json_file = open(model_network_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
if not custom_objects:
custom_objects = {}
# Load the model weights
loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objects)
loaded_model.load_weights(model_weight_path)
return loaded_model | [
"def",
"_load_keras_model",
"(",
"model_network_path",
",",
"model_weight_path",
",",
"custom_objects",
"=",
"None",
")",
":",
"from",
"keras",
".",
"models",
"import",
"model_from_json",
"import",
"json",
"# Load the model network",
"json_file",
"=",
"open",
"(",
"model_network_path",
",",
"'r'",
")",
"loaded_model_json",
"=",
"json_file",
".",
"read",
"(",
")",
"json_file",
".",
"close",
"(",
")",
"if",
"not",
"custom_objects",
":",
"custom_objects",
"=",
"{",
"}",
"# Load the model weights",
"loaded_model",
"=",
"model_from_json",
"(",
"loaded_model_json",
",",
"custom_objects",
"=",
"custom_objects",
")",
"loaded_model",
".",
"load_weights",
"(",
"model_weight_path",
")",
"return",
"loaded_model"
] | Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model | [
"Load",
"a",
"keras",
"model",
"from",
"disk"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_keras2_converter.py#L134-L168 | train |
apple/turicreate | src/unity/python/turicreate/visualization/_plot.py | Plot.show | def show(self):
"""
A method for displaying the Plot object
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Examples
--------
Suppose 'plt' is an Plot Object
We can view it using:
>>> plt.show()
"""
global _target
display = False
try:
if _target == 'auto' and \
get_ipython().__class__.__name__ == "ZMQInteractiveShell":
self._repr_javascript_()
display = True
except NameError:
pass
finally:
if not display:
if _sys.platform != 'darwin' and _sys.platform != 'linux2' and _sys.platform != 'linux':
raise NotImplementedError('Visualization is currently supported only on macOS and Linux.')
path_to_client = _get_client_app_path()
# TODO: allow autodetection of light/dark mode.
# Disabled for now, since the GUI side needs some work (ie. background color).
plot_variation = 0x10 # force light mode
self.__proxy__.call_function('show', {'path_to_client': path_to_client, 'variation': plot_variation}) | python | def show(self):
"""
A method for displaying the Plot object
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Examples
--------
Suppose 'plt' is an Plot Object
We can view it using:
>>> plt.show()
"""
global _target
display = False
try:
if _target == 'auto' and \
get_ipython().__class__.__name__ == "ZMQInteractiveShell":
self._repr_javascript_()
display = True
except NameError:
pass
finally:
if not display:
if _sys.platform != 'darwin' and _sys.platform != 'linux2' and _sys.platform != 'linux':
raise NotImplementedError('Visualization is currently supported only on macOS and Linux.')
path_to_client = _get_client_app_path()
# TODO: allow autodetection of light/dark mode.
# Disabled for now, since the GUI side needs some work (ie. background color).
plot_variation = 0x10 # force light mode
self.__proxy__.call_function('show', {'path_to_client': path_to_client, 'variation': plot_variation}) | [
"def",
"show",
"(",
"self",
")",
":",
"global",
"_target",
"display",
"=",
"False",
"try",
":",
"if",
"_target",
"==",
"'auto'",
"and",
"get_ipython",
"(",
")",
".",
"__class__",
".",
"__name__",
"==",
"\"ZMQInteractiveShell\"",
":",
"self",
".",
"_repr_javascript_",
"(",
")",
"display",
"=",
"True",
"except",
"NameError",
":",
"pass",
"finally",
":",
"if",
"not",
"display",
":",
"if",
"_sys",
".",
"platform",
"!=",
"'darwin'",
"and",
"_sys",
".",
"platform",
"!=",
"'linux2'",
"and",
"_sys",
".",
"platform",
"!=",
"'linux'",
":",
"raise",
"NotImplementedError",
"(",
"'Visualization is currently supported only on macOS and Linux.'",
")",
"path_to_client",
"=",
"_get_client_app_path",
"(",
")",
"# TODO: allow autodetection of light/dark mode.",
"# Disabled for now, since the GUI side needs some work (ie. background color).",
"plot_variation",
"=",
"0x10",
"# force light mode",
"self",
".",
"__proxy__",
".",
"call_function",
"(",
"'show'",
",",
"{",
"'path_to_client'",
":",
"path_to_client",
",",
"'variation'",
":",
"plot_variation",
"}",
")"
] | A method for displaying the Plot object
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Examples
--------
Suppose 'plt' is an Plot Object
We can view it using:
>>> plt.show() | [
"A",
"method",
"for",
"displaying",
"the",
"Plot",
"object"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/visualization/_plot.py#L104-L142 | train |
apple/turicreate | src/unity/python/turicreate/visualization/_plot.py | Plot.save | def save(self, filepath):
"""
A method for saving the Plot object in a vega representation
Parameters
----------
filepath: string
The destination filepath where the plot object must be saved as.
The extension of this filepath determines what format the plot will
be saved as. Currently supported formats are JSON, PNG, and SVG.
Examples
--------
Suppose 'plt' is an Plot Object
We can save it using:
>>> plt.save('vega_spec.json')
We can also save the vega representation of the plot without data:
>>> plt.save('vega_spec.json', False)
We can save the plot as a PNG/SVG using:
>>> plt.save('test.png')
>>> plt.save('test.svg')
"""
if type(filepath) != str:
raise ValueError("filepath provided is not a string")
if filepath.endswith(".json"):
# save as vega json
spec = self.get_vega(include_data = True)
with open(filepath, 'w') as fp:
_json.dump(spec, fp)
elif filepath.endswith(".png") or filepath.endswith(".svg"):
# save as png/svg, but json first
spec = self.get_vega(include_data = True)
EXTENSION_START_INDEX = -3
extension = filepath[EXTENSION_START_INDEX:]
temp_file_tuple = _mkstemp()
temp_file_path = temp_file_tuple[1]
with open(temp_file_path, 'w') as fp:
_json.dump(spec, fp)
dirname = _os.path.dirname(__file__)
relative_path_to_vg2png_vg2svg = "../vg2" + extension
absolute_path_to_vg2png_vg2svg = _os.path.join(dirname,
relative_path_to_vg2png_vg2svg)
# try node vg2[png|svg] json_filepath out_filepath
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode == _NODE_NOT_FOUND_ERROR_CODE:
# user doesn't have node installed
raise RuntimeError("Node.js not found. Saving as PNG and SVG" +
" requires Node.js, please download and install Node.js " +
"from here and try again: https://nodejs.org/en/download/")
elif exitcode == _CANVAS_PREBUILT_NOT_FOUND_ERROR:
# try to see if canvas-prebuilt is globally installed
# if it is, then link it
# if not, tell the user to install it
(is_installed_exitcode,
is_installed_stdout,
is_installed_stderr) = _run_cmdline(
"npm ls -g -json | grep canvas-prebuilt")
if is_installed_exitcode == _SUCCESS:
# npm link canvas-prebuilt
link_exitcode, link_stdout, link_stderr = _run_cmdline(
"npm link canvas-prebuilt")
if link_exitcode == _PERMISSION_DENIED_ERROR_CODE:
# They don't have permission, tell them.
raise RuntimeError(link_stderr + '\n\n' +
"`npm link canvas-prebuilt` failed, " +
"Permission Denied.")
elif link_exitcode == _SUCCESS:
# canvas-prebuilt link is now successful, so run the
# node vg2[png|svg] json_filepath out_filepath
# command again.
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode != _SUCCESS:
# something else that we have not identified yet
# happened.
raise RuntimeError(stderr)
else:
raise RuntimeError(link_stderr)
else:
raise RuntimeError("canvas-prebuilt not found. " +
"Saving as PNG and SVG requires canvas-prebuilt, " +
"please download and install canvas-prebuilt by " +
"running this command, and try again: " +
"`npm install -g canvas-prebuilt`")
elif exitcode == _SUCCESS:
pass
else:
raise RuntimeError(stderr)
# delete temp file that user didn't ask for
_run_cmdline("rm " + temp_file_path)
else:
raise NotImplementedError("filename must end in" +
" .json, .svg, or .png") | python | def save(self, filepath):
"""
A method for saving the Plot object in a vega representation
Parameters
----------
filepath: string
The destination filepath where the plot object must be saved as.
The extension of this filepath determines what format the plot will
be saved as. Currently supported formats are JSON, PNG, and SVG.
Examples
--------
Suppose 'plt' is an Plot Object
We can save it using:
>>> plt.save('vega_spec.json')
We can also save the vega representation of the plot without data:
>>> plt.save('vega_spec.json', False)
We can save the plot as a PNG/SVG using:
>>> plt.save('test.png')
>>> plt.save('test.svg')
"""
if type(filepath) != str:
raise ValueError("filepath provided is not a string")
if filepath.endswith(".json"):
# save as vega json
spec = self.get_vega(include_data = True)
with open(filepath, 'w') as fp:
_json.dump(spec, fp)
elif filepath.endswith(".png") or filepath.endswith(".svg"):
# save as png/svg, but json first
spec = self.get_vega(include_data = True)
EXTENSION_START_INDEX = -3
extension = filepath[EXTENSION_START_INDEX:]
temp_file_tuple = _mkstemp()
temp_file_path = temp_file_tuple[1]
with open(temp_file_path, 'w') as fp:
_json.dump(spec, fp)
dirname = _os.path.dirname(__file__)
relative_path_to_vg2png_vg2svg = "../vg2" + extension
absolute_path_to_vg2png_vg2svg = _os.path.join(dirname,
relative_path_to_vg2png_vg2svg)
# try node vg2[png|svg] json_filepath out_filepath
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode == _NODE_NOT_FOUND_ERROR_CODE:
# user doesn't have node installed
raise RuntimeError("Node.js not found. Saving as PNG and SVG" +
" requires Node.js, please download and install Node.js " +
"from here and try again: https://nodejs.org/en/download/")
elif exitcode == _CANVAS_PREBUILT_NOT_FOUND_ERROR:
# try to see if canvas-prebuilt is globally installed
# if it is, then link it
# if not, tell the user to install it
(is_installed_exitcode,
is_installed_stdout,
is_installed_stderr) = _run_cmdline(
"npm ls -g -json | grep canvas-prebuilt")
if is_installed_exitcode == _SUCCESS:
# npm link canvas-prebuilt
link_exitcode, link_stdout, link_stderr = _run_cmdline(
"npm link canvas-prebuilt")
if link_exitcode == _PERMISSION_DENIED_ERROR_CODE:
# They don't have permission, tell them.
raise RuntimeError(link_stderr + '\n\n' +
"`npm link canvas-prebuilt` failed, " +
"Permission Denied.")
elif link_exitcode == _SUCCESS:
# canvas-prebuilt link is now successful, so run the
# node vg2[png|svg] json_filepath out_filepath
# command again.
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode != _SUCCESS:
# something else that we have not identified yet
# happened.
raise RuntimeError(stderr)
else:
raise RuntimeError(link_stderr)
else:
raise RuntimeError("canvas-prebuilt not found. " +
"Saving as PNG and SVG requires canvas-prebuilt, " +
"please download and install canvas-prebuilt by " +
"running this command, and try again: " +
"`npm install -g canvas-prebuilt`")
elif exitcode == _SUCCESS:
pass
else:
raise RuntimeError(stderr)
# delete temp file that user didn't ask for
_run_cmdline("rm " + temp_file_path)
else:
raise NotImplementedError("filename must end in" +
" .json, .svg, or .png") | [
"def",
"save",
"(",
"self",
",",
"filepath",
")",
":",
"if",
"type",
"(",
"filepath",
")",
"!=",
"str",
":",
"raise",
"ValueError",
"(",
"\"filepath provided is not a string\"",
")",
"if",
"filepath",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"# save as vega json",
"spec",
"=",
"self",
".",
"get_vega",
"(",
"include_data",
"=",
"True",
")",
"with",
"open",
"(",
"filepath",
",",
"'w'",
")",
"as",
"fp",
":",
"_json",
".",
"dump",
"(",
"spec",
",",
"fp",
")",
"elif",
"filepath",
".",
"endswith",
"(",
"\".png\"",
")",
"or",
"filepath",
".",
"endswith",
"(",
"\".svg\"",
")",
":",
"# save as png/svg, but json first",
"spec",
"=",
"self",
".",
"get_vega",
"(",
"include_data",
"=",
"True",
")",
"EXTENSION_START_INDEX",
"=",
"-",
"3",
"extension",
"=",
"filepath",
"[",
"EXTENSION_START_INDEX",
":",
"]",
"temp_file_tuple",
"=",
"_mkstemp",
"(",
")",
"temp_file_path",
"=",
"temp_file_tuple",
"[",
"1",
"]",
"with",
"open",
"(",
"temp_file_path",
",",
"'w'",
")",
"as",
"fp",
":",
"_json",
".",
"dump",
"(",
"spec",
",",
"fp",
")",
"dirname",
"=",
"_os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"relative_path_to_vg2png_vg2svg",
"=",
"\"../vg2\"",
"+",
"extension",
"absolute_path_to_vg2png_vg2svg",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"relative_path_to_vg2png_vg2svg",
")",
"# try node vg2[png|svg] json_filepath out_filepath",
"(",
"exitcode",
",",
"stdout",
",",
"stderr",
")",
"=",
"_run_cmdline",
"(",
"\"node \"",
"+",
"absolute_path_to_vg2png_vg2svg",
"+",
"\" \"",
"+",
"temp_file_path",
"+",
"\" \"",
"+",
"filepath",
")",
"if",
"exitcode",
"==",
"_NODE_NOT_FOUND_ERROR_CODE",
":",
"# user doesn't have node installed",
"raise",
"RuntimeError",
"(",
"\"Node.js not found. Saving as PNG and SVG\"",
"+",
"\" requires Node.js, please download and install Node.js \"",
"+",
"\"from here and try again: https://nodejs.org/en/download/\"",
")",
"elif",
"exitcode",
"==",
"_CANVAS_PREBUILT_NOT_FOUND_ERROR",
":",
"# try to see if canvas-prebuilt is globally installed",
"# if it is, then link it",
"# if not, tell the user to install it",
"(",
"is_installed_exitcode",
",",
"is_installed_stdout",
",",
"is_installed_stderr",
")",
"=",
"_run_cmdline",
"(",
"\"npm ls -g -json | grep canvas-prebuilt\"",
")",
"if",
"is_installed_exitcode",
"==",
"_SUCCESS",
":",
"# npm link canvas-prebuilt ",
"link_exitcode",
",",
"link_stdout",
",",
"link_stderr",
"=",
"_run_cmdline",
"(",
"\"npm link canvas-prebuilt\"",
")",
"if",
"link_exitcode",
"==",
"_PERMISSION_DENIED_ERROR_CODE",
":",
"# They don't have permission, tell them.",
"raise",
"RuntimeError",
"(",
"link_stderr",
"+",
"'\\n\\n'",
"+",
"\"`npm link canvas-prebuilt` failed, \"",
"+",
"\"Permission Denied.\"",
")",
"elif",
"link_exitcode",
"==",
"_SUCCESS",
":",
"# canvas-prebuilt link is now successful, so run the ",
"# node vg2[png|svg] json_filepath out_filepath",
"# command again.",
"(",
"exitcode",
",",
"stdout",
",",
"stderr",
")",
"=",
"_run_cmdline",
"(",
"\"node \"",
"+",
"absolute_path_to_vg2png_vg2svg",
"+",
"\" \"",
"+",
"temp_file_path",
"+",
"\" \"",
"+",
"filepath",
")",
"if",
"exitcode",
"!=",
"_SUCCESS",
":",
"# something else that we have not identified yet",
"# happened.",
"raise",
"RuntimeError",
"(",
"stderr",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"link_stderr",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"canvas-prebuilt not found. \"",
"+",
"\"Saving as PNG and SVG requires canvas-prebuilt, \"",
"+",
"\"please download and install canvas-prebuilt by \"",
"+",
"\"running this command, and try again: \"",
"+",
"\"`npm install -g canvas-prebuilt`\"",
")",
"elif",
"exitcode",
"==",
"_SUCCESS",
":",
"pass",
"else",
":",
"raise",
"RuntimeError",
"(",
"stderr",
")",
"# delete temp file that user didn't ask for",
"_run_cmdline",
"(",
"\"rm \"",
"+",
"temp_file_path",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"filename must end in\"",
"+",
"\" .json, .svg, or .png\"",
")"
] | A method for saving the Plot object in a vega representation
Parameters
----------
filepath: string
The destination filepath where the plot object must be saved as.
The extension of this filepath determines what format the plot will
be saved as. Currently supported formats are JSON, PNG, and SVG.
Examples
--------
Suppose 'plt' is an Plot Object
We can save it using:
>>> plt.save('vega_spec.json')
We can also save the vega representation of the plot without data:
>>> plt.save('vega_spec.json', False)
We can save the plot as a PNG/SVG using:
>>> plt.save('test.png')
>>> plt.save('test.svg') | [
"A",
"method",
"for",
"saving",
"the",
"Plot",
"object",
"in",
"a",
"vega",
"representation"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/visualization/_plot.py#L144-L248 | train |
apple/turicreate | src/external/xgboost/subtree/rabit/tracker/rabit_demo.py | mthread_submit | def mthread_submit(nslave, worker_args, worker_envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit
"""
procs = {}
for i in range(nslave):
procs[i] = Thread(target = exec_cmd, args = (args.command + worker_args, i, worker_envs))
procs[i].daemon = True
procs[i].start()
for i in range(nslave):
procs[i].join() | python | def mthread_submit(nslave, worker_args, worker_envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit
"""
procs = {}
for i in range(nslave):
procs[i] = Thread(target = exec_cmd, args = (args.command + worker_args, i, worker_envs))
procs[i].daemon = True
procs[i].start()
for i in range(nslave):
procs[i].join() | [
"def",
"mthread_submit",
"(",
"nslave",
",",
"worker_args",
",",
"worker_envs",
")",
":",
"procs",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"nslave",
")",
":",
"procs",
"[",
"i",
"]",
"=",
"Thread",
"(",
"target",
"=",
"exec_cmd",
",",
"args",
"=",
"(",
"args",
".",
"command",
"+",
"worker_args",
",",
"i",
",",
"worker_envs",
")",
")",
"procs",
"[",
"i",
"]",
".",
"daemon",
"=",
"True",
"procs",
"[",
"i",
"]",
".",
"start",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"nslave",
")",
":",
"procs",
"[",
"i",
"]",
".",
"join",
"(",
")"
] | customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nslave number of slave process to start up
args arguments to launch each job
this usually includes the parameters of master_uri and parameters passed into submit | [
"customized",
"submit",
"script",
"that",
"submit",
"nslave",
"jobs",
"each",
"must",
"contain",
"args",
"as",
"parameter",
"note",
"this",
"can",
"be",
"a",
"lambda",
"function",
"containing",
"additional",
"parameters",
"in",
"input",
"Parameters",
"nslave",
"number",
"of",
"slave",
"process",
"to",
"start",
"up",
"args",
"arguments",
"to",
"launch",
"each",
"job",
"this",
"usually",
"includes",
"the",
"parameters",
"of",
"master_uri",
"and",
"parameters",
"passed",
"into",
"submit"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_demo.py#L78-L93 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py | _get_value | def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
""" Get the right value from the scikit-tree
"""
# Regression
if mode == 'regressor':
return scikit_value[0] * scaling
# Binary classification
if n_classes == 2:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0][1] * scaling / scikit_value[0].sum()
# boosted tree
else:
value = scikit_value[0][0] * scaling
if value == 0.5:
value = value - 1e-7
# Multiclass classification
else:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0] / scikit_value[0].sum()
# boosted tree
else:
value = {tree_index: scikit_value[0] * scaling}
return value | python | def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
""" Get the right value from the scikit-tree
"""
# Regression
if mode == 'regressor':
return scikit_value[0] * scaling
# Binary classification
if n_classes == 2:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0][1] * scaling / scikit_value[0].sum()
# boosted tree
else:
value = scikit_value[0][0] * scaling
if value == 0.5:
value = value - 1e-7
# Multiclass classification
else:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0] / scikit_value[0].sum()
# boosted tree
else:
value = {tree_index: scikit_value[0] * scaling}
return value | [
"def",
"_get_value",
"(",
"scikit_value",
",",
"mode",
"=",
"'regressor'",
",",
"scaling",
"=",
"1.0",
",",
"n_classes",
"=",
"2",
",",
"tree_index",
"=",
"0",
")",
":",
"# Regression",
"if",
"mode",
"==",
"'regressor'",
":",
"return",
"scikit_value",
"[",
"0",
"]",
"*",
"scaling",
"# Binary classification",
"if",
"n_classes",
"==",
"2",
":",
"# Decision tree",
"if",
"len",
"(",
"scikit_value",
"[",
"0",
"]",
")",
"!=",
"1",
":",
"value",
"=",
"scikit_value",
"[",
"0",
"]",
"[",
"1",
"]",
"*",
"scaling",
"/",
"scikit_value",
"[",
"0",
"]",
".",
"sum",
"(",
")",
"# boosted tree",
"else",
":",
"value",
"=",
"scikit_value",
"[",
"0",
"]",
"[",
"0",
"]",
"*",
"scaling",
"if",
"value",
"==",
"0.5",
":",
"value",
"=",
"value",
"-",
"1e-7",
"# Multiclass classification",
"else",
":",
"# Decision tree",
"if",
"len",
"(",
"scikit_value",
"[",
"0",
"]",
")",
"!=",
"1",
":",
"value",
"=",
"scikit_value",
"[",
"0",
"]",
"/",
"scikit_value",
"[",
"0",
"]",
".",
"sum",
"(",
")",
"# boosted tree",
"else",
":",
"value",
"=",
"{",
"tree_index",
":",
"scikit_value",
"[",
"0",
"]",
"*",
"scaling",
"}",
"return",
"value"
] | Get the right value from the scikit-tree | [
"Get",
"the",
"right",
"value",
"from",
"the",
"scikit",
"-",
"tree"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py#L16-L42 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py | _recurse | def _recurse(coreml_tree, scikit_tree, tree_id, node_id, scaling = 1.0, mode = 'regressor',
n_classes = 2, tree_index = 0):
"""Traverse through the tree and append to the tree spec.
"""
if not(HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
## Recursion should not be called on the leaf node.
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
# Add a branch node to the tree
if scikit_tree.children_left[node_id] != _tree.TREE_LEAF:
branch_mode = 'BranchOnValueLessThanEqual'
feature_index = scikit_tree.feature[node_id]
feature_value = scikit_tree.threshold[node_id]
left_child_id = scikit_tree.children_left[node_id]
right_child_id = scikit_tree.children_right[node_id]
# Add a branch node
coreml_tree.add_branch_node(tree_id, node_id, feature_index,
feature_value, branch_mode, left_child_id, right_child_id)
# Now recurse
_recurse(coreml_tree, scikit_tree, tree_id, left_child_id, scaling, mode, n_classes, tree_index)
_recurse(coreml_tree, scikit_tree, tree_id, right_child_id, scaling, mode, n_classes, tree_index)
# Add a leaf node to the tree
else:
# Get the scikit-learn value
if scikit_tree.n_outputs != 1:
raise ValueError('Expected only 1 output in the scikit-learn tree.')
value = _get_value(scikit_tree.value[node_id], mode, scaling, n_classes, tree_index)
coreml_tree.add_leaf_node(tree_id, node_id, value) | python | def _recurse(coreml_tree, scikit_tree, tree_id, node_id, scaling = 1.0, mode = 'regressor',
n_classes = 2, tree_index = 0):
"""Traverse through the tree and append to the tree spec.
"""
if not(HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
## Recursion should not be called on the leaf node.
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
# Add a branch node to the tree
if scikit_tree.children_left[node_id] != _tree.TREE_LEAF:
branch_mode = 'BranchOnValueLessThanEqual'
feature_index = scikit_tree.feature[node_id]
feature_value = scikit_tree.threshold[node_id]
left_child_id = scikit_tree.children_left[node_id]
right_child_id = scikit_tree.children_right[node_id]
# Add a branch node
coreml_tree.add_branch_node(tree_id, node_id, feature_index,
feature_value, branch_mode, left_child_id, right_child_id)
# Now recurse
_recurse(coreml_tree, scikit_tree, tree_id, left_child_id, scaling, mode, n_classes, tree_index)
_recurse(coreml_tree, scikit_tree, tree_id, right_child_id, scaling, mode, n_classes, tree_index)
# Add a leaf node to the tree
else:
# Get the scikit-learn value
if scikit_tree.n_outputs != 1:
raise ValueError('Expected only 1 output in the scikit-learn tree.')
value = _get_value(scikit_tree.value[node_id], mode, scaling, n_classes, tree_index)
coreml_tree.add_leaf_node(tree_id, node_id, value) | [
"def",
"_recurse",
"(",
"coreml_tree",
",",
"scikit_tree",
",",
"tree_id",
",",
"node_id",
",",
"scaling",
"=",
"1.0",
",",
"mode",
"=",
"'regressor'",
",",
"n_classes",
"=",
"2",
",",
"tree_index",
"=",
"0",
")",
":",
"if",
"not",
"(",
"HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"## Recursion should not be called on the leaf node.",
"if",
"node_id",
"==",
"_tree",
".",
"TREE_LEAF",
":",
"raise",
"ValueError",
"(",
"\"Invalid node_id %s\"",
"%",
"_tree",
".",
"TREE_LEAF",
")",
"# Add a branch node to the tree",
"if",
"scikit_tree",
".",
"children_left",
"[",
"node_id",
"]",
"!=",
"_tree",
".",
"TREE_LEAF",
":",
"branch_mode",
"=",
"'BranchOnValueLessThanEqual'",
"feature_index",
"=",
"scikit_tree",
".",
"feature",
"[",
"node_id",
"]",
"feature_value",
"=",
"scikit_tree",
".",
"threshold",
"[",
"node_id",
"]",
"left_child_id",
"=",
"scikit_tree",
".",
"children_left",
"[",
"node_id",
"]",
"right_child_id",
"=",
"scikit_tree",
".",
"children_right",
"[",
"node_id",
"]",
"# Add a branch node",
"coreml_tree",
".",
"add_branch_node",
"(",
"tree_id",
",",
"node_id",
",",
"feature_index",
",",
"feature_value",
",",
"branch_mode",
",",
"left_child_id",
",",
"right_child_id",
")",
"# Now recurse",
"_recurse",
"(",
"coreml_tree",
",",
"scikit_tree",
",",
"tree_id",
",",
"left_child_id",
",",
"scaling",
",",
"mode",
",",
"n_classes",
",",
"tree_index",
")",
"_recurse",
"(",
"coreml_tree",
",",
"scikit_tree",
",",
"tree_id",
",",
"right_child_id",
",",
"scaling",
",",
"mode",
",",
"n_classes",
",",
"tree_index",
")",
"# Add a leaf node to the tree",
"else",
":",
"# Get the scikit-learn value",
"if",
"scikit_tree",
".",
"n_outputs",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Expected only 1 output in the scikit-learn tree.'",
")",
"value",
"=",
"_get_value",
"(",
"scikit_tree",
".",
"value",
"[",
"node_id",
"]",
",",
"mode",
",",
"scaling",
",",
"n_classes",
",",
"tree_index",
")",
"coreml_tree",
".",
"add_leaf_node",
"(",
"tree_id",
",",
"node_id",
",",
"value",
")"
] | Traverse through the tree and append to the tree spec. | [
"Traverse",
"through",
"the",
"tree",
"and",
"append",
"to",
"the",
"tree",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py#L44-L77 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py | convert_tree_ensemble | def convert_tree_ensemble(model, input_features,
output_features = ('predicted_class', float),
mode = 'regressor',
base_prediction = None,
class_labels = None,
post_evaluation_transform = None):
"""
Convert a generic tree regressor model to the protobuf spec.
This currently supports:
* Decision tree regression
* Gradient boosted tree regression
* Random forest regression
* Decision tree classifier.
* Gradient boosted tree classifier.
* Random forest classifier.
----------
Parameters
model: [DecisionTreeRegressor | GradientBoostingRegression | RandomForestRegressor]
A scikit learn tree model.
feature_names : list of strings, optional (default=None)
Names of each of the features.
target: str
Name of the output column.
base_prediction: double
Base prediction value.
mode: str in ['regressor', 'classifier']
Mode of the tree model.
class_labels: list[int]
List of classes
post_evaluation_transform: list[int]
Post evaluation transform
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
num_dimensions = get_input_dimension(model)
features = process_or_validate_features(input_features, num_dimensions)
n_classes = None
if mode == 'classifier':
n_classes = model.n_classes_
if class_labels is None:
class_labels = range(n_classes)
else:
if len(class_labels) != n_classes:
raise ValueError("Number of classes in model (%d) does not match "
"length of supplied class list (%d)."
% (n_classes, len(class_labels)))
coreml_tree = TreeEnsembleClassifier(input_features, class_labels, output_features)
if post_evaluation_transform is not None:
coreml_tree.set_post_evaluation_transform(post_evaluation_transform)
# Base prediction not provided
if base_prediction is None:
if n_classes == 2:
base_prediction = [0.0]
else:
base_prediction = [0.0 for c in range(n_classes)]
coreml_tree.set_default_prediction_value(base_prediction)
else:
if base_prediction is None:
base_prediction = 0.0
coreml_tree = TreeEnsembleRegressor(input_features, output_features)
coreml_tree.set_default_prediction_value(base_prediction)
# Single tree
if hasattr(model, 'tree_'):
_recurse(coreml_tree, model.tree_, tree_id = 0, node_id = 0,
mode = mode, n_classes = n_classes)
# Multiple trees
elif hasattr(model, 'estimators_'):
is_ensembling_in_separate_trees = False
if type(model.estimators_) != list:
is_ensembling_in_separate_trees = len(model.estimators_.shape) > 0 and model.estimators_.shape[1] > 1
estimators = model.estimators_.flatten()
else:
estimators = model.estimators_
scaling = model.learning_rate if hasattr(model, 'learning_rate') else 1.0 / len(estimators)
for tree_id, base_model in enumerate(estimators):
if is_ensembling_in_separate_trees:
tree_index = tree_id % n_classes
else:
tree_index = 0
_recurse(coreml_tree, base_model.tree_, tree_id, node_id = 0,
scaling = scaling, mode = mode, n_classes = n_classes, tree_index = tree_index)
else:
raise TypeError('Unknown scikit-learn tree model type.')
return coreml_tree.spec | python | def convert_tree_ensemble(model, input_features,
output_features = ('predicted_class', float),
mode = 'regressor',
base_prediction = None,
class_labels = None,
post_evaluation_transform = None):
"""
Convert a generic tree regressor model to the protobuf spec.
This currently supports:
* Decision tree regression
* Gradient boosted tree regression
* Random forest regression
* Decision tree classifier.
* Gradient boosted tree classifier.
* Random forest classifier.
----------
Parameters
model: [DecisionTreeRegressor | GradientBoostingRegression | RandomForestRegressor]
A scikit learn tree model.
feature_names : list of strings, optional (default=None)
Names of each of the features.
target: str
Name of the output column.
base_prediction: double
Base prediction value.
mode: str in ['regressor', 'classifier']
Mode of the tree model.
class_labels: list[int]
List of classes
post_evaluation_transform: list[int]
Post evaluation transform
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
num_dimensions = get_input_dimension(model)
features = process_or_validate_features(input_features, num_dimensions)
n_classes = None
if mode == 'classifier':
n_classes = model.n_classes_
if class_labels is None:
class_labels = range(n_classes)
else:
if len(class_labels) != n_classes:
raise ValueError("Number of classes in model (%d) does not match "
"length of supplied class list (%d)."
% (n_classes, len(class_labels)))
coreml_tree = TreeEnsembleClassifier(input_features, class_labels, output_features)
if post_evaluation_transform is not None:
coreml_tree.set_post_evaluation_transform(post_evaluation_transform)
# Base prediction not provided
if base_prediction is None:
if n_classes == 2:
base_prediction = [0.0]
else:
base_prediction = [0.0 for c in range(n_classes)]
coreml_tree.set_default_prediction_value(base_prediction)
else:
if base_prediction is None:
base_prediction = 0.0
coreml_tree = TreeEnsembleRegressor(input_features, output_features)
coreml_tree.set_default_prediction_value(base_prediction)
# Single tree
if hasattr(model, 'tree_'):
_recurse(coreml_tree, model.tree_, tree_id = 0, node_id = 0,
mode = mode, n_classes = n_classes)
# Multiple trees
elif hasattr(model, 'estimators_'):
is_ensembling_in_separate_trees = False
if type(model.estimators_) != list:
is_ensembling_in_separate_trees = len(model.estimators_.shape) > 0 and model.estimators_.shape[1] > 1
estimators = model.estimators_.flatten()
else:
estimators = model.estimators_
scaling = model.learning_rate if hasattr(model, 'learning_rate') else 1.0 / len(estimators)
for tree_id, base_model in enumerate(estimators):
if is_ensembling_in_separate_trees:
tree_index = tree_id % n_classes
else:
tree_index = 0
_recurse(coreml_tree, base_model.tree_, tree_id, node_id = 0,
scaling = scaling, mode = mode, n_classes = n_classes, tree_index = tree_index)
else:
raise TypeError('Unknown scikit-learn tree model type.')
return coreml_tree.spec | [
"def",
"convert_tree_ensemble",
"(",
"model",
",",
"input_features",
",",
"output_features",
"=",
"(",
"'predicted_class'",
",",
"float",
")",
",",
"mode",
"=",
"'regressor'",
",",
"base_prediction",
"=",
"None",
",",
"class_labels",
"=",
"None",
",",
"post_evaluation_transform",
"=",
"None",
")",
":",
"num_dimensions",
"=",
"get_input_dimension",
"(",
"model",
")",
"features",
"=",
"process_or_validate_features",
"(",
"input_features",
",",
"num_dimensions",
")",
"n_classes",
"=",
"None",
"if",
"mode",
"==",
"'classifier'",
":",
"n_classes",
"=",
"model",
".",
"n_classes_",
"if",
"class_labels",
"is",
"None",
":",
"class_labels",
"=",
"range",
"(",
"n_classes",
")",
"else",
":",
"if",
"len",
"(",
"class_labels",
")",
"!=",
"n_classes",
":",
"raise",
"ValueError",
"(",
"\"Number of classes in model (%d) does not match \"",
"\"length of supplied class list (%d).\"",
"%",
"(",
"n_classes",
",",
"len",
"(",
"class_labels",
")",
")",
")",
"coreml_tree",
"=",
"TreeEnsembleClassifier",
"(",
"input_features",
",",
"class_labels",
",",
"output_features",
")",
"if",
"post_evaluation_transform",
"is",
"not",
"None",
":",
"coreml_tree",
".",
"set_post_evaluation_transform",
"(",
"post_evaluation_transform",
")",
"# Base prediction not provided",
"if",
"base_prediction",
"is",
"None",
":",
"if",
"n_classes",
"==",
"2",
":",
"base_prediction",
"=",
"[",
"0.0",
"]",
"else",
":",
"base_prediction",
"=",
"[",
"0.0",
"for",
"c",
"in",
"range",
"(",
"n_classes",
")",
"]",
"coreml_tree",
".",
"set_default_prediction_value",
"(",
"base_prediction",
")",
"else",
":",
"if",
"base_prediction",
"is",
"None",
":",
"base_prediction",
"=",
"0.0",
"coreml_tree",
"=",
"TreeEnsembleRegressor",
"(",
"input_features",
",",
"output_features",
")",
"coreml_tree",
".",
"set_default_prediction_value",
"(",
"base_prediction",
")",
"# Single tree",
"if",
"hasattr",
"(",
"model",
",",
"'tree_'",
")",
":",
"_recurse",
"(",
"coreml_tree",
",",
"model",
".",
"tree_",
",",
"tree_id",
"=",
"0",
",",
"node_id",
"=",
"0",
",",
"mode",
"=",
"mode",
",",
"n_classes",
"=",
"n_classes",
")",
"# Multiple trees",
"elif",
"hasattr",
"(",
"model",
",",
"'estimators_'",
")",
":",
"is_ensembling_in_separate_trees",
"=",
"False",
"if",
"type",
"(",
"model",
".",
"estimators_",
")",
"!=",
"list",
":",
"is_ensembling_in_separate_trees",
"=",
"len",
"(",
"model",
".",
"estimators_",
".",
"shape",
")",
">",
"0",
"and",
"model",
".",
"estimators_",
".",
"shape",
"[",
"1",
"]",
">",
"1",
"estimators",
"=",
"model",
".",
"estimators_",
".",
"flatten",
"(",
")",
"else",
":",
"estimators",
"=",
"model",
".",
"estimators_",
"scaling",
"=",
"model",
".",
"learning_rate",
"if",
"hasattr",
"(",
"model",
",",
"'learning_rate'",
")",
"else",
"1.0",
"/",
"len",
"(",
"estimators",
")",
"for",
"tree_id",
",",
"base_model",
"in",
"enumerate",
"(",
"estimators",
")",
":",
"if",
"is_ensembling_in_separate_trees",
":",
"tree_index",
"=",
"tree_id",
"%",
"n_classes",
"else",
":",
"tree_index",
"=",
"0",
"_recurse",
"(",
"coreml_tree",
",",
"base_model",
".",
"tree_",
",",
"tree_id",
",",
"node_id",
"=",
"0",
",",
"scaling",
"=",
"scaling",
",",
"mode",
"=",
"mode",
",",
"n_classes",
"=",
"n_classes",
",",
"tree_index",
"=",
"tree_index",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unknown scikit-learn tree model type.'",
")",
"return",
"coreml_tree",
".",
"spec"
] | Convert a generic tree regressor model to the protobuf spec.
This currently supports:
* Decision tree regression
* Gradient boosted tree regression
* Random forest regression
* Decision tree classifier.
* Gradient boosted tree classifier.
* Random forest classifier.
----------
Parameters
model: [DecisionTreeRegressor | GradientBoostingRegression | RandomForestRegressor]
A scikit learn tree model.
feature_names : list of strings, optional (default=None)
Names of each of the features.
target: str
Name of the output column.
base_prediction: double
Base prediction value.
mode: str in ['regressor', 'classifier']
Mode of the tree model.
class_labels: list[int]
List of classes
post_evaluation_transform: list[int]
Post evaluation transform
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | [
"Convert",
"a",
"generic",
"tree",
"regressor",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py#L97-L199 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py | _vgg16_data_prep | def _vgg16_data_prep(batch):
"""
Takes images scaled to [0, 1] and returns them appropriately scaled and
mean-subtracted for VGG-16
"""
from mxnet import nd
mean = nd.array([123.68, 116.779, 103.939], ctx=batch.context)
return nd.broadcast_sub(255 * batch, mean.reshape((-1, 1, 1))) | python | def _vgg16_data_prep(batch):
"""
Takes images scaled to [0, 1] and returns them appropriately scaled and
mean-subtracted for VGG-16
"""
from mxnet import nd
mean = nd.array([123.68, 116.779, 103.939], ctx=batch.context)
return nd.broadcast_sub(255 * batch, mean.reshape((-1, 1, 1))) | [
"def",
"_vgg16_data_prep",
"(",
"batch",
")",
":",
"from",
"mxnet",
"import",
"nd",
"mean",
"=",
"nd",
".",
"array",
"(",
"[",
"123.68",
",",
"116.779",
",",
"103.939",
"]",
",",
"ctx",
"=",
"batch",
".",
"context",
")",
"return",
"nd",
".",
"broadcast_sub",
"(",
"255",
"*",
"batch",
",",
"mean",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
",",
"1",
")",
")",
")"
] | Takes images scaled to [0, 1] and returns them appropriately scaled and
mean-subtracted for VGG-16 | [
"Takes",
"images",
"scaled",
"to",
"[",
"0",
"1",
"]",
"and",
"returns",
"them",
"appropriately",
"scaled",
"and",
"mean",
"-",
"subtracted",
"for",
"VGG",
"-",
"16"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L26-L33 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py | create | def create(style_dataset, content_dataset, style_feature=None,
content_feature=None, max_iterations=None, model='resnet-16',
verbose=True, batch_size = 6, **kwargs):
"""
Create a :class:`StyleTransfer` model.
Parameters
----------
style_dataset: SFrame
Input style images. The columns named by the ``style_feature`` parameters will
be extracted for training the model.
content_dataset : SFrame
Input content images. The columns named by the ``content_feature`` parameters will
be extracted for training the model.
style_feature: string
Name of the column containing the input images in style SFrame.
'None' (the default) indicates the only image column in the style SFrame
should be used as the feature.
content_feature: string
Name of the column containing the input images in content SFrame.
'None' (the default) indicates the only image column in the content
SFrame should be used as the feature.
max_iterations : int
The number of training iterations. If 'None' (the default), then it will
be automatically determined based on the amount of data you provide.
model : string optional
Style transfer model to use:
- "resnet-16" : Fast and small-sized residual network that uses
VGG-16 as reference network during training.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve training
throughput.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : StyleTransfer
A trained :class:`StyleTransfer` model.
See Also
--------
StyleTransfer
Examples
--------
.. sourcecode:: python
# Create datasets
>>> content_dataset = turicreate.image_analysis.load_images('content_images/')
>>> style_dataset = turicreate.image_analysis.load_images('style_images/')
# Train a style transfer model
>>> model = turicreate.style_transfer.create(content_dataset, style_dataset)
# Stylize an image on all styles
>>> stylized_images = model.stylize(data)
# Visualize the stylized images
>>> stylized_images.explore()
"""
if len(style_dataset) == 0:
raise _ToolkitError("style_dataset SFrame cannot be empty")
if len(content_dataset) == 0:
raise _ToolkitError("content_dataset SFrame cannot be empty")
if(batch_size < 1):
raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from .._mxnet import _mxnet_utils
if style_feature is None:
style_feature = _tkutl._find_only_image_column(style_dataset)
if content_feature is None:
content_feature = _tkutl._find_only_image_column(content_dataset)
if verbose:
print("Using '{}' in style_dataset as feature column and using "
"'{}' in content_dataset as feature column".format(style_feature, content_feature))
_raise_error_if_not_training_sframe(style_dataset, style_feature)
_raise_error_if_not_training_sframe(content_dataset, content_feature)
params = {
'batch_size': batch_size,
'vgg16_content_loss_layer': 2, # conv3_3 layer
'lr': 0.001,
'content_loss_mult': 1.0,
'style_loss_mult': [1e-4, 1e-4, 1e-4, 1e-4], # conv 1-4 layers
'finetune_all_params': True,
'pretrained_weights': False,
'print_loss_breakdown': False,
'input_shape': (256, 256),
'training_content_loader_type': 'stretch',
'use_augmentation': False,
'sequential_image_processing': False,
# Only used if use_augmentaion is True
'aug_resize': 0,
'aug_min_object_covered': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_area_range': (.05, 1.5),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
_content_loss_mult = params['content_loss_mult']
_style_loss_mult = params['style_loss_mult']
num_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=params['batch_size'])
batch_size_each = params['batch_size'] // max(num_gpus, 1)
batch_size = max(num_gpus, 1) * batch_size_each
input_shape = params['input_shape']
iterations = 0
if max_iterations is None:
max_iterations = len(style_dataset) * 10000
if verbose:
print('Setting max_iterations to be {}'.format(max_iterations))
# data loader
if params['use_augmentation']:
content_loader_type = '%s-with-augmentation' % params['training_content_loader_type']
else:
content_loader_type = params['training_content_loader_type']
content_images_loader = _SFrameSTIter(content_dataset, batch_size, shuffle=True,
feature_column=content_feature, input_shape=input_shape,
loader_type=content_loader_type, aug_params=params,
sequential=params['sequential_image_processing'])
ctx = _mxnet_utils.get_mxnet_context(max_devices=params['batch_size'])
num_styles = len(style_dataset)
# TRANSFORMER MODEL
from ._model import Transformer as _Transformer
transformer_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS[model]().get_model_path()
transformer = _Transformer(num_styles, batch_size_each)
transformer.collect_params().initialize(ctx=ctx)
if params['pretrained_weights']:
transformer.load_params(transformer_model_path, ctx, allow_missing=True)
# For some reason, the transformer fails to hybridize for training, so we
# avoid this until resolved
# transformer.hybridize()
# VGG MODEL
from ._model import Vgg16 as _Vgg16
vgg_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS['Vgg16']().get_model_path()
vgg_model = _Vgg16()
vgg_model.collect_params().initialize(ctx=ctx)
vgg_model.load_params(vgg_model_path, ctx=ctx, ignore_extra=True)
vgg_model.hybridize()
# TRAINER
from mxnet import gluon as _gluon
from ._model import gram_matrix as _gram_matrix
if params['finetune_all_params']:
trainable_params = transformer.collect_params()
else:
trainable_params = transformer.collect_params('.*gamma|.*beta')
trainer = _gluon.Trainer(trainable_params, 'adam', {'learning_rate': params['lr']})
mse_loss = _gluon.loss.L2Loss()
start_time = _time.time()
smoothed_loss = None
last_time = 0
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=params['batch_size'])
num_mxnet_gpus = len(cuda_gpus)
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 260 + batch_size_each * 880 + num_styles * 1.4
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=False,
cuda_mem_req=cuda_mem_req, has_mps_impl=False)
#
# Pre-compute gram matrices for style images
#
if verbose:
print('Analyzing visual features of the style images')
style_images_loader = _SFrameSTIter(style_dataset, batch_size, shuffle=False, num_epochs=1,
feature_column=style_feature, input_shape=input_shape,
loader_type='stretch',
sequential=params['sequential_image_processing'])
num_layers = len(params['style_loss_mult'])
gram_chunks = [[] for _ in range(num_layers)]
for s_batch in style_images_loader:
s_data = _gluon.utils.split_and_load(s_batch.data[0], ctx_list=ctx, batch_axis=0)
for s in s_data:
vgg16_s = _vgg16_data_prep(s)
ret = vgg_model(vgg16_s)
grams = [_gram_matrix(x) for x in ret]
for i, gram in enumerate(grams):
if gram.context != _mx.cpu(0):
gram = gram.as_in_context(_mx.cpu(0))
gram_chunks[i].append(gram)
del style_images_loader
grams = [
# The concatenated styles may be padded, so we slice overflow
_mx.nd.concat(*chunks, dim=0)[:num_styles]
for chunks in gram_chunks
]
# A context->grams look-up table, where all the gram matrices have been
# distributed
ctx_grams = {}
if ctx[0] == _mx.cpu(0):
ctx_grams[_mx.cpu(0)] = grams
else:
for ctx0 in ctx:
ctx_grams[ctx0] = [gram.as_in_context(ctx0) for gram in grams]
#
# Training loop
#
vgg_content_loss_layer = params['vgg16_content_loss_layer']
rs = _np.random.RandomState(1234)
while iterations < max_iterations:
content_images_loader.reset()
for c_batch in content_images_loader:
c_data = _gluon.utils.split_and_load(c_batch.data[0], ctx_list=ctx, batch_axis=0)
Ls = []
curr_content_loss = []
curr_style_loss = []
with _mx.autograd.record():
for c in c_data:
# Randomize styles to train
indices = _mx.nd.array(rs.randint(num_styles, size=batch_size_each),
dtype=_np.int64, ctx=c.context)
# Generate pastiche
p = transformer(c, indices)
# mean subtraction
vgg16_p = _vgg16_data_prep(p)
vgg16_c = _vgg16_data_prep(c)
# vgg forward
p_vgg_outputs = vgg_model(vgg16_p)
c_vgg_outputs = vgg_model(vgg16_c)
c_content_layer = c_vgg_outputs[vgg_content_loss_layer]
p_content_layer = p_vgg_outputs[vgg_content_loss_layer]
# Calculate Loss
# Style Loss between style image and stylized image
# Ls = sum of L2 norm of gram matrix of vgg16's conv layers
style_losses = []
for gram, p_vgg_output, style_loss_mult in zip(ctx_grams[c.context], p_vgg_outputs, _style_loss_mult):
gram_s_vgg = gram[indices]
gram_p_vgg = _gram_matrix(p_vgg_output)
style_losses.append(style_loss_mult * mse_loss(gram_s_vgg, gram_p_vgg))
style_loss = _mx.nd.add_n(*style_losses)
# Content Loss between content image and stylized image
# Lc = L2 norm at a single layer in vgg16
content_loss = _content_loss_mult * mse_loss(c_content_layer,
p_content_layer)
curr_content_loss.append(content_loss)
curr_style_loss.append(style_loss)
# Divide loss by large number to get into a more legible
# range
total_loss = (content_loss + style_loss) / 10000.0
Ls.append(total_loss)
for L in Ls:
L.backward()
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
if smoothed_loss is None:
smoothed_loss = cur_loss
else:
smoothed_loss = 0.9 * smoothed_loss + 0.1 * cur_loss
iterations += 1
trainer.step(batch_size)
if verbose and iterations == 1:
# Print progress table header
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
cur_time = _time.time()
if verbose and (cur_time > last_time + 10 or iterations == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter = iterations, loss = smoothed_loss,
time = elapsed_time , width = column_width-1))
if params['print_loss_breakdown']:
print_content_loss = _np.mean([L.asnumpy()[0] for L in curr_content_loss])
print_style_loss = _np.mean([L.asnumpy()[0] for L in curr_style_loss])
print('Total Loss: {:6.3f} | Content Loss: {:6.3f} | Style Loss: {:6.3f}'.format(cur_loss, print_content_loss, print_style_loss))
last_time = cur_time
if iterations == max_iterations:
print(hr)
break
training_time = _time.time() - start_time
style_sa = style_dataset[style_feature]
idx_column = _tc.SArray(range(0, style_sa.shape[0]))
style_sframe = _tc.SFrame({"style": idx_column, style_feature: style_sa})
# Save the model state
state = {
'_model': transformer,
'_training_time_as_string': _seconds_as_string(training_time),
'batch_size': batch_size,
'num_styles': num_styles,
'model': model,
'input_image_shape': input_shape,
'styles': style_sframe,
'num_content_images': len(content_dataset),
'training_time': training_time,
'max_iterations': max_iterations,
'training_iterations': iterations,
'training_epochs': content_images_loader.cur_epoch,
'style_feature': style_feature,
'content_feature': content_feature,
"_index_column": "style",
'training_loss': smoothed_loss,
}
return StyleTransfer(state) | python | def create(style_dataset, content_dataset, style_feature=None,
content_feature=None, max_iterations=None, model='resnet-16',
verbose=True, batch_size = 6, **kwargs):
"""
Create a :class:`StyleTransfer` model.
Parameters
----------
style_dataset: SFrame
Input style images. The columns named by the ``style_feature`` parameters will
be extracted for training the model.
content_dataset : SFrame
Input content images. The columns named by the ``content_feature`` parameters will
be extracted for training the model.
style_feature: string
Name of the column containing the input images in style SFrame.
'None' (the default) indicates the only image column in the style SFrame
should be used as the feature.
content_feature: string
Name of the column containing the input images in content SFrame.
'None' (the default) indicates the only image column in the content
SFrame should be used as the feature.
max_iterations : int
The number of training iterations. If 'None' (the default), then it will
be automatically determined based on the amount of data you provide.
model : string optional
Style transfer model to use:
- "resnet-16" : Fast and small-sized residual network that uses
VGG-16 as reference network during training.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve training
throughput.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : StyleTransfer
A trained :class:`StyleTransfer` model.
See Also
--------
StyleTransfer
Examples
--------
.. sourcecode:: python
# Create datasets
>>> content_dataset = turicreate.image_analysis.load_images('content_images/')
>>> style_dataset = turicreate.image_analysis.load_images('style_images/')
# Train a style transfer model
>>> model = turicreate.style_transfer.create(content_dataset, style_dataset)
# Stylize an image on all styles
>>> stylized_images = model.stylize(data)
# Visualize the stylized images
>>> stylized_images.explore()
"""
if len(style_dataset) == 0:
raise _ToolkitError("style_dataset SFrame cannot be empty")
if len(content_dataset) == 0:
raise _ToolkitError("content_dataset SFrame cannot be empty")
if(batch_size < 1):
raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from .._mxnet import _mxnet_utils
if style_feature is None:
style_feature = _tkutl._find_only_image_column(style_dataset)
if content_feature is None:
content_feature = _tkutl._find_only_image_column(content_dataset)
if verbose:
print("Using '{}' in style_dataset as feature column and using "
"'{}' in content_dataset as feature column".format(style_feature, content_feature))
_raise_error_if_not_training_sframe(style_dataset, style_feature)
_raise_error_if_not_training_sframe(content_dataset, content_feature)
params = {
'batch_size': batch_size,
'vgg16_content_loss_layer': 2, # conv3_3 layer
'lr': 0.001,
'content_loss_mult': 1.0,
'style_loss_mult': [1e-4, 1e-4, 1e-4, 1e-4], # conv 1-4 layers
'finetune_all_params': True,
'pretrained_weights': False,
'print_loss_breakdown': False,
'input_shape': (256, 256),
'training_content_loader_type': 'stretch',
'use_augmentation': False,
'sequential_image_processing': False,
# Only used if use_augmentaion is True
'aug_resize': 0,
'aug_min_object_covered': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_area_range': (.05, 1.5),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
_content_loss_mult = params['content_loss_mult']
_style_loss_mult = params['style_loss_mult']
num_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=params['batch_size'])
batch_size_each = params['batch_size'] // max(num_gpus, 1)
batch_size = max(num_gpus, 1) * batch_size_each
input_shape = params['input_shape']
iterations = 0
if max_iterations is None:
max_iterations = len(style_dataset) * 10000
if verbose:
print('Setting max_iterations to be {}'.format(max_iterations))
# data loader
if params['use_augmentation']:
content_loader_type = '%s-with-augmentation' % params['training_content_loader_type']
else:
content_loader_type = params['training_content_loader_type']
content_images_loader = _SFrameSTIter(content_dataset, batch_size, shuffle=True,
feature_column=content_feature, input_shape=input_shape,
loader_type=content_loader_type, aug_params=params,
sequential=params['sequential_image_processing'])
ctx = _mxnet_utils.get_mxnet_context(max_devices=params['batch_size'])
num_styles = len(style_dataset)
# TRANSFORMER MODEL
from ._model import Transformer as _Transformer
transformer_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS[model]().get_model_path()
transformer = _Transformer(num_styles, batch_size_each)
transformer.collect_params().initialize(ctx=ctx)
if params['pretrained_weights']:
transformer.load_params(transformer_model_path, ctx, allow_missing=True)
# For some reason, the transformer fails to hybridize for training, so we
# avoid this until resolved
# transformer.hybridize()
# VGG MODEL
from ._model import Vgg16 as _Vgg16
vgg_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS['Vgg16']().get_model_path()
vgg_model = _Vgg16()
vgg_model.collect_params().initialize(ctx=ctx)
vgg_model.load_params(vgg_model_path, ctx=ctx, ignore_extra=True)
vgg_model.hybridize()
# TRAINER
from mxnet import gluon as _gluon
from ._model import gram_matrix as _gram_matrix
if params['finetune_all_params']:
trainable_params = transformer.collect_params()
else:
trainable_params = transformer.collect_params('.*gamma|.*beta')
trainer = _gluon.Trainer(trainable_params, 'adam', {'learning_rate': params['lr']})
mse_loss = _gluon.loss.L2Loss()
start_time = _time.time()
smoothed_loss = None
last_time = 0
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=params['batch_size'])
num_mxnet_gpus = len(cuda_gpus)
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 260 + batch_size_each * 880 + num_styles * 1.4
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=False,
cuda_mem_req=cuda_mem_req, has_mps_impl=False)
#
# Pre-compute gram matrices for style images
#
if verbose:
print('Analyzing visual features of the style images')
style_images_loader = _SFrameSTIter(style_dataset, batch_size, shuffle=False, num_epochs=1,
feature_column=style_feature, input_shape=input_shape,
loader_type='stretch',
sequential=params['sequential_image_processing'])
num_layers = len(params['style_loss_mult'])
gram_chunks = [[] for _ in range(num_layers)]
for s_batch in style_images_loader:
s_data = _gluon.utils.split_and_load(s_batch.data[0], ctx_list=ctx, batch_axis=0)
for s in s_data:
vgg16_s = _vgg16_data_prep(s)
ret = vgg_model(vgg16_s)
grams = [_gram_matrix(x) for x in ret]
for i, gram in enumerate(grams):
if gram.context != _mx.cpu(0):
gram = gram.as_in_context(_mx.cpu(0))
gram_chunks[i].append(gram)
del style_images_loader
grams = [
# The concatenated styles may be padded, so we slice overflow
_mx.nd.concat(*chunks, dim=0)[:num_styles]
for chunks in gram_chunks
]
# A context->grams look-up table, where all the gram matrices have been
# distributed
ctx_grams = {}
if ctx[0] == _mx.cpu(0):
ctx_grams[_mx.cpu(0)] = grams
else:
for ctx0 in ctx:
ctx_grams[ctx0] = [gram.as_in_context(ctx0) for gram in grams]
#
# Training loop
#
vgg_content_loss_layer = params['vgg16_content_loss_layer']
rs = _np.random.RandomState(1234)
while iterations < max_iterations:
content_images_loader.reset()
for c_batch in content_images_loader:
c_data = _gluon.utils.split_and_load(c_batch.data[0], ctx_list=ctx, batch_axis=0)
Ls = []
curr_content_loss = []
curr_style_loss = []
with _mx.autograd.record():
for c in c_data:
# Randomize styles to train
indices = _mx.nd.array(rs.randint(num_styles, size=batch_size_each),
dtype=_np.int64, ctx=c.context)
# Generate pastiche
p = transformer(c, indices)
# mean subtraction
vgg16_p = _vgg16_data_prep(p)
vgg16_c = _vgg16_data_prep(c)
# vgg forward
p_vgg_outputs = vgg_model(vgg16_p)
c_vgg_outputs = vgg_model(vgg16_c)
c_content_layer = c_vgg_outputs[vgg_content_loss_layer]
p_content_layer = p_vgg_outputs[vgg_content_loss_layer]
# Calculate Loss
# Style Loss between style image and stylized image
# Ls = sum of L2 norm of gram matrix of vgg16's conv layers
style_losses = []
for gram, p_vgg_output, style_loss_mult in zip(ctx_grams[c.context], p_vgg_outputs, _style_loss_mult):
gram_s_vgg = gram[indices]
gram_p_vgg = _gram_matrix(p_vgg_output)
style_losses.append(style_loss_mult * mse_loss(gram_s_vgg, gram_p_vgg))
style_loss = _mx.nd.add_n(*style_losses)
# Content Loss between content image and stylized image
# Lc = L2 norm at a single layer in vgg16
content_loss = _content_loss_mult * mse_loss(c_content_layer,
p_content_layer)
curr_content_loss.append(content_loss)
curr_style_loss.append(style_loss)
# Divide loss by large number to get into a more legible
# range
total_loss = (content_loss + style_loss) / 10000.0
Ls.append(total_loss)
for L in Ls:
L.backward()
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
if smoothed_loss is None:
smoothed_loss = cur_loss
else:
smoothed_loss = 0.9 * smoothed_loss + 0.1 * cur_loss
iterations += 1
trainer.step(batch_size)
if verbose and iterations == 1:
# Print progress table header
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
cur_time = _time.time()
if verbose and (cur_time > last_time + 10 or iterations == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter = iterations, loss = smoothed_loss,
time = elapsed_time , width = column_width-1))
if params['print_loss_breakdown']:
print_content_loss = _np.mean([L.asnumpy()[0] for L in curr_content_loss])
print_style_loss = _np.mean([L.asnumpy()[0] for L in curr_style_loss])
print('Total Loss: {:6.3f} | Content Loss: {:6.3f} | Style Loss: {:6.3f}'.format(cur_loss, print_content_loss, print_style_loss))
last_time = cur_time
if iterations == max_iterations:
print(hr)
break
training_time = _time.time() - start_time
style_sa = style_dataset[style_feature]
idx_column = _tc.SArray(range(0, style_sa.shape[0]))
style_sframe = _tc.SFrame({"style": idx_column, style_feature: style_sa})
# Save the model state
state = {
'_model': transformer,
'_training_time_as_string': _seconds_as_string(training_time),
'batch_size': batch_size,
'num_styles': num_styles,
'model': model,
'input_image_shape': input_shape,
'styles': style_sframe,
'num_content_images': len(content_dataset),
'training_time': training_time,
'max_iterations': max_iterations,
'training_iterations': iterations,
'training_epochs': content_images_loader.cur_epoch,
'style_feature': style_feature,
'content_feature': content_feature,
"_index_column": "style",
'training_loss': smoothed_loss,
}
return StyleTransfer(state) | [
"def",
"create",
"(",
"style_dataset",
",",
"content_dataset",
",",
"style_feature",
"=",
"None",
",",
"content_feature",
"=",
"None",
",",
"max_iterations",
"=",
"None",
",",
"model",
"=",
"'resnet-16'",
",",
"verbose",
"=",
"True",
",",
"batch_size",
"=",
"6",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"style_dataset",
")",
"==",
"0",
":",
"raise",
"_ToolkitError",
"(",
"\"style_dataset SFrame cannot be empty\"",
")",
"if",
"len",
"(",
"content_dataset",
")",
"==",
"0",
":",
"raise",
"_ToolkitError",
"(",
"\"content_dataset SFrame cannot be empty\"",
")",
"if",
"(",
"batch_size",
"<",
"1",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"'batch_size' must be greater than or equal to 1\"",
")",
"from",
".",
"_sframe_loader",
"import",
"SFrameSTIter",
"as",
"_SFrameSTIter",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"if",
"style_feature",
"is",
"None",
":",
"style_feature",
"=",
"_tkutl",
".",
"_find_only_image_column",
"(",
"style_dataset",
")",
"if",
"content_feature",
"is",
"None",
":",
"content_feature",
"=",
"_tkutl",
".",
"_find_only_image_column",
"(",
"content_dataset",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Using '{}' in style_dataset as feature column and using \"",
"\"'{}' in content_dataset as feature column\"",
".",
"format",
"(",
"style_feature",
",",
"content_feature",
")",
")",
"_raise_error_if_not_training_sframe",
"(",
"style_dataset",
",",
"style_feature",
")",
"_raise_error_if_not_training_sframe",
"(",
"content_dataset",
",",
"content_feature",
")",
"params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
",",
"'vgg16_content_loss_layer'",
":",
"2",
",",
"# conv3_3 layer",
"'lr'",
":",
"0.001",
",",
"'content_loss_mult'",
":",
"1.0",
",",
"'style_loss_mult'",
":",
"[",
"1e-4",
",",
"1e-4",
",",
"1e-4",
",",
"1e-4",
"]",
",",
"# conv 1-4 layers",
"'finetune_all_params'",
":",
"True",
",",
"'pretrained_weights'",
":",
"False",
",",
"'print_loss_breakdown'",
":",
"False",
",",
"'input_shape'",
":",
"(",
"256",
",",
"256",
")",
",",
"'training_content_loader_type'",
":",
"'stretch'",
",",
"'use_augmentation'",
":",
"False",
",",
"'sequential_image_processing'",
":",
"False",
",",
"# Only used if use_augmentaion is True",
"'aug_resize'",
":",
"0",
",",
"'aug_min_object_covered'",
":",
"0",
",",
"'aug_rand_crop'",
":",
"0.9",
",",
"'aug_rand_pad'",
":",
"0.9",
",",
"'aug_rand_gray'",
":",
"0.0",
",",
"'aug_aspect_ratio'",
":",
"1.25",
",",
"'aug_hue'",
":",
"0.05",
",",
"'aug_brightness'",
":",
"0.05",
",",
"'aug_saturation'",
":",
"0.05",
",",
"'aug_contrast'",
":",
"0.05",
",",
"'aug_horizontal_flip'",
":",
"True",
",",
"'aug_area_range'",
":",
"(",
".05",
",",
"1.5",
")",
",",
"'aug_pca_noise'",
":",
"0.0",
",",
"'aug_max_attempts'",
":",
"20",
",",
"'aug_inter_method'",
":",
"2",
",",
"}",
"if",
"'_advanced_parameters'",
"in",
"kwargs",
":",
"# Make sure no additional parameters are provided",
"new_keys",
"=",
"set",
"(",
"kwargs",
"[",
"'_advanced_parameters'",
"]",
".",
"keys",
"(",
")",
")",
"set_keys",
"=",
"set",
"(",
"params",
".",
"keys",
"(",
")",
")",
"unsupported",
"=",
"new_keys",
"-",
"set_keys",
"if",
"unsupported",
":",
"raise",
"_ToolkitError",
"(",
"'Unknown advanced parameters: {}'",
".",
"format",
"(",
"unsupported",
")",
")",
"params",
".",
"update",
"(",
"kwargs",
"[",
"'_advanced_parameters'",
"]",
")",
"_content_loss_mult",
"=",
"params",
"[",
"'content_loss_mult'",
"]",
"_style_loss_mult",
"=",
"params",
"[",
"'style_loss_mult'",
"]",
"num_gpus",
"=",
"_mxnet_utils",
".",
"get_num_gpus_in_use",
"(",
"max_devices",
"=",
"params",
"[",
"'batch_size'",
"]",
")",
"batch_size_each",
"=",
"params",
"[",
"'batch_size'",
"]",
"//",
"max",
"(",
"num_gpus",
",",
"1",
")",
"batch_size",
"=",
"max",
"(",
"num_gpus",
",",
"1",
")",
"*",
"batch_size_each",
"input_shape",
"=",
"params",
"[",
"'input_shape'",
"]",
"iterations",
"=",
"0",
"if",
"max_iterations",
"is",
"None",
":",
"max_iterations",
"=",
"len",
"(",
"style_dataset",
")",
"*",
"10000",
"if",
"verbose",
":",
"print",
"(",
"'Setting max_iterations to be {}'",
".",
"format",
"(",
"max_iterations",
")",
")",
"# data loader",
"if",
"params",
"[",
"'use_augmentation'",
"]",
":",
"content_loader_type",
"=",
"'%s-with-augmentation'",
"%",
"params",
"[",
"'training_content_loader_type'",
"]",
"else",
":",
"content_loader_type",
"=",
"params",
"[",
"'training_content_loader_type'",
"]",
"content_images_loader",
"=",
"_SFrameSTIter",
"(",
"content_dataset",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
",",
"feature_column",
"=",
"content_feature",
",",
"input_shape",
"=",
"input_shape",
",",
"loader_type",
"=",
"content_loader_type",
",",
"aug_params",
"=",
"params",
",",
"sequential",
"=",
"params",
"[",
"'sequential_image_processing'",
"]",
")",
"ctx",
"=",
"_mxnet_utils",
".",
"get_mxnet_context",
"(",
"max_devices",
"=",
"params",
"[",
"'batch_size'",
"]",
")",
"num_styles",
"=",
"len",
"(",
"style_dataset",
")",
"# TRANSFORMER MODEL",
"from",
".",
"_model",
"import",
"Transformer",
"as",
"_Transformer",
"transformer_model_path",
"=",
"_pre_trained_models",
".",
"STYLE_TRANSFER_BASE_MODELS",
"[",
"model",
"]",
"(",
")",
".",
"get_model_path",
"(",
")",
"transformer",
"=",
"_Transformer",
"(",
"num_styles",
",",
"batch_size_each",
")",
"transformer",
".",
"collect_params",
"(",
")",
".",
"initialize",
"(",
"ctx",
"=",
"ctx",
")",
"if",
"params",
"[",
"'pretrained_weights'",
"]",
":",
"transformer",
".",
"load_params",
"(",
"transformer_model_path",
",",
"ctx",
",",
"allow_missing",
"=",
"True",
")",
"# For some reason, the transformer fails to hybridize for training, so we",
"# avoid this until resolved",
"# transformer.hybridize()",
"# VGG MODEL",
"from",
".",
"_model",
"import",
"Vgg16",
"as",
"_Vgg16",
"vgg_model_path",
"=",
"_pre_trained_models",
".",
"STYLE_TRANSFER_BASE_MODELS",
"[",
"'Vgg16'",
"]",
"(",
")",
".",
"get_model_path",
"(",
")",
"vgg_model",
"=",
"_Vgg16",
"(",
")",
"vgg_model",
".",
"collect_params",
"(",
")",
".",
"initialize",
"(",
"ctx",
"=",
"ctx",
")",
"vgg_model",
".",
"load_params",
"(",
"vgg_model_path",
",",
"ctx",
"=",
"ctx",
",",
"ignore_extra",
"=",
"True",
")",
"vgg_model",
".",
"hybridize",
"(",
")",
"# TRAINER",
"from",
"mxnet",
"import",
"gluon",
"as",
"_gluon",
"from",
".",
"_model",
"import",
"gram_matrix",
"as",
"_gram_matrix",
"if",
"params",
"[",
"'finetune_all_params'",
"]",
":",
"trainable_params",
"=",
"transformer",
".",
"collect_params",
"(",
")",
"else",
":",
"trainable_params",
"=",
"transformer",
".",
"collect_params",
"(",
"'.*gamma|.*beta'",
")",
"trainer",
"=",
"_gluon",
".",
"Trainer",
"(",
"trainable_params",
",",
"'adam'",
",",
"{",
"'learning_rate'",
":",
"params",
"[",
"'lr'",
"]",
"}",
")",
"mse_loss",
"=",
"_gluon",
".",
"loss",
".",
"L2Loss",
"(",
")",
"start_time",
"=",
"_time",
".",
"time",
"(",
")",
"smoothed_loss",
"=",
"None",
"last_time",
"=",
"0",
"cuda_gpus",
"=",
"_mxnet_utils",
".",
"get_gpus_in_use",
"(",
"max_devices",
"=",
"params",
"[",
"'batch_size'",
"]",
")",
"num_mxnet_gpus",
"=",
"len",
"(",
"cuda_gpus",
")",
"if",
"verbose",
":",
"# Estimate memory usage (based on experiments)",
"cuda_mem_req",
"=",
"260",
"+",
"batch_size_each",
"*",
"880",
"+",
"num_styles",
"*",
"1.4",
"_tkutl",
".",
"_print_neural_compute_device",
"(",
"cuda_gpus",
"=",
"cuda_gpus",
",",
"use_mps",
"=",
"False",
",",
"cuda_mem_req",
"=",
"cuda_mem_req",
",",
"has_mps_impl",
"=",
"False",
")",
"#",
"# Pre-compute gram matrices for style images",
"#",
"if",
"verbose",
":",
"print",
"(",
"'Analyzing visual features of the style images'",
")",
"style_images_loader",
"=",
"_SFrameSTIter",
"(",
"style_dataset",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
",",
"num_epochs",
"=",
"1",
",",
"feature_column",
"=",
"style_feature",
",",
"input_shape",
"=",
"input_shape",
",",
"loader_type",
"=",
"'stretch'",
",",
"sequential",
"=",
"params",
"[",
"'sequential_image_processing'",
"]",
")",
"num_layers",
"=",
"len",
"(",
"params",
"[",
"'style_loss_mult'",
"]",
")",
"gram_chunks",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"num_layers",
")",
"]",
"for",
"s_batch",
"in",
"style_images_loader",
":",
"s_data",
"=",
"_gluon",
".",
"utils",
".",
"split_and_load",
"(",
"s_batch",
".",
"data",
"[",
"0",
"]",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"for",
"s",
"in",
"s_data",
":",
"vgg16_s",
"=",
"_vgg16_data_prep",
"(",
"s",
")",
"ret",
"=",
"vgg_model",
"(",
"vgg16_s",
")",
"grams",
"=",
"[",
"_gram_matrix",
"(",
"x",
")",
"for",
"x",
"in",
"ret",
"]",
"for",
"i",
",",
"gram",
"in",
"enumerate",
"(",
"grams",
")",
":",
"if",
"gram",
".",
"context",
"!=",
"_mx",
".",
"cpu",
"(",
"0",
")",
":",
"gram",
"=",
"gram",
".",
"as_in_context",
"(",
"_mx",
".",
"cpu",
"(",
"0",
")",
")",
"gram_chunks",
"[",
"i",
"]",
".",
"append",
"(",
"gram",
")",
"del",
"style_images_loader",
"grams",
"=",
"[",
"# The concatenated styles may be padded, so we slice overflow",
"_mx",
".",
"nd",
".",
"concat",
"(",
"*",
"chunks",
",",
"dim",
"=",
"0",
")",
"[",
":",
"num_styles",
"]",
"for",
"chunks",
"in",
"gram_chunks",
"]",
"# A context->grams look-up table, where all the gram matrices have been",
"# distributed",
"ctx_grams",
"=",
"{",
"}",
"if",
"ctx",
"[",
"0",
"]",
"==",
"_mx",
".",
"cpu",
"(",
"0",
")",
":",
"ctx_grams",
"[",
"_mx",
".",
"cpu",
"(",
"0",
")",
"]",
"=",
"grams",
"else",
":",
"for",
"ctx0",
"in",
"ctx",
":",
"ctx_grams",
"[",
"ctx0",
"]",
"=",
"[",
"gram",
".",
"as_in_context",
"(",
"ctx0",
")",
"for",
"gram",
"in",
"grams",
"]",
"#",
"# Training loop",
"#",
"vgg_content_loss_layer",
"=",
"params",
"[",
"'vgg16_content_loss_layer'",
"]",
"rs",
"=",
"_np",
".",
"random",
".",
"RandomState",
"(",
"1234",
")",
"while",
"iterations",
"<",
"max_iterations",
":",
"content_images_loader",
".",
"reset",
"(",
")",
"for",
"c_batch",
"in",
"content_images_loader",
":",
"c_data",
"=",
"_gluon",
".",
"utils",
".",
"split_and_load",
"(",
"c_batch",
".",
"data",
"[",
"0",
"]",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"Ls",
"=",
"[",
"]",
"curr_content_loss",
"=",
"[",
"]",
"curr_style_loss",
"=",
"[",
"]",
"with",
"_mx",
".",
"autograd",
".",
"record",
"(",
")",
":",
"for",
"c",
"in",
"c_data",
":",
"# Randomize styles to train",
"indices",
"=",
"_mx",
".",
"nd",
".",
"array",
"(",
"rs",
".",
"randint",
"(",
"num_styles",
",",
"size",
"=",
"batch_size_each",
")",
",",
"dtype",
"=",
"_np",
".",
"int64",
",",
"ctx",
"=",
"c",
".",
"context",
")",
"# Generate pastiche",
"p",
"=",
"transformer",
"(",
"c",
",",
"indices",
")",
"# mean subtraction",
"vgg16_p",
"=",
"_vgg16_data_prep",
"(",
"p",
")",
"vgg16_c",
"=",
"_vgg16_data_prep",
"(",
"c",
")",
"# vgg forward",
"p_vgg_outputs",
"=",
"vgg_model",
"(",
"vgg16_p",
")",
"c_vgg_outputs",
"=",
"vgg_model",
"(",
"vgg16_c",
")",
"c_content_layer",
"=",
"c_vgg_outputs",
"[",
"vgg_content_loss_layer",
"]",
"p_content_layer",
"=",
"p_vgg_outputs",
"[",
"vgg_content_loss_layer",
"]",
"# Calculate Loss",
"# Style Loss between style image and stylized image",
"# Ls = sum of L2 norm of gram matrix of vgg16's conv layers",
"style_losses",
"=",
"[",
"]",
"for",
"gram",
",",
"p_vgg_output",
",",
"style_loss_mult",
"in",
"zip",
"(",
"ctx_grams",
"[",
"c",
".",
"context",
"]",
",",
"p_vgg_outputs",
",",
"_style_loss_mult",
")",
":",
"gram_s_vgg",
"=",
"gram",
"[",
"indices",
"]",
"gram_p_vgg",
"=",
"_gram_matrix",
"(",
"p_vgg_output",
")",
"style_losses",
".",
"append",
"(",
"style_loss_mult",
"*",
"mse_loss",
"(",
"gram_s_vgg",
",",
"gram_p_vgg",
")",
")",
"style_loss",
"=",
"_mx",
".",
"nd",
".",
"add_n",
"(",
"*",
"style_losses",
")",
"# Content Loss between content image and stylized image",
"# Lc = L2 norm at a single layer in vgg16",
"content_loss",
"=",
"_content_loss_mult",
"*",
"mse_loss",
"(",
"c_content_layer",
",",
"p_content_layer",
")",
"curr_content_loss",
".",
"append",
"(",
"content_loss",
")",
"curr_style_loss",
".",
"append",
"(",
"style_loss",
")",
"# Divide loss by large number to get into a more legible",
"# range",
"total_loss",
"=",
"(",
"content_loss",
"+",
"style_loss",
")",
"/",
"10000.0",
"Ls",
".",
"append",
"(",
"total_loss",
")",
"for",
"L",
"in",
"Ls",
":",
"L",
".",
"backward",
"(",
")",
"cur_loss",
"=",
"_np",
".",
"mean",
"(",
"[",
"L",
".",
"asnumpy",
"(",
")",
"[",
"0",
"]",
"for",
"L",
"in",
"Ls",
"]",
")",
"if",
"smoothed_loss",
"is",
"None",
":",
"smoothed_loss",
"=",
"cur_loss",
"else",
":",
"smoothed_loss",
"=",
"0.9",
"*",
"smoothed_loss",
"+",
"0.1",
"*",
"cur_loss",
"iterations",
"+=",
"1",
"trainer",
".",
"step",
"(",
"batch_size",
")",
"if",
"verbose",
"and",
"iterations",
"==",
"1",
":",
"# Print progress table header",
"column_names",
"=",
"[",
"'Iteration'",
",",
"'Loss'",
",",
"'Elapsed Time'",
"]",
"num_columns",
"=",
"len",
"(",
"column_names",
")",
"column_width",
"=",
"max",
"(",
"map",
"(",
"lambda",
"x",
":",
"len",
"(",
"x",
")",
",",
"column_names",
")",
")",
"+",
"2",
"hr",
"=",
"'+'",
"+",
"'+'",
".",
"join",
"(",
"[",
"'-'",
"*",
"column_width",
"]",
"*",
"num_columns",
")",
"+",
"'+'",
"print",
"(",
"hr",
")",
"print",
"(",
"(",
"'| {:<{width}}'",
"*",
"num_columns",
"+",
"'|'",
")",
".",
"format",
"(",
"*",
"column_names",
",",
"width",
"=",
"column_width",
"-",
"1",
")",
")",
"print",
"(",
"hr",
")",
"cur_time",
"=",
"_time",
".",
"time",
"(",
")",
"if",
"verbose",
"and",
"(",
"cur_time",
">",
"last_time",
"+",
"10",
"or",
"iterations",
"==",
"max_iterations",
")",
":",
"# Print progress table row",
"elapsed_time",
"=",
"cur_time",
"-",
"start_time",
"print",
"(",
"\"| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|\"",
".",
"format",
"(",
"cur_iter",
"=",
"iterations",
",",
"loss",
"=",
"smoothed_loss",
",",
"time",
"=",
"elapsed_time",
",",
"width",
"=",
"column_width",
"-",
"1",
")",
")",
"if",
"params",
"[",
"'print_loss_breakdown'",
"]",
":",
"print_content_loss",
"=",
"_np",
".",
"mean",
"(",
"[",
"L",
".",
"asnumpy",
"(",
")",
"[",
"0",
"]",
"for",
"L",
"in",
"curr_content_loss",
"]",
")",
"print_style_loss",
"=",
"_np",
".",
"mean",
"(",
"[",
"L",
".",
"asnumpy",
"(",
")",
"[",
"0",
"]",
"for",
"L",
"in",
"curr_style_loss",
"]",
")",
"print",
"(",
"'Total Loss: {:6.3f} | Content Loss: {:6.3f} | Style Loss: {:6.3f}'",
".",
"format",
"(",
"cur_loss",
",",
"print_content_loss",
",",
"print_style_loss",
")",
")",
"last_time",
"=",
"cur_time",
"if",
"iterations",
"==",
"max_iterations",
":",
"print",
"(",
"hr",
")",
"break",
"training_time",
"=",
"_time",
".",
"time",
"(",
")",
"-",
"start_time",
"style_sa",
"=",
"style_dataset",
"[",
"style_feature",
"]",
"idx_column",
"=",
"_tc",
".",
"SArray",
"(",
"range",
"(",
"0",
",",
"style_sa",
".",
"shape",
"[",
"0",
"]",
")",
")",
"style_sframe",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"\"style\"",
":",
"idx_column",
",",
"style_feature",
":",
"style_sa",
"}",
")",
"# Save the model state",
"state",
"=",
"{",
"'_model'",
":",
"transformer",
",",
"'_training_time_as_string'",
":",
"_seconds_as_string",
"(",
"training_time",
")",
",",
"'batch_size'",
":",
"batch_size",
",",
"'num_styles'",
":",
"num_styles",
",",
"'model'",
":",
"model",
",",
"'input_image_shape'",
":",
"input_shape",
",",
"'styles'",
":",
"style_sframe",
",",
"'num_content_images'",
":",
"len",
"(",
"content_dataset",
")",
",",
"'training_time'",
":",
"training_time",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'training_iterations'",
":",
"iterations",
",",
"'training_epochs'",
":",
"content_images_loader",
".",
"cur_epoch",
",",
"'style_feature'",
":",
"style_feature",
",",
"'content_feature'",
":",
"content_feature",
",",
"\"_index_column\"",
":",
"\"style\"",
",",
"'training_loss'",
":",
"smoothed_loss",
",",
"}",
"return",
"StyleTransfer",
"(",
"state",
")"
] | Create a :class:`StyleTransfer` model.
Parameters
----------
style_dataset: SFrame
Input style images. The columns named by the ``style_feature`` parameters will
be extracted for training the model.
content_dataset : SFrame
Input content images. The columns named by the ``content_feature`` parameters will
be extracted for training the model.
style_feature: string
Name of the column containing the input images in style SFrame.
'None' (the default) indicates the only image column in the style SFrame
should be used as the feature.
content_feature: string
Name of the column containing the input images in content SFrame.
'None' (the default) indicates the only image column in the content
SFrame should be used as the feature.
max_iterations : int
The number of training iterations. If 'None' (the default), then it will
be automatically determined based on the amount of data you provide.
model : string optional
Style transfer model to use:
- "resnet-16" : Fast and small-sized residual network that uses
VGG-16 as reference network during training.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve training
throughput.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : StyleTransfer
A trained :class:`StyleTransfer` model.
See Also
--------
StyleTransfer
Examples
--------
.. sourcecode:: python
# Create datasets
>>> content_dataset = turicreate.image_analysis.load_images('content_images/')
>>> style_dataset = turicreate.image_analysis.load_images('style_images/')
# Train a style transfer model
>>> model = turicreate.style_transfer.create(content_dataset, style_dataset)
# Stylize an image on all styles
>>> stylized_images = model.stylize(data)
# Visualize the stylized images
>>> stylized_images.explore() | [
"Create",
"a",
":",
"class",
":",
"StyleTransfer",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L35-L402 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py | StyleTransfer._canonize_content_input | def _canonize_content_input(self, dataset, single_style):
"""
Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization.
"""
unpack = lambda x: x
if isinstance(dataset, _tc.SArray):
dataset = _tc.SFrame({self.content_feature: dataset})
if single_style:
unpack = lambda sf: sf['stylized_' + self.content_feature]
elif isinstance(dataset, _tc.Image):
dataset = _tc.SFrame({self.content_feature: [dataset]})
if single_style:
unpack = lambda sf: sf['stylized_' + self.content_feature][0]
return dataset, unpack | python | def _canonize_content_input(self, dataset, single_style):
"""
Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization.
"""
unpack = lambda x: x
if isinstance(dataset, _tc.SArray):
dataset = _tc.SFrame({self.content_feature: dataset})
if single_style:
unpack = lambda sf: sf['stylized_' + self.content_feature]
elif isinstance(dataset, _tc.Image):
dataset = _tc.SFrame({self.content_feature: [dataset]})
if single_style:
unpack = lambda sf: sf['stylized_' + self.content_feature][0]
return dataset, unpack | [
"def",
"_canonize_content_input",
"(",
"self",
",",
"dataset",
",",
"single_style",
")",
":",
"unpack",
"=",
"lambda",
"x",
":",
"x",
"if",
"isinstance",
"(",
"dataset",
",",
"_tc",
".",
"SArray",
")",
":",
"dataset",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"self",
".",
"content_feature",
":",
"dataset",
"}",
")",
"if",
"single_style",
":",
"unpack",
"=",
"lambda",
"sf",
":",
"sf",
"[",
"'stylized_'",
"+",
"self",
".",
"content_feature",
"]",
"elif",
"isinstance",
"(",
"dataset",
",",
"_tc",
".",
"Image",
")",
":",
"dataset",
"=",
"_tc",
".",
"SFrame",
"(",
"{",
"self",
".",
"content_feature",
":",
"[",
"dataset",
"]",
"}",
")",
"if",
"single_style",
":",
"unpack",
"=",
"lambda",
"sf",
":",
"sf",
"[",
"'stylized_'",
"+",
"self",
".",
"content_feature",
"]",
"[",
"0",
"]",
"return",
"dataset",
",",
"unpack"
] | Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization. | [
"Takes",
"input",
"and",
"returns",
"tuple",
"of",
"the",
"input",
"in",
"canonical",
"form",
"(",
"SFrame",
")",
"along",
"with",
"an",
"unpack",
"callback",
"function",
"that",
"can",
"be",
"applied",
"to",
"prediction",
"results",
"to",
"undo",
"the",
"canonization",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L542-L557 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py | StyleTransfer.stylize | def stylize(self, images, style=None, verbose=True, max_size=800, batch_size = 4):
"""
Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns]
"""
if(batch_size < 1):
raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from mxnet import gluon as _gluon
from .._mxnet import _mxnet_utils
set_of_all_idx = self._style_indices()
style, single_style = self._style_input_check(style)
if isinstance(max_size, _six.integer_types):
input_shape = (max_size, max_size)
else:
# Outward-facing, we use (width, height), but internally we use
# (height, width)
input_shape = max_size[::-1]
images, unpack = self._canonize_content_input(images, single_style=single_style)
dataset_size = len(images)
output_size = dataset_size * len(style)
batch_size_each = min(batch_size, output_size)
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=batch_size_each)
if num_mxnet_gpus == 0:
# CPU processing prefers native size to prevent stylizing
# unnecessary regions
batch_size_each = 1
loader_type = 'favor-native-size'
else:
# GPU processing prefers batches of same size, using padding
# for smaller images
loader_type = 'pad'
self._model.batch_size = batch_size_each
self._model.hybridize()
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size_each)
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
last_time = 0
if dataset_size == 0:
raise _ToolkitError("SFrame cannot be empty")
content_feature = _tkutl._find_only_image_column(images)
_raise_error_if_not_training_sframe(images, content_feature)
max_h = 0
max_w = 0
oversized_count = 0
for img in images[content_feature]:
if img.height > input_shape[0] or img.width > input_shape[1]:
oversized_count += 1
max_h = max(img.height, max_h)
max_w = max(img.width, max_w)
if input_shape[0] > max_h:
input_shape = (max_h, input_shape[1])
if input_shape[1] > max_w:
input_shape = (input_shape[0], max_w)
# If we find large images, let's switch to sequential iterator
# pre-processing, to prevent memory issues.
sequential = max(max_h, max_w) > 2000
if verbose and output_size != 1:
print('Stylizing {} image(s) using {} style(s)'.format(dataset_size, len(style)))
if oversized_count > 0:
print('Scaling down {} image(s) exceeding {}x{}'.format(oversized_count, input_shape[1], input_shape[0]))
content_images_loader = _SFrameSTIter(images, batch_size,
shuffle=False,
feature_column=content_feature,
input_shape=input_shape,
num_epochs=1,
loader_type=loader_type,
repeat_each_image=len(style),
sequential=sequential)
sb = _tc.SFrameBuilder([int, int, _tc.Image],
column_names=['row_id', 'style', 'stylized_{}'.format(self.content_feature)])
count = 0
for i, batch in enumerate(content_images_loader):
if loader_type == 'favor-native-size':
c_data = [batch.data[0][0].expand_dims(0)]
else:
c_data = _gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
indices_data = _gluon.utils.split_and_load(_mx.nd.array(batch.repeat_indices, dtype=_np.int64),
ctx_list=ctx, batch_axis=0)
outputs = []
for b_img, b_indices in zip(c_data, indices_data):
mx_style = _mx.nd.array(style, dtype=_np.int64, ctx=b_indices.context)
b_batch_styles = mx_style[b_indices]
output = self._model(b_img, b_batch_styles)
outputs.append(output)
image_data = _np.concatenate([
(output.asnumpy().transpose(0, 2, 3, 1) * 255).astype(_np.uint8)
for output in outputs], axis=0)
batch_styles = [style[idx] for idx in batch.repeat_indices]
for b in range(batch_size - (batch.pad or 0)):
image = image_data[b]
# Crop to remove added padding
crop = batch.crop[b]
cropped_image = image[crop[0]:crop[1], crop[2]:crop[3]]
tc_img = _tc.Image(_image_data=cropped_image.tobytes(),
_width=cropped_image.shape[1],
_height=cropped_image.shape[0],
_channels=cropped_image.shape[2],
_format_enum=2,
_image_data_size=cropped_image.size)
sb.append([batch.indices[b], batch_styles[b], tc_img])
count += 1
cur_time = _time.time()
if verbose and output_size != 1 and (cur_time > last_time + 10 or count == output_size):
print('Stylizing {curr_image:{width}d}/{max_n:{width}d}'.
format(curr_image=count, max_n=output_size, width=len(str(output_size))))
last_time = cur_time
return unpack(sb.close()) | python | def stylize(self, images, style=None, verbose=True, max_size=800, batch_size = 4):
"""
Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns]
"""
if(batch_size < 1):
raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from mxnet import gluon as _gluon
from .._mxnet import _mxnet_utils
set_of_all_idx = self._style_indices()
style, single_style = self._style_input_check(style)
if isinstance(max_size, _six.integer_types):
input_shape = (max_size, max_size)
else:
# Outward-facing, we use (width, height), but internally we use
# (height, width)
input_shape = max_size[::-1]
images, unpack = self._canonize_content_input(images, single_style=single_style)
dataset_size = len(images)
output_size = dataset_size * len(style)
batch_size_each = min(batch_size, output_size)
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=batch_size_each)
if num_mxnet_gpus == 0:
# CPU processing prefers native size to prevent stylizing
# unnecessary regions
batch_size_each = 1
loader_type = 'favor-native-size'
else:
# GPU processing prefers batches of same size, using padding
# for smaller images
loader_type = 'pad'
self._model.batch_size = batch_size_each
self._model.hybridize()
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size_each)
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
last_time = 0
if dataset_size == 0:
raise _ToolkitError("SFrame cannot be empty")
content_feature = _tkutl._find_only_image_column(images)
_raise_error_if_not_training_sframe(images, content_feature)
max_h = 0
max_w = 0
oversized_count = 0
for img in images[content_feature]:
if img.height > input_shape[0] or img.width > input_shape[1]:
oversized_count += 1
max_h = max(img.height, max_h)
max_w = max(img.width, max_w)
if input_shape[0] > max_h:
input_shape = (max_h, input_shape[1])
if input_shape[1] > max_w:
input_shape = (input_shape[0], max_w)
# If we find large images, let's switch to sequential iterator
# pre-processing, to prevent memory issues.
sequential = max(max_h, max_w) > 2000
if verbose and output_size != 1:
print('Stylizing {} image(s) using {} style(s)'.format(dataset_size, len(style)))
if oversized_count > 0:
print('Scaling down {} image(s) exceeding {}x{}'.format(oversized_count, input_shape[1], input_shape[0]))
content_images_loader = _SFrameSTIter(images, batch_size,
shuffle=False,
feature_column=content_feature,
input_shape=input_shape,
num_epochs=1,
loader_type=loader_type,
repeat_each_image=len(style),
sequential=sequential)
sb = _tc.SFrameBuilder([int, int, _tc.Image],
column_names=['row_id', 'style', 'stylized_{}'.format(self.content_feature)])
count = 0
for i, batch in enumerate(content_images_loader):
if loader_type == 'favor-native-size':
c_data = [batch.data[0][0].expand_dims(0)]
else:
c_data = _gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
indices_data = _gluon.utils.split_and_load(_mx.nd.array(batch.repeat_indices, dtype=_np.int64),
ctx_list=ctx, batch_axis=0)
outputs = []
for b_img, b_indices in zip(c_data, indices_data):
mx_style = _mx.nd.array(style, dtype=_np.int64, ctx=b_indices.context)
b_batch_styles = mx_style[b_indices]
output = self._model(b_img, b_batch_styles)
outputs.append(output)
image_data = _np.concatenate([
(output.asnumpy().transpose(0, 2, 3, 1) * 255).astype(_np.uint8)
for output in outputs], axis=0)
batch_styles = [style[idx] for idx in batch.repeat_indices]
for b in range(batch_size - (batch.pad or 0)):
image = image_data[b]
# Crop to remove added padding
crop = batch.crop[b]
cropped_image = image[crop[0]:crop[1], crop[2]:crop[3]]
tc_img = _tc.Image(_image_data=cropped_image.tobytes(),
_width=cropped_image.shape[1],
_height=cropped_image.shape[0],
_channels=cropped_image.shape[2],
_format_enum=2,
_image_data_size=cropped_image.size)
sb.append([batch.indices[b], batch_styles[b], tc_img])
count += 1
cur_time = _time.time()
if verbose and output_size != 1 and (cur_time > last_time + 10 or count == output_size):
print('Stylizing {curr_image:{width}d}/{max_n:{width}d}'.
format(curr_image=count, max_n=output_size, width=len(str(output_size))))
last_time = cur_time
return unpack(sb.close()) | [
"def",
"stylize",
"(",
"self",
",",
"images",
",",
"style",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"max_size",
"=",
"800",
",",
"batch_size",
"=",
"4",
")",
":",
"if",
"(",
"batch_size",
"<",
"1",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"'batch_size' must be greater than or equal to 1\"",
")",
"from",
".",
"_sframe_loader",
"import",
"SFrameSTIter",
"as",
"_SFrameSTIter",
"import",
"mxnet",
"as",
"_mx",
"from",
"mxnet",
"import",
"gluon",
"as",
"_gluon",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"set_of_all_idx",
"=",
"self",
".",
"_style_indices",
"(",
")",
"style",
",",
"single_style",
"=",
"self",
".",
"_style_input_check",
"(",
"style",
")",
"if",
"isinstance",
"(",
"max_size",
",",
"_six",
".",
"integer_types",
")",
":",
"input_shape",
"=",
"(",
"max_size",
",",
"max_size",
")",
"else",
":",
"# Outward-facing, we use (width, height), but internally we use",
"# (height, width)",
"input_shape",
"=",
"max_size",
"[",
":",
":",
"-",
"1",
"]",
"images",
",",
"unpack",
"=",
"self",
".",
"_canonize_content_input",
"(",
"images",
",",
"single_style",
"=",
"single_style",
")",
"dataset_size",
"=",
"len",
"(",
"images",
")",
"output_size",
"=",
"dataset_size",
"*",
"len",
"(",
"style",
")",
"batch_size_each",
"=",
"min",
"(",
"batch_size",
",",
"output_size",
")",
"num_mxnet_gpus",
"=",
"_mxnet_utils",
".",
"get_num_gpus_in_use",
"(",
"max_devices",
"=",
"batch_size_each",
")",
"if",
"num_mxnet_gpus",
"==",
"0",
":",
"# CPU processing prefers native size to prevent stylizing",
"# unnecessary regions",
"batch_size_each",
"=",
"1",
"loader_type",
"=",
"'favor-native-size'",
"else",
":",
"# GPU processing prefers batches of same size, using padding",
"# for smaller images",
"loader_type",
"=",
"'pad'",
"self",
".",
"_model",
".",
"batch_size",
"=",
"batch_size_each",
"self",
".",
"_model",
".",
"hybridize",
"(",
")",
"ctx",
"=",
"_mxnet_utils",
".",
"get_mxnet_context",
"(",
"max_devices",
"=",
"batch_size_each",
")",
"batch_size",
"=",
"max",
"(",
"num_mxnet_gpus",
",",
"1",
")",
"*",
"batch_size_each",
"last_time",
"=",
"0",
"if",
"dataset_size",
"==",
"0",
":",
"raise",
"_ToolkitError",
"(",
"\"SFrame cannot be empty\"",
")",
"content_feature",
"=",
"_tkutl",
".",
"_find_only_image_column",
"(",
"images",
")",
"_raise_error_if_not_training_sframe",
"(",
"images",
",",
"content_feature",
")",
"max_h",
"=",
"0",
"max_w",
"=",
"0",
"oversized_count",
"=",
"0",
"for",
"img",
"in",
"images",
"[",
"content_feature",
"]",
":",
"if",
"img",
".",
"height",
">",
"input_shape",
"[",
"0",
"]",
"or",
"img",
".",
"width",
">",
"input_shape",
"[",
"1",
"]",
":",
"oversized_count",
"+=",
"1",
"max_h",
"=",
"max",
"(",
"img",
".",
"height",
",",
"max_h",
")",
"max_w",
"=",
"max",
"(",
"img",
".",
"width",
",",
"max_w",
")",
"if",
"input_shape",
"[",
"0",
"]",
">",
"max_h",
":",
"input_shape",
"=",
"(",
"max_h",
",",
"input_shape",
"[",
"1",
"]",
")",
"if",
"input_shape",
"[",
"1",
"]",
">",
"max_w",
":",
"input_shape",
"=",
"(",
"input_shape",
"[",
"0",
"]",
",",
"max_w",
")",
"# If we find large images, let's switch to sequential iterator",
"# pre-processing, to prevent memory issues.",
"sequential",
"=",
"max",
"(",
"max_h",
",",
"max_w",
")",
">",
"2000",
"if",
"verbose",
"and",
"output_size",
"!=",
"1",
":",
"print",
"(",
"'Stylizing {} image(s) using {} style(s)'",
".",
"format",
"(",
"dataset_size",
",",
"len",
"(",
"style",
")",
")",
")",
"if",
"oversized_count",
">",
"0",
":",
"print",
"(",
"'Scaling down {} image(s) exceeding {}x{}'",
".",
"format",
"(",
"oversized_count",
",",
"input_shape",
"[",
"1",
"]",
",",
"input_shape",
"[",
"0",
"]",
")",
")",
"content_images_loader",
"=",
"_SFrameSTIter",
"(",
"images",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
",",
"feature_column",
"=",
"content_feature",
",",
"input_shape",
"=",
"input_shape",
",",
"num_epochs",
"=",
"1",
",",
"loader_type",
"=",
"loader_type",
",",
"repeat_each_image",
"=",
"len",
"(",
"style",
")",
",",
"sequential",
"=",
"sequential",
")",
"sb",
"=",
"_tc",
".",
"SFrameBuilder",
"(",
"[",
"int",
",",
"int",
",",
"_tc",
".",
"Image",
"]",
",",
"column_names",
"=",
"[",
"'row_id'",
",",
"'style'",
",",
"'stylized_{}'",
".",
"format",
"(",
"self",
".",
"content_feature",
")",
"]",
")",
"count",
"=",
"0",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"content_images_loader",
")",
":",
"if",
"loader_type",
"==",
"'favor-native-size'",
":",
"c_data",
"=",
"[",
"batch",
".",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"expand_dims",
"(",
"0",
")",
"]",
"else",
":",
"c_data",
"=",
"_gluon",
".",
"utils",
".",
"split_and_load",
"(",
"batch",
".",
"data",
"[",
"0",
"]",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"indices_data",
"=",
"_gluon",
".",
"utils",
".",
"split_and_load",
"(",
"_mx",
".",
"nd",
".",
"array",
"(",
"batch",
".",
"repeat_indices",
",",
"dtype",
"=",
"_np",
".",
"int64",
")",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"outputs",
"=",
"[",
"]",
"for",
"b_img",
",",
"b_indices",
"in",
"zip",
"(",
"c_data",
",",
"indices_data",
")",
":",
"mx_style",
"=",
"_mx",
".",
"nd",
".",
"array",
"(",
"style",
",",
"dtype",
"=",
"_np",
".",
"int64",
",",
"ctx",
"=",
"b_indices",
".",
"context",
")",
"b_batch_styles",
"=",
"mx_style",
"[",
"b_indices",
"]",
"output",
"=",
"self",
".",
"_model",
"(",
"b_img",
",",
"b_batch_styles",
")",
"outputs",
".",
"append",
"(",
"output",
")",
"image_data",
"=",
"_np",
".",
"concatenate",
"(",
"[",
"(",
"output",
".",
"asnumpy",
"(",
")",
".",
"transpose",
"(",
"0",
",",
"2",
",",
"3",
",",
"1",
")",
"*",
"255",
")",
".",
"astype",
"(",
"_np",
".",
"uint8",
")",
"for",
"output",
"in",
"outputs",
"]",
",",
"axis",
"=",
"0",
")",
"batch_styles",
"=",
"[",
"style",
"[",
"idx",
"]",
"for",
"idx",
"in",
"batch",
".",
"repeat_indices",
"]",
"for",
"b",
"in",
"range",
"(",
"batch_size",
"-",
"(",
"batch",
".",
"pad",
"or",
"0",
")",
")",
":",
"image",
"=",
"image_data",
"[",
"b",
"]",
"# Crop to remove added padding",
"crop",
"=",
"batch",
".",
"crop",
"[",
"b",
"]",
"cropped_image",
"=",
"image",
"[",
"crop",
"[",
"0",
"]",
":",
"crop",
"[",
"1",
"]",
",",
"crop",
"[",
"2",
"]",
":",
"crop",
"[",
"3",
"]",
"]",
"tc_img",
"=",
"_tc",
".",
"Image",
"(",
"_image_data",
"=",
"cropped_image",
".",
"tobytes",
"(",
")",
",",
"_width",
"=",
"cropped_image",
".",
"shape",
"[",
"1",
"]",
",",
"_height",
"=",
"cropped_image",
".",
"shape",
"[",
"0",
"]",
",",
"_channels",
"=",
"cropped_image",
".",
"shape",
"[",
"2",
"]",
",",
"_format_enum",
"=",
"2",
",",
"_image_data_size",
"=",
"cropped_image",
".",
"size",
")",
"sb",
".",
"append",
"(",
"[",
"batch",
".",
"indices",
"[",
"b",
"]",
",",
"batch_styles",
"[",
"b",
"]",
",",
"tc_img",
"]",
")",
"count",
"+=",
"1",
"cur_time",
"=",
"_time",
".",
"time",
"(",
")",
"if",
"verbose",
"and",
"output_size",
"!=",
"1",
"and",
"(",
"cur_time",
">",
"last_time",
"+",
"10",
"or",
"count",
"==",
"output_size",
")",
":",
"print",
"(",
"'Stylizing {curr_image:{width}d}/{max_n:{width}d}'",
".",
"format",
"(",
"curr_image",
"=",
"count",
",",
"max_n",
"=",
"output_size",
",",
"width",
"=",
"len",
"(",
"str",
"(",
"output_size",
")",
")",
")",
")",
"last_time",
"=",
"cur_time",
"return",
"unpack",
"(",
"sb",
".",
"close",
"(",
")",
")"
] | Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns] | [
"Stylize",
"an",
"SFrame",
"of",
"Images",
"given",
"a",
"style",
"index",
"or",
"a",
"list",
"of",
"styles",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L559-L754 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py | StyleTransfer.export_coreml | def export_coreml(self, path, image_shape=(256, 256),
include_flexible_shape=True):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size, and a style index inputs and produces an output
of an image of fixed size
Parameters
----------
path : string
A string to the path for saving the Core ML model.
image_shape: tuple
A tuple (defaults to (256, 256)) will bind the coreml model to a fixed shape.
include_flexible_shape: bool
A boolean value indicating whether flexible_shape should be included or not.
See Also
--------
save
Examples
--------
>>> model.export_coreml('StyleTransfer.mlmodel')
"""
import mxnet as _mx
from .._mxnet._mxnet_to_coreml import _mxnet_converter
import coremltools
transformer = self._model
index = _mx.sym.Variable("index", shape=(1,), dtype=_np.int32)
# append batch size and channels
image_shape = (1, 3) + image_shape
c_image = _mx.sym.Variable(self.content_feature, shape=image_shape,
dtype=_np.float32)
# signal that we want the transformer to prepare for coreml export
# using a zero batch size
transformer.batch_size = 0
transformer.scale255 = True
sym_out = transformer(c_image, index)
mod = _mx.mod.Module(symbol=sym_out, data_names=[self.content_feature, "index"],
label_names=None)
mod.bind(data_shapes=zip([self.content_feature, "index"], [image_shape, (1,)]), for_training=False,
inputs_need_grad=False)
gluon_weights = transformer.collect_params()
gluon_layers = []
for layer in transformer.collect_params()._params:
gluon_layers.append(layer)
sym_layers = mod._param_names
sym_weight_dict = {}
for gluon_layer, sym_layer in zip(gluon_layers, sym_layers):
sym_weight_dict[sym_layer] = gluon_weights[gluon_layer]._data[0]
mod.set_params(sym_weight_dict, sym_weight_dict)
index_dim = (1, self.num_styles)
coreml_model = _mxnet_converter.convert(mod, input_shape=[(self.content_feature, image_shape), ('index', index_dim)],
mode=None, preprocessor_args=None, builder=None, verbose=False)
transformer.scale255 = False
spec = coreml_model.get_spec()
image_input = spec.description.input[0]
image_output = spec.description.output[0]
input_array_shape = tuple(image_input.type.multiArrayType.shape)
output_array_shape = tuple(image_output.type.multiArrayType.shape)
self._export_coreml_image(image_input, input_array_shape)
self._export_coreml_image(image_output, output_array_shape)
stylized_image = 'stylized%s' % self.content_feature.capitalize()
coremltools.utils.rename_feature(spec,
'transformer__mulscalar0_output', stylized_image, True, True)
if include_flexible_shape:
# Support flexible shape
flexible_shape_utils = _mxnet_converter._coremltools.models.neural_network.flexible_shape_utils
img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
img_size_ranges.add_height_range((64, -1))
img_size_ranges.add_width_range((64, -1))
flexible_shape_utils.update_image_size_range(spec, feature_name=self.content_feature, size_range=img_size_ranges)
flexible_shape_utils.update_image_size_range(spec, feature_name=stylized_image, size_range=img_size_ranges)
model_type = 'style transfer (%s)' % self.model
spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(
model_type)
spec.description.input[0].shortDescription = 'Input image'
spec.description.input[1].shortDescription = u'Style index array (set index I to 1.0 to enable Ith style)'
spec.description.output[0].shortDescription = 'Stylized image'
user_defined_metadata = _coreml_utils._get_model_metadata(
self.__class__.__name__, {
'model': self.model,
'num_styles': str(self.num_styles),
'content_feature': self.content_feature,
'style_feature': self.style_feature,
'max_iterations': str(self.max_iterations),
'training_iterations': str(self.training_iterations),
}, version=StyleTransfer._PYTHON_STYLE_TRANSFER_VERSION)
spec.description.metadata.userDefined.update(user_defined_metadata)
from coremltools.models.utils import save_spec as _save_spec
_save_spec(spec, path) | python | def export_coreml(self, path, image_shape=(256, 256),
include_flexible_shape=True):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size, and a style index inputs and produces an output
of an image of fixed size
Parameters
----------
path : string
A string to the path for saving the Core ML model.
image_shape: tuple
A tuple (defaults to (256, 256)) will bind the coreml model to a fixed shape.
include_flexible_shape: bool
A boolean value indicating whether flexible_shape should be included or not.
See Also
--------
save
Examples
--------
>>> model.export_coreml('StyleTransfer.mlmodel')
"""
import mxnet as _mx
from .._mxnet._mxnet_to_coreml import _mxnet_converter
import coremltools
transformer = self._model
index = _mx.sym.Variable("index", shape=(1,), dtype=_np.int32)
# append batch size and channels
image_shape = (1, 3) + image_shape
c_image = _mx.sym.Variable(self.content_feature, shape=image_shape,
dtype=_np.float32)
# signal that we want the transformer to prepare for coreml export
# using a zero batch size
transformer.batch_size = 0
transformer.scale255 = True
sym_out = transformer(c_image, index)
mod = _mx.mod.Module(symbol=sym_out, data_names=[self.content_feature, "index"],
label_names=None)
mod.bind(data_shapes=zip([self.content_feature, "index"], [image_shape, (1,)]), for_training=False,
inputs_need_grad=False)
gluon_weights = transformer.collect_params()
gluon_layers = []
for layer in transformer.collect_params()._params:
gluon_layers.append(layer)
sym_layers = mod._param_names
sym_weight_dict = {}
for gluon_layer, sym_layer in zip(gluon_layers, sym_layers):
sym_weight_dict[sym_layer] = gluon_weights[gluon_layer]._data[0]
mod.set_params(sym_weight_dict, sym_weight_dict)
index_dim = (1, self.num_styles)
coreml_model = _mxnet_converter.convert(mod, input_shape=[(self.content_feature, image_shape), ('index', index_dim)],
mode=None, preprocessor_args=None, builder=None, verbose=False)
transformer.scale255 = False
spec = coreml_model.get_spec()
image_input = spec.description.input[0]
image_output = spec.description.output[0]
input_array_shape = tuple(image_input.type.multiArrayType.shape)
output_array_shape = tuple(image_output.type.multiArrayType.shape)
self._export_coreml_image(image_input, input_array_shape)
self._export_coreml_image(image_output, output_array_shape)
stylized_image = 'stylized%s' % self.content_feature.capitalize()
coremltools.utils.rename_feature(spec,
'transformer__mulscalar0_output', stylized_image, True, True)
if include_flexible_shape:
# Support flexible shape
flexible_shape_utils = _mxnet_converter._coremltools.models.neural_network.flexible_shape_utils
img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
img_size_ranges.add_height_range((64, -1))
img_size_ranges.add_width_range((64, -1))
flexible_shape_utils.update_image_size_range(spec, feature_name=self.content_feature, size_range=img_size_ranges)
flexible_shape_utils.update_image_size_range(spec, feature_name=stylized_image, size_range=img_size_ranges)
model_type = 'style transfer (%s)' % self.model
spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(
model_type)
spec.description.input[0].shortDescription = 'Input image'
spec.description.input[1].shortDescription = u'Style index array (set index I to 1.0 to enable Ith style)'
spec.description.output[0].shortDescription = 'Stylized image'
user_defined_metadata = _coreml_utils._get_model_metadata(
self.__class__.__name__, {
'model': self.model,
'num_styles': str(self.num_styles),
'content_feature': self.content_feature,
'style_feature': self.style_feature,
'max_iterations': str(self.max_iterations),
'training_iterations': str(self.training_iterations),
}, version=StyleTransfer._PYTHON_STYLE_TRANSFER_VERSION)
spec.description.metadata.userDefined.update(user_defined_metadata)
from coremltools.models.utils import save_spec as _save_spec
_save_spec(spec, path) | [
"def",
"export_coreml",
"(",
"self",
",",
"path",
",",
"image_shape",
"=",
"(",
"256",
",",
"256",
")",
",",
"include_flexible_shape",
"=",
"True",
")",
":",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
".",
"_mxnet",
".",
"_mxnet_to_coreml",
"import",
"_mxnet_converter",
"import",
"coremltools",
"transformer",
"=",
"self",
".",
"_model",
"index",
"=",
"_mx",
".",
"sym",
".",
"Variable",
"(",
"\"index\"",
",",
"shape",
"=",
"(",
"1",
",",
")",
",",
"dtype",
"=",
"_np",
".",
"int32",
")",
"# append batch size and channels",
"image_shape",
"=",
"(",
"1",
",",
"3",
")",
"+",
"image_shape",
"c_image",
"=",
"_mx",
".",
"sym",
".",
"Variable",
"(",
"self",
".",
"content_feature",
",",
"shape",
"=",
"image_shape",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
"# signal that we want the transformer to prepare for coreml export",
"# using a zero batch size",
"transformer",
".",
"batch_size",
"=",
"0",
"transformer",
".",
"scale255",
"=",
"True",
"sym_out",
"=",
"transformer",
"(",
"c_image",
",",
"index",
")",
"mod",
"=",
"_mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
"=",
"sym_out",
",",
"data_names",
"=",
"[",
"self",
".",
"content_feature",
",",
"\"index\"",
"]",
",",
"label_names",
"=",
"None",
")",
"mod",
".",
"bind",
"(",
"data_shapes",
"=",
"zip",
"(",
"[",
"self",
".",
"content_feature",
",",
"\"index\"",
"]",
",",
"[",
"image_shape",
",",
"(",
"1",
",",
")",
"]",
")",
",",
"for_training",
"=",
"False",
",",
"inputs_need_grad",
"=",
"False",
")",
"gluon_weights",
"=",
"transformer",
".",
"collect_params",
"(",
")",
"gluon_layers",
"=",
"[",
"]",
"for",
"layer",
"in",
"transformer",
".",
"collect_params",
"(",
")",
".",
"_params",
":",
"gluon_layers",
".",
"append",
"(",
"layer",
")",
"sym_layers",
"=",
"mod",
".",
"_param_names",
"sym_weight_dict",
"=",
"{",
"}",
"for",
"gluon_layer",
",",
"sym_layer",
"in",
"zip",
"(",
"gluon_layers",
",",
"sym_layers",
")",
":",
"sym_weight_dict",
"[",
"sym_layer",
"]",
"=",
"gluon_weights",
"[",
"gluon_layer",
"]",
".",
"_data",
"[",
"0",
"]",
"mod",
".",
"set_params",
"(",
"sym_weight_dict",
",",
"sym_weight_dict",
")",
"index_dim",
"=",
"(",
"1",
",",
"self",
".",
"num_styles",
")",
"coreml_model",
"=",
"_mxnet_converter",
".",
"convert",
"(",
"mod",
",",
"input_shape",
"=",
"[",
"(",
"self",
".",
"content_feature",
",",
"image_shape",
")",
",",
"(",
"'index'",
",",
"index_dim",
")",
"]",
",",
"mode",
"=",
"None",
",",
"preprocessor_args",
"=",
"None",
",",
"builder",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
"transformer",
".",
"scale255",
"=",
"False",
"spec",
"=",
"coreml_model",
".",
"get_spec",
"(",
")",
"image_input",
"=",
"spec",
".",
"description",
".",
"input",
"[",
"0",
"]",
"image_output",
"=",
"spec",
".",
"description",
".",
"output",
"[",
"0",
"]",
"input_array_shape",
"=",
"tuple",
"(",
"image_input",
".",
"type",
".",
"multiArrayType",
".",
"shape",
")",
"output_array_shape",
"=",
"tuple",
"(",
"image_output",
".",
"type",
".",
"multiArrayType",
".",
"shape",
")",
"self",
".",
"_export_coreml_image",
"(",
"image_input",
",",
"input_array_shape",
")",
"self",
".",
"_export_coreml_image",
"(",
"image_output",
",",
"output_array_shape",
")",
"stylized_image",
"=",
"'stylized%s'",
"%",
"self",
".",
"content_feature",
".",
"capitalize",
"(",
")",
"coremltools",
".",
"utils",
".",
"rename_feature",
"(",
"spec",
",",
"'transformer__mulscalar0_output'",
",",
"stylized_image",
",",
"True",
",",
"True",
")",
"if",
"include_flexible_shape",
":",
"# Support flexible shape",
"flexible_shape_utils",
"=",
"_mxnet_converter",
".",
"_coremltools",
".",
"models",
".",
"neural_network",
".",
"flexible_shape_utils",
"img_size_ranges",
"=",
"flexible_shape_utils",
".",
"NeuralNetworkImageSizeRange",
"(",
")",
"img_size_ranges",
".",
"add_height_range",
"(",
"(",
"64",
",",
"-",
"1",
")",
")",
"img_size_ranges",
".",
"add_width_range",
"(",
"(",
"64",
",",
"-",
"1",
")",
")",
"flexible_shape_utils",
".",
"update_image_size_range",
"(",
"spec",
",",
"feature_name",
"=",
"self",
".",
"content_feature",
",",
"size_range",
"=",
"img_size_ranges",
")",
"flexible_shape_utils",
".",
"update_image_size_range",
"(",
"spec",
",",
"feature_name",
"=",
"stylized_image",
",",
"size_range",
"=",
"img_size_ranges",
")",
"model_type",
"=",
"'style transfer (%s)'",
"%",
"self",
".",
"model",
"spec",
".",
"description",
".",
"metadata",
".",
"shortDescription",
"=",
"_coreml_utils",
".",
"_mlmodel_short_description",
"(",
"model_type",
")",
"spec",
".",
"description",
".",
"input",
"[",
"0",
"]",
".",
"shortDescription",
"=",
"'Input image'",
"spec",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"shortDescription",
"=",
"u'Style index array (set index I to 1.0 to enable Ith style)'",
"spec",
".",
"description",
".",
"output",
"[",
"0",
"]",
".",
"shortDescription",
"=",
"'Stylized image'",
"user_defined_metadata",
"=",
"_coreml_utils",
".",
"_get_model_metadata",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"{",
"'model'",
":",
"self",
".",
"model",
",",
"'num_styles'",
":",
"str",
"(",
"self",
".",
"num_styles",
")",
",",
"'content_feature'",
":",
"self",
".",
"content_feature",
",",
"'style_feature'",
":",
"self",
".",
"style_feature",
",",
"'max_iterations'",
":",
"str",
"(",
"self",
".",
"max_iterations",
")",
",",
"'training_iterations'",
":",
"str",
"(",
"self",
".",
"training_iterations",
")",
",",
"}",
",",
"version",
"=",
"StyleTransfer",
".",
"_PYTHON_STYLE_TRANSFER_VERSION",
")",
"spec",
".",
"description",
".",
"metadata",
".",
"userDefined",
".",
"update",
"(",
"user_defined_metadata",
")",
"from",
"coremltools",
".",
"models",
".",
"utils",
"import",
"save_spec",
"as",
"_save_spec",
"_save_spec",
"(",
"spec",
",",
"path",
")"
] | Save the model in Core ML format. The Core ML model takes an image of
fixed size, and a style index inputs and produces an output
of an image of fixed size
Parameters
----------
path : string
A string to the path for saving the Core ML model.
image_shape: tuple
A tuple (defaults to (256, 256)) will bind the coreml model to a fixed shape.
include_flexible_shape: bool
A boolean value indicating whether flexible_shape should be included or not.
See Also
--------
save
Examples
--------
>>> model.export_coreml('StyleTransfer.mlmodel') | [
"Save",
"the",
"model",
"in",
"Core",
"ML",
"format",
".",
"The",
"Core",
"ML",
"model",
"takes",
"an",
"image",
"of",
"fixed",
"size",
"and",
"a",
"style",
"index",
"inputs",
"and",
"produces",
"an",
"output",
"of",
"an",
"image",
"of",
"fixed",
"size"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L770-L874 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py | StyleTransfer.get_styles | def get_styles(self, style=None):
"""
Returns SFrame of style images used for training the model
Parameters
----------
style: int or list, optional
The selected style or list of styles to return. If `None`, all
styles will be returned
See Also
--------
stylize
Examples
--------
>>> model.get_styles()
Columns:
style int
image Image
Rows: 4
Data:
+-------+--------------------------+
| style | image |
+-------+--------------------------+
| 0 | Height: 642 Width: 642 |
| 1 | Height: 642 Width: 642 |
| 2 | Height: 642 Width: 642 |
| 3 | Height: 642 Width: 642 |
+-------+--------------------------+
"""
style, _ = self._style_input_check(style)
return self.styles.filter_by(style, self._index_column) | python | def get_styles(self, style=None):
"""
Returns SFrame of style images used for training the model
Parameters
----------
style: int or list, optional
The selected style or list of styles to return. If `None`, all
styles will be returned
See Also
--------
stylize
Examples
--------
>>> model.get_styles()
Columns:
style int
image Image
Rows: 4
Data:
+-------+--------------------------+
| style | image |
+-------+--------------------------+
| 0 | Height: 642 Width: 642 |
| 1 | Height: 642 Width: 642 |
| 2 | Height: 642 Width: 642 |
| 3 | Height: 642 Width: 642 |
+-------+--------------------------+
"""
style, _ = self._style_input_check(style)
return self.styles.filter_by(style, self._index_column) | [
"def",
"get_styles",
"(",
"self",
",",
"style",
"=",
"None",
")",
":",
"style",
",",
"_",
"=",
"self",
".",
"_style_input_check",
"(",
"style",
")",
"return",
"self",
".",
"styles",
".",
"filter_by",
"(",
"style",
",",
"self",
".",
"_index_column",
")"
] | Returns SFrame of style images used for training the model
Parameters
----------
style: int or list, optional
The selected style or list of styles to return. If `None`, all
styles will be returned
See Also
--------
stylize
Examples
--------
>>> model.get_styles()
Columns:
style int
image Image
Rows: 4
Data:
+-------+--------------------------+
| style | image |
+-------+--------------------------+
| 0 | Height: 642 Width: 642 |
| 1 | Height: 642 Width: 642 |
| 2 | Height: 642 Width: 642 |
| 3 | Height: 642 Width: 642 |
+-------+--------------------------+ | [
"Returns",
"SFrame",
"of",
"style",
"images",
"used",
"for",
"training",
"the",
"model"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/style_transfer/style_transfer.py#L876-L911 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_mxnet/_mxnet_to_coreml/_mxnet_converter.py | convert | def convert(model, input_shape, class_labels=None, mode=None,
preprocessor_args=None, builder=None, verbose=True):
"""Convert an MXNet model to the protobuf spec.
Parameters
----------
model: MXNet model
A trained MXNet neural network model.
input_shape: list of tuples
A list of (name, shape) tuples, defining the input names and their
shapes. The list also serves to define the desired order of the inputs.
class_labels: A string or list of strings.
As a string it represents the name of the file which contains the classification labels (one per line).
As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model.
When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed.
When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed.
builder: `NeuralNetworkBuilder`
If `None`, a builder will be created internally. This also means the
builder will not be finalized and returned as an `MLModel`.
Post-processing arguments will be ignored and class labels will not be
integrated. This option is meant for advanced users.
verbose: bool
Print exported layers.
**kwargs :
Provide keyword arguments for:
- input shapes. Supplied as a dictionary object with keyword "input_shape".
- pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary
tell the converted coreml model how to pre-process any input before an inference is run on it.
For the list of pre-processing arguments see
http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
Returns
-------
model: A coreml model.
"""
if not isinstance(input_shape, list):
raise TypeError("Must provide a list for input shape. e.g input_shape=[('data', (3,224,224))]")
def remove_batch(dim):
return dim[1:]
input_names, input_dims = zip(*input_shape)
input_dims = list(map(remove_batch, input_dims))
net = model.symbol
# Infer shapes and store in a dictionary
shapes = net.infer_shape(**dict(input_shape))
arg_names = net.list_arguments()
output_names = net.list_outputs()
aux_names = net.list_auxiliary_states()
shape_dict = {}
for idx, op in enumerate(arg_names):
shape_dict[op] = shapes[0][idx]
for idx, op in enumerate(output_names):
shape_dict[op] = shapes[1][idx]
for idx, op in enumerate(aux_names):
shape_dict[op] = shapes[2][idx]
# Get the inputs and outputs
output_dims = shapes[1]
if mode is None:
output_dims = list(map(remove_batch, output_dims))
input_types = [_datatypes.Array(*dim) for dim in input_dims]
output_types = [_datatypes.Array(*dim) for dim in output_dims]
# Make the builder
input_features = list(zip(input_names, input_types))
output_features = list(zip(output_names, output_types))
finalize = builder is None
if builder is None:
builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
# Get out the layers
net = _json.loads(net.tojson())
nodes = net['nodes']
for i, node in enumerate(nodes):
node['id'] = i
if node['name'] in shape_dict:
node['shape'] = shape_dict[node['name']]
node['outputs'] = []
if 'inputs' in node:
for ip in node['inputs']:
nodes[ip[0]]['outputs'].append([i, 0])
else:
node['inputs'] = []
# Mark the head nodes
for head in net['heads']:
head_id = head[0]
head_node = nodes[head_id]
head_node['outputs'] = [head]
head_node['name'] += "_output"
head_node['shape'] = shape_dict[head_node['name']]
# For skipped layers, make sure nodes are modified
for node in nodes:
op = node['op']
inputs = node['inputs']
outputs = node['outputs']
if op in _MXNET_SKIP_LAYERS:
nodes[inputs[0][0]]['outputs'][0] = outputs[0]
nodes[outputs[0][0]]['inputs'][0] = inputs[0]
# Find the input and output names for this node
for idx, node in enumerate(nodes):
op = node['op']
if op == 'null' or op in _MXNET_SKIP_LAYERS:
continue
name = node['name']
if verbose:
print("%d : %s, %s" % (idx, name, op))
converter_func = _get_layer_converter_fn(op)
converter_func(net, node, model, builder)
# Only finalize builder if it was created internally. Otherwise, leave it
# up to the user.
if finalize:
# Set the right inputs and outputs
_set_input_output_layers(builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
if preprocessor_args is not None:
builder.set_pre_processing_parameters(**preprocessor_args)
if class_labels is not None:
if type(class_labels) is str:
labels = [l.strip() for l in open(class_labels).readlines()]
elif type(class_labels) is list:
labels = class_labels
else:
raise TypeError("synset variable of unknown type. Type found: %s. Expected either string or list of strings." % type(class_labels))
builder.set_class_labels(class_labels = labels)
# Return the model
return _coremltools.models.MLModel(builder.spec) | python | def convert(model, input_shape, class_labels=None, mode=None,
preprocessor_args=None, builder=None, verbose=True):
"""Convert an MXNet model to the protobuf spec.
Parameters
----------
model: MXNet model
A trained MXNet neural network model.
input_shape: list of tuples
A list of (name, shape) tuples, defining the input names and their
shapes. The list also serves to define the desired order of the inputs.
class_labels: A string or list of strings.
As a string it represents the name of the file which contains the classification labels (one per line).
As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model.
When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed.
When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed.
builder: `NeuralNetworkBuilder`
If `None`, a builder will be created internally. This also means the
builder will not be finalized and returned as an `MLModel`.
Post-processing arguments will be ignored and class labels will not be
integrated. This option is meant for advanced users.
verbose: bool
Print exported layers.
**kwargs :
Provide keyword arguments for:
- input shapes. Supplied as a dictionary object with keyword "input_shape".
- pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary
tell the converted coreml model how to pre-process any input before an inference is run on it.
For the list of pre-processing arguments see
http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
Returns
-------
model: A coreml model.
"""
if not isinstance(input_shape, list):
raise TypeError("Must provide a list for input shape. e.g input_shape=[('data', (3,224,224))]")
def remove_batch(dim):
return dim[1:]
input_names, input_dims = zip(*input_shape)
input_dims = list(map(remove_batch, input_dims))
net = model.symbol
# Infer shapes and store in a dictionary
shapes = net.infer_shape(**dict(input_shape))
arg_names = net.list_arguments()
output_names = net.list_outputs()
aux_names = net.list_auxiliary_states()
shape_dict = {}
for idx, op in enumerate(arg_names):
shape_dict[op] = shapes[0][idx]
for idx, op in enumerate(output_names):
shape_dict[op] = shapes[1][idx]
for idx, op in enumerate(aux_names):
shape_dict[op] = shapes[2][idx]
# Get the inputs and outputs
output_dims = shapes[1]
if mode is None:
output_dims = list(map(remove_batch, output_dims))
input_types = [_datatypes.Array(*dim) for dim in input_dims]
output_types = [_datatypes.Array(*dim) for dim in output_dims]
# Make the builder
input_features = list(zip(input_names, input_types))
output_features = list(zip(output_names, output_types))
finalize = builder is None
if builder is None:
builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
# Get out the layers
net = _json.loads(net.tojson())
nodes = net['nodes']
for i, node in enumerate(nodes):
node['id'] = i
if node['name'] in shape_dict:
node['shape'] = shape_dict[node['name']]
node['outputs'] = []
if 'inputs' in node:
for ip in node['inputs']:
nodes[ip[0]]['outputs'].append([i, 0])
else:
node['inputs'] = []
# Mark the head nodes
for head in net['heads']:
head_id = head[0]
head_node = nodes[head_id]
head_node['outputs'] = [head]
head_node['name'] += "_output"
head_node['shape'] = shape_dict[head_node['name']]
# For skipped layers, make sure nodes are modified
for node in nodes:
op = node['op']
inputs = node['inputs']
outputs = node['outputs']
if op in _MXNET_SKIP_LAYERS:
nodes[inputs[0][0]]['outputs'][0] = outputs[0]
nodes[outputs[0][0]]['inputs'][0] = inputs[0]
# Find the input and output names for this node
for idx, node in enumerate(nodes):
op = node['op']
if op == 'null' or op in _MXNET_SKIP_LAYERS:
continue
name = node['name']
if verbose:
print("%d : %s, %s" % (idx, name, op))
converter_func = _get_layer_converter_fn(op)
converter_func(net, node, model, builder)
# Only finalize builder if it was created internally. Otherwise, leave it
# up to the user.
if finalize:
# Set the right inputs and outputs
_set_input_output_layers(builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
if preprocessor_args is not None:
builder.set_pre_processing_parameters(**preprocessor_args)
if class_labels is not None:
if type(class_labels) is str:
labels = [l.strip() for l in open(class_labels).readlines()]
elif type(class_labels) is list:
labels = class_labels
else:
raise TypeError("synset variable of unknown type. Type found: %s. Expected either string or list of strings." % type(class_labels))
builder.set_class_labels(class_labels = labels)
# Return the model
return _coremltools.models.MLModel(builder.spec) | [
"def",
"convert",
"(",
"model",
",",
"input_shape",
",",
"class_labels",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"preprocessor_args",
"=",
"None",
",",
"builder",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"input_shape",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"Must provide a list for input shape. e.g input_shape=[('data', (3,224,224))]\"",
")",
"def",
"remove_batch",
"(",
"dim",
")",
":",
"return",
"dim",
"[",
"1",
":",
"]",
"input_names",
",",
"input_dims",
"=",
"zip",
"(",
"*",
"input_shape",
")",
"input_dims",
"=",
"list",
"(",
"map",
"(",
"remove_batch",
",",
"input_dims",
")",
")",
"net",
"=",
"model",
".",
"symbol",
"# Infer shapes and store in a dictionary",
"shapes",
"=",
"net",
".",
"infer_shape",
"(",
"*",
"*",
"dict",
"(",
"input_shape",
")",
")",
"arg_names",
"=",
"net",
".",
"list_arguments",
"(",
")",
"output_names",
"=",
"net",
".",
"list_outputs",
"(",
")",
"aux_names",
"=",
"net",
".",
"list_auxiliary_states",
"(",
")",
"shape_dict",
"=",
"{",
"}",
"for",
"idx",
",",
"op",
"in",
"enumerate",
"(",
"arg_names",
")",
":",
"shape_dict",
"[",
"op",
"]",
"=",
"shapes",
"[",
"0",
"]",
"[",
"idx",
"]",
"for",
"idx",
",",
"op",
"in",
"enumerate",
"(",
"output_names",
")",
":",
"shape_dict",
"[",
"op",
"]",
"=",
"shapes",
"[",
"1",
"]",
"[",
"idx",
"]",
"for",
"idx",
",",
"op",
"in",
"enumerate",
"(",
"aux_names",
")",
":",
"shape_dict",
"[",
"op",
"]",
"=",
"shapes",
"[",
"2",
"]",
"[",
"idx",
"]",
"# Get the inputs and outputs",
"output_dims",
"=",
"shapes",
"[",
"1",
"]",
"if",
"mode",
"is",
"None",
":",
"output_dims",
"=",
"list",
"(",
"map",
"(",
"remove_batch",
",",
"output_dims",
")",
")",
"input_types",
"=",
"[",
"_datatypes",
".",
"Array",
"(",
"*",
"dim",
")",
"for",
"dim",
"in",
"input_dims",
"]",
"output_types",
"=",
"[",
"_datatypes",
".",
"Array",
"(",
"*",
"dim",
")",
"for",
"dim",
"in",
"output_dims",
"]",
"# Make the builder",
"input_features",
"=",
"list",
"(",
"zip",
"(",
"input_names",
",",
"input_types",
")",
")",
"output_features",
"=",
"list",
"(",
"zip",
"(",
"output_names",
",",
"output_types",
")",
")",
"finalize",
"=",
"builder",
"is",
"None",
"if",
"builder",
"is",
"None",
":",
"builder",
"=",
"_neural_network",
".",
"NeuralNetworkBuilder",
"(",
"input_features",
",",
"output_features",
",",
"mode",
")",
"# Get out the layers",
"net",
"=",
"_json",
".",
"loads",
"(",
"net",
".",
"tojson",
"(",
")",
")",
"nodes",
"=",
"net",
"[",
"'nodes'",
"]",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"nodes",
")",
":",
"node",
"[",
"'id'",
"]",
"=",
"i",
"if",
"node",
"[",
"'name'",
"]",
"in",
"shape_dict",
":",
"node",
"[",
"'shape'",
"]",
"=",
"shape_dict",
"[",
"node",
"[",
"'name'",
"]",
"]",
"node",
"[",
"'outputs'",
"]",
"=",
"[",
"]",
"if",
"'inputs'",
"in",
"node",
":",
"for",
"ip",
"in",
"node",
"[",
"'inputs'",
"]",
":",
"nodes",
"[",
"ip",
"[",
"0",
"]",
"]",
"[",
"'outputs'",
"]",
".",
"append",
"(",
"[",
"i",
",",
"0",
"]",
")",
"else",
":",
"node",
"[",
"'inputs'",
"]",
"=",
"[",
"]",
"# Mark the head nodes",
"for",
"head",
"in",
"net",
"[",
"'heads'",
"]",
":",
"head_id",
"=",
"head",
"[",
"0",
"]",
"head_node",
"=",
"nodes",
"[",
"head_id",
"]",
"head_node",
"[",
"'outputs'",
"]",
"=",
"[",
"head",
"]",
"head_node",
"[",
"'name'",
"]",
"+=",
"\"_output\"",
"head_node",
"[",
"'shape'",
"]",
"=",
"shape_dict",
"[",
"head_node",
"[",
"'name'",
"]",
"]",
"# For skipped layers, make sure nodes are modified",
"for",
"node",
"in",
"nodes",
":",
"op",
"=",
"node",
"[",
"'op'",
"]",
"inputs",
"=",
"node",
"[",
"'inputs'",
"]",
"outputs",
"=",
"node",
"[",
"'outputs'",
"]",
"if",
"op",
"in",
"_MXNET_SKIP_LAYERS",
":",
"nodes",
"[",
"inputs",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
"[",
"'outputs'",
"]",
"[",
"0",
"]",
"=",
"outputs",
"[",
"0",
"]",
"nodes",
"[",
"outputs",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
"[",
"'inputs'",
"]",
"[",
"0",
"]",
"=",
"inputs",
"[",
"0",
"]",
"# Find the input and output names for this node",
"for",
"idx",
",",
"node",
"in",
"enumerate",
"(",
"nodes",
")",
":",
"op",
"=",
"node",
"[",
"'op'",
"]",
"if",
"op",
"==",
"'null'",
"or",
"op",
"in",
"_MXNET_SKIP_LAYERS",
":",
"continue",
"name",
"=",
"node",
"[",
"'name'",
"]",
"if",
"verbose",
":",
"print",
"(",
"\"%d : %s, %s\"",
"%",
"(",
"idx",
",",
"name",
",",
"op",
")",
")",
"converter_func",
"=",
"_get_layer_converter_fn",
"(",
"op",
")",
"converter_func",
"(",
"net",
",",
"node",
",",
"model",
",",
"builder",
")",
"# Only finalize builder if it was created internally. Otherwise, leave it",
"# up to the user.",
"if",
"finalize",
":",
"# Set the right inputs and outputs",
"_set_input_output_layers",
"(",
"builder",
",",
"input_names",
",",
"output_names",
")",
"builder",
".",
"set_input",
"(",
"input_names",
",",
"input_dims",
")",
"builder",
".",
"set_output",
"(",
"output_names",
",",
"output_dims",
")",
"if",
"preprocessor_args",
"is",
"not",
"None",
":",
"builder",
".",
"set_pre_processing_parameters",
"(",
"*",
"*",
"preprocessor_args",
")",
"if",
"class_labels",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"class_labels",
")",
"is",
"str",
":",
"labels",
"=",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"open",
"(",
"class_labels",
")",
".",
"readlines",
"(",
")",
"]",
"elif",
"type",
"(",
"class_labels",
")",
"is",
"list",
":",
"labels",
"=",
"class_labels",
"else",
":",
"raise",
"TypeError",
"(",
"\"synset variable of unknown type. Type found: %s. Expected either string or list of strings.\"",
"%",
"type",
"(",
"class_labels",
")",
")",
"builder",
".",
"set_class_labels",
"(",
"class_labels",
"=",
"labels",
")",
"# Return the model",
"return",
"_coremltools",
".",
"models",
".",
"MLModel",
"(",
"builder",
".",
"spec",
")"
] | Convert an MXNet model to the protobuf spec.
Parameters
----------
model: MXNet model
A trained MXNet neural network model.
input_shape: list of tuples
A list of (name, shape) tuples, defining the input names and their
shapes. The list also serves to define the desired order of the inputs.
class_labels: A string or list of strings.
As a string it represents the name of the file which contains the classification labels (one per line).
As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model.
When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed.
When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed.
builder: `NeuralNetworkBuilder`
If `None`, a builder will be created internally. This also means the
builder will not be finalized and returned as an `MLModel`.
Post-processing arguments will be ignored and class labels will not be
integrated. This option is meant for advanced users.
verbose: bool
Print exported layers.
**kwargs :
Provide keyword arguments for:
- input shapes. Supplied as a dictionary object with keyword "input_shape".
- pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary
tell the converted coreml model how to pre-process any input before an inference is run on it.
For the list of pre-processing arguments see
http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
Returns
-------
model: A coreml model. | [
"Convert",
"an",
"MXNet",
"model",
"to",
"the",
"protobuf",
"spec",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mxnet/_mxnet_to_coreml/_mxnet_converter.py#L127-L272 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/_libsvm_util.py | load_model | def load_model(model_path):
"""Load a libsvm model from a path on disk.
This currently supports:
* C-SVC
* NU-SVC
* Epsilon-SVR
* NU-SVR
Parameters
----------
model_path: str
Path on disk where the libsvm model representation is.
Returns
-------
model: libsvm_model
A model of the libsvm format.
"""
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
from svmutil import svm_load_model # From libsvm
import os
if (not os.path.exists(model_path)):
raise IOError("Expected a valid file path. %s does not exist" % model_path)
return svm_load_model(model_path) | python | def load_model(model_path):
"""Load a libsvm model from a path on disk.
This currently supports:
* C-SVC
* NU-SVC
* Epsilon-SVR
* NU-SVR
Parameters
----------
model_path: str
Path on disk where the libsvm model representation is.
Returns
-------
model: libsvm_model
A model of the libsvm format.
"""
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
from svmutil import svm_load_model # From libsvm
import os
if (not os.path.exists(model_path)):
raise IOError("Expected a valid file path. %s does not exist" % model_path)
return svm_load_model(model_path) | [
"def",
"load_model",
"(",
"model_path",
")",
":",
"if",
"not",
"(",
"HAS_LIBSVM",
")",
":",
"raise",
"RuntimeError",
"(",
"'libsvm not found. libsvm conversion API is disabled.'",
")",
"from",
"svmutil",
"import",
"svm_load_model",
"# From libsvm",
"import",
"os",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"model_path",
")",
")",
":",
"raise",
"IOError",
"(",
"\"Expected a valid file path. %s does not exist\"",
"%",
"model_path",
")",
"return",
"svm_load_model",
"(",
"model_path",
")"
] | Load a libsvm model from a path on disk.
This currently supports:
* C-SVC
* NU-SVC
* Epsilon-SVR
* NU-SVR
Parameters
----------
model_path: str
Path on disk where the libsvm model representation is.
Returns
-------
model: libsvm_model
A model of the libsvm format. | [
"Load",
"a",
"libsvm",
"model",
"from",
"a",
"path",
"on",
"disk",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/libsvm/_libsvm_util.py#L8-L34 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.