Dataset column schema:

| column | dtype | lengths / distinct values |
|---|---|---|
| repo | string | 7 - 55 |
| path | string | 4 - 223 |
| func_name | string | 1 - 134 |
| original_string | string | 75 - 104k |
| language | string | 1 value |
| code | string | 75 - 104k |
| code_tokens | list | 19 - 28.4k |
| docstring | string | 1 - 46.9k |
| docstring_tokens | list | 1 - 1.97k |
| sha | string | 40 |
| url | string | 87 - 315 |
| partition | string | 1 value |
apache/incubator-mxnet | tools/caffe_converter/caffe_proto_utils.py | read_caffe_mean |
def read_caffe_mean(caffe_mean_file):
"""
Reads caffe formatted mean file
:param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix
:return: mean image, converted from BGR to RGB format
"""
import caffe_parser
import numpy as np
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(caffe_mean_file, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(mean_blob.channels, mean_blob.height, mean_blob.width)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
return img_mean_np
| python |
Reads caffe formatted mean file
:param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix
:return: mean image, converted from BGR to RGB format
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/caffe_proto_utils.py#L185-L204 | train |
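A minimal usage sketch for the record above, assuming the converter directory is on the Python path and the Caffe protobuf bindings used by `caffe_parser` are installed; `mean.binaryproto` is a placeholder file name.

```python
# Hypothetical usage of read_caffe_mean; the mean file path is a placeholder.
from caffe_proto_utils import read_caffe_mean

mean_rgb = read_caffe_mean('mean.binaryproto')  # numpy array, shape (C, H, W), RGB order
print(mean_rgb.shape, mean_rgb.mean())
```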
apache/incubator-mxnet | example/gluon/embedding_learning/model.py | get_distance |
def get_distance(F, x):
"""Helper function for margin-based loss. Return a distance matrix given a matrix."""
n = x.shape[0]
square = F.sum(x ** 2.0, axis=1, keepdims=True)
distance_square = square + square.transpose() - (2.0 * F.dot(x, x.transpose()))
# Adding identity to make sqrt work.
return F.sqrt(distance_square + F.array(np.identity(n)))
| python |
Helper function for margin-based loss. Return a distance matrix given a matrix.
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/embedding_learning/model.py#L51-L59 | train |
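A small sketch of calling `get_distance` imperatively, assuming `np` is NumPy (as imported at module level in model.py) and passing `mx.nd` as the `F` namespace.

```python
# Sketch: pairwise distance matrix for 4 embeddings of dimension 8.
import numpy as np
import mxnet as mx

x = mx.nd.array(np.random.rand(4, 8))
dist = get_distance(mx.nd, x)  # (4, 4); diagonal is 1.0 because an identity matrix is added before sqrt
print(dist.asnumpy())
```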
apache/incubator-mxnet | example/rnn/large_word_lm/model.py | cross_entropy_loss |
def cross_entropy_loss(inputs, labels, rescale_loss=1):
""" cross entropy loss with a mask """
criterion = mx.gluon.loss.SoftmaxCrossEntropyLoss(weight=rescale_loss)
loss = criterion(inputs, labels)
mask = S.var('mask')
loss = loss * S.reshape(mask, shape=(-1,))
return S.make_loss(loss.mean())
| python |
cross entropy loss with a mask
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/large_word_lm/model.py#L39-L45 | train |
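A hedged sketch of assembling the symbolic loss above, assuming `mx` is the top-level package and `S` is `mxnet.symbol` (the aliases used in this model.py); the `mask` variable is created inside the function and has to be bound when the symbol is executed.

```python
# Sketch: build the masked cross-entropy symbol; 'pred' and 'label' are placeholder names.
import mxnet as mx
import mxnet.symbol as S

pred = S.var('pred')    # (batch*bptt, vocab) unnormalized scores
label = S.var('label')  # (batch*bptt,) target word ids
loss_sym = cross_entropy_loss(pred, label)
print(loss_sym.list_arguments())  # typically ['pred', 'label', 'mask']
```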
apache/incubator-mxnet | example/rnn/large_word_lm/model.py | rnn |
def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size):
""" word embedding + LSTM Projected """
state_names = []
data = S.var('data')
weight = S.var("encoder_weight", stype='row_sparse')
embed = S.sparse.Embedding(data=data, weight=weight, input_dim=vocab_size,
output_dim=num_embed, name='embed', sparse_grad=True)
states = []
outputs = S.Dropout(embed, p=dropout)
for i in range(num_layers):
prefix = 'lstmp%d_' % i
init_h = S.var(prefix + 'init_h', shape=(batch_size, num_proj), init=mx.init.Zero())
init_c = S.var(prefix + 'init_c', shape=(batch_size, nhid), init=mx.init.Zero())
state_names += [prefix + 'init_h', prefix + 'init_c']
lstmp = mx.gluon.contrib.rnn.LSTMPCell(nhid, num_proj, prefix=prefix)
outputs, next_states = lstmp.unroll(bptt, outputs, begin_state=[init_h, init_c], \
layout='NTC', merge_outputs=True)
outputs = S.Dropout(outputs, p=dropout)
states += [S.stop_gradient(s) for s in next_states]
outputs = S.reshape(outputs, shape=(-1, num_proj))
trainable_lstm_args = []
for arg in outputs.list_arguments():
if 'lstmp' in arg and 'init' not in arg:
trainable_lstm_args.append(arg)
return outputs, states, trainable_lstm_args, state_names
| python |
word embedding + LSTM Projected
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/large_word_lm/model.py#L47-L72 | train |
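Since `rnn` only builds symbols, it can be exercised on its own; the hyperparameters below are illustrative placeholders, and `S`/`mx` are assumed to be the module-level `mxnet.symbol`/`mxnet` aliases.

```python
# Sketch: build the unrolled LSTMP language-model graph with toy hyperparameters.
outputs, states, trainable_args, state_names = rnn(
    bptt=20, vocab_size=10000, num_embed=200, nhid=512,
    num_layers=2, dropout=0.2, num_proj=256, batch_size=32)

# 'outputs' is a (batch_size * bptt, num_proj) symbol feeding the output layer;
# 'state_names' lists the init_h/init_c inputs that must be fed each batch.
print(len(state_names))  # 2 per layer -> 4 here
```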
apache/incubator-mxnet | example/rnn/large_word_lm/model.py | sampled_softmax |
def sampled_softmax(num_classes, num_samples, in_dim, inputs, weight, bias,
sampled_values, remove_accidental_hits=True):
""" Sampled softmax via importance sampling.
This under-estimates the full softmax and is only used for training.
"""
# inputs = (n, in_dim)
sample, prob_sample, prob_target = sampled_values
# (num_samples, )
sample = S.var('sample', shape=(num_samples,), dtype='float32')
# (n, )
label = S.var('label')
label = S.reshape(label, shape=(-1,), name="label_reshape")
# (num_samples+n, )
sample_label = S.concat(sample, label, dim=0)
# lookup weights and biases
# (num_samples+n, dim)
sample_target_w = S.sparse.Embedding(data=sample_label, weight=weight,
input_dim=num_classes, output_dim=in_dim,
sparse_grad=True)
# (num_samples+n, 1)
sample_target_b = S.sparse.Embedding(data=sample_label, weight=bias,
input_dim=num_classes, output_dim=1,
sparse_grad=True)
# (num_samples, dim)
sample_w = S.slice(sample_target_w, begin=(0, 0), end=(num_samples, None))
target_w = S.slice(sample_target_w, begin=(num_samples, 0), end=(None, None))
sample_b = S.slice(sample_target_b, begin=(0, 0), end=(num_samples, None))
target_b = S.slice(sample_target_b, begin=(num_samples, 0), end=(None, None))
# target
# (n, 1)
true_pred = S.sum(target_w * inputs, axis=1, keepdims=True) + target_b
# samples
# (n, num_samples)
sample_b = S.reshape(sample_b, (-1,))
sample_pred = S.FullyConnected(inputs, weight=sample_w, bias=sample_b,
num_hidden=num_samples)
# remove accidental hits
if remove_accidental_hits:
label_v = S.reshape(label, (-1, 1))
sample_v = S.reshape(sample, (1, -1))
neg = S.broadcast_equal(label_v, sample_v) * -1e37
sample_pred = sample_pred + neg
prob_sample = S.reshape(prob_sample, shape=(1, num_samples))
p_target = true_pred - S.log(prob_target)
p_sample = S.broadcast_sub(sample_pred, S.log(prob_sample))
# return logits and new_labels
# (n, 1+num_samples)
logits = S.concat(p_target, p_sample, dim=1)
new_targets = S.zeros_like(label)
return logits, new_targets
| python |
Sampled softmax via importance sampling.
This under-estimates the full softmax and is only used for training.
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/large_word_lm/model.py#L74-L128 | train |
apache/incubator-mxnet | example/rnn/large_word_lm/model.py | generate_samples |
def generate_samples(label, num_splits, sampler):
""" Split labels into `num_splits` and
generate candidates based on log-uniform distribution.
"""
def listify(x):
return x if isinstance(x, list) else [x]
label_splits = listify(label.split(num_splits, axis=0))
prob_samples = []
prob_targets = []
samples = []
for label_split in label_splits:
label_split_2d = label_split.reshape((-1,1))
sampled_value = sampler.draw(label_split_2d)
sampled_classes, exp_cnt_true, exp_cnt_sampled = sampled_value
samples.append(sampled_classes.astype(np.float32))
prob_targets.append(exp_cnt_true.astype(np.float32).reshape((-1,1)))
prob_samples.append(exp_cnt_sampled.astype(np.float32))
return samples, prob_samples, prob_targets
| python |
Split labels into `num_splits` and
generate candidates based on log-uniform distribution.
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/large_word_lm/model.py#L130-L147 | train |
apache/incubator-mxnet | example/multivariate_time_series/src/lstnet.py | build_iters |
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
"""
Load & generate training examples from multivariate time series data
:return: data iters & variables required to define network architecture
"""
# Read in data as numpy array
df = pd.read_csv(os.path.join(data_dir, "electricity.txt"), sep=",", header=None)
feature_df = df.iloc[:, :].astype(float)
x = feature_df.as_matrix()
x = x[:max_records] if max_records else x
# Construct training examples based on horizon and window
x_ts = np.zeros((x.shape[0] - q, q, x.shape[1]))
y_ts = np.zeros((x.shape[0] - q, x.shape[1]))
for n in range(x.shape[0]):
if n + 1 < q:
continue
elif n + 1 + horizon > x.shape[0]:
continue
else:
y_n = x[n + horizon, :]
x_n = x[n + 1 - q:n + 1, :]
x_ts[n-q] = x_n
y_ts[n-q] = y_n
# Split into training and testing data
training_examples = int(x_ts.shape[0] * splits[0])
valid_examples = int(x_ts.shape[0] * splits[1])
x_train, y_train = x_ts[:training_examples], \
y_ts[:training_examples]
x_valid, y_valid = x_ts[training_examples:training_examples + valid_examples], \
y_ts[training_examples:training_examples + valid_examples]
x_test, y_test = x_ts[training_examples + valid_examples:], \
y_ts[training_examples + valid_examples:]
#build iterators to feed batches to network
train_iter = mx.io.NDArrayIter(data=x_train,
label=y_train,
batch_size=batch_size)
val_iter = mx.io.NDArrayIter(data=x_valid,
label=y_valid,
batch_size=batch_size)
test_iter = mx.io.NDArrayIter(data=x_test,
label=y_test,
batch_size=batch_size)
return train_iter, val_iter, test_iter
| python |
Load & generate training examples from multivariate time series data
:return: data iters & variables required to define network architecture
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/multivariate_time_series/src/lstnet.py#L74-L119 | train |
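A hedged call sketch, assuming `electricity.txt` sits under `./data`, the module-level imports (`os`, `pd`, `np`, `mx`) are in scope, and a pandas version that still provides `DataFrame.as_matrix`; the window `q`, `horizon`, and `splits` values are placeholders.

```python
# Sketch: iterators over 24-step windows, predicting 3 steps ahead, 60/20/20 split.
train_iter, val_iter, test_iter = build_iters(
    data_dir='./data', max_records=None, q=24, horizon=3,
    splits=(0.6, 0.2), batch_size=128)

for batch in train_iter:
    # data: (128, 24, num_series), label: (128, num_series)
    print(batch.data[0].shape, batch.label[0].shape)
    break
```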
apache/incubator-mxnet | python/mxnet/gluon/model_zoo/vision/__init__.py | get_model |
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
"""
models = {'resnet18_v1': resnet18_v1,
'resnet34_v1': resnet34_v1,
'resnet50_v1': resnet50_v1,
'resnet101_v1': resnet101_v1,
'resnet152_v1': resnet152_v1,
'resnet18_v2': resnet18_v2,
'resnet34_v2': resnet34_v2,
'resnet50_v2': resnet50_v2,
'resnet101_v2': resnet101_v2,
'resnet152_v2': resnet152_v2,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'vgg11_bn': vgg11_bn,
'vgg13_bn': vgg13_bn,
'vgg16_bn': vgg16_bn,
'vgg19_bn': vgg19_bn,
'alexnet': alexnet,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'squeezenet1.0': squeezenet1_0,
'squeezenet1.1': squeezenet1_1,
'inceptionv3': inception_v3,
'mobilenet1.0': mobilenet1_0,
'mobilenet0.75': mobilenet0_75,
'mobilenet0.5': mobilenet0_5,
'mobilenet0.25': mobilenet0_25,
'mobilenetv2_1.0': mobilenet_v2_1_0,
'mobilenetv2_0.75': mobilenet_v2_0_75,
'mobilenetv2_0.5': mobilenet_v2_0_5,
'mobilenetv2_0.25': mobilenet_v2_0_25
}
name = name.lower()
if name not in models:
raise ValueError(
'Model %s is not supported. Available options are\n\t%s' % (
name, '\n\t'.join(sorted(models.keys()))))
return models[name](**kwargs)
| python |
Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/model_zoo/vision/__init__.py#L91-L152 | train |
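A usage sketch; this is the function behind the public `mxnet.gluon.model_zoo.vision.get_model` entry point, and fetching pretrained weights requires network access.

```python
# Sketch: fetch a pretrained ResNet-18 v1 and run a dummy forward pass.
import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.get_model('resnet18_v1', pretrained=True)  # downloads weights on first use
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))
print(net(x).shape)  # (1, 1000) class scores
```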
apache/incubator-mxnet | python/mxnet/ndarray/sparse.py | _new_alloc_handle |
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl
| python |
Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L72-L104 | train |
apache/incubator-mxnet | python/mxnet/ndarray/sparse.py | _prepare_src_array |
def _prepare_src_array(source_array, dtype):
"""Prepare `source_array` so that it can be used to construct NDArray.
`source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \
nor an `np.ndarray`.
"""
if not isinstance(source_array, NDArray) and not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=dtype)
except:
raise TypeError('values must be array like object')
return source_array
| python |
Prepare `source_array` so that it can be used to construct NDArray.
`source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \
nor an `np.ndarray`.
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L796-L806 | train |
apache/incubator-mxnet | python/mxnet/ndarray/sparse.py | _prepare_default_dtype |
def _prepare_default_dtype(src_array, dtype):
"""Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray
or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise."""
if dtype is None:
if isinstance(src_array, (NDArray, np.ndarray)):
dtype = src_array.dtype
elif spsp and isinstance(src_array, spsp.csr.csr_matrix):
dtype = src_array.dtype
else:
dtype = mx_real_t
return dtype
| python |
Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray
or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise.
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L808-L818 | train |
apache/incubator-mxnet | python/mxnet/ndarray/sparse.py | _check_shape |
def _check_shape(s1, s2):
"""check s1 == s2 if both are not None"""
if s1 and s2 and s1 != s2:
raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2))
| python |
check s1 == s2 if both are not None
|
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L820-L823 | train |
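A tiny sketch of the helper's contract: it is a no-op when either shape is falsy (e.g. None) or when both agree, and raises otherwise.

```python
# Sketch of _check_shape behaviour.
_check_shape((3, 4), None)    # ok: second shape unspecified
_check_shape((3, 4), (3, 4))  # ok: shapes agree
try:
    _check_shape((3, 4), (4, 3))
except ValueError as err:
    print(err)  # Shape mismatch detected. (3, 4) v.s. (4, 3)
```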
apache/incubator-mxnet | python/mxnet/ndarray/sparse.py | csr_matrix |
def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format.
The CSRNDArray can be instantiated in several ways:
- csr_matrix(D):
to construct a CSRNDArray with a dense 2D array ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix(S)
to construct a CSRNDArray with a sparse 2D array ``S``
- **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- csr_matrix((M, N))
to construct an empty CSRNDArray with shape ``(M, N)``
- **M** (*int*) - Number of rows in the matrix
- **N** (*int*) - Number of columns in the matrix
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- csr_matrix((data, indices, indptr))
to construct a CSRNDArray based on the definition of compressed sparse row format \
using three separate arrays, \
where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \
and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \
The column indices for a given row are expected to be **sorted in ascending order.** \
Duplicate column entries for the same row are not allowed.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in row-major order.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the column index for each non-zero element in ``data``.
- **indptr** (*array_like*) - An object exposing the array interface, which \
stores the offset into ``data`` of the first non-zero element number of each \
row of the matrix.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the indices and indptr arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix((data, (row, col)))
to construct a CSRNDArray based on the COOrdinate format \
using three seperate arrays, \
where ``row[i]`` is the row index of the element, \
``col[i]`` is the column index of the element \
and ``data[i]`` is the data corresponding to the element. All the missing \
elements in the input are taken to be zeroes.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in COO format.
- **row** (*array_like*) - An object exposing the array interface, which \
stores the row index for each non zero element in ``data``.
- **col** (*array_like*) - An object exposing the array interface, which \
stores the col index for each non zero element in ``data``.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``row`` and ``col`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \
scipy.sparse.coo_matrix, tuple of int or tuple of array_like
The argument to help instantiate the csr matrix. See above for further details.
shape : tuple of int, optional
The shape of the csr matrix.
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
See Also
--------
CSRNDArray : MXNet NDArray in compressed sparse row format.
"""
# construct a csr matrix from (M, N) or (data, indices, indptr)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len == 2:
# construct a sparse csr matrix from
# scipy coo matrix if input format is coo
if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
data, (row, col) = arg1
if isinstance(data, NDArray):
data = data.asnumpy()
if isinstance(row, NDArray):
row = row.asnumpy()
if isinstance(col, NDArray):
col = col.asnumpy()
coo = spsp.coo_matrix((data, (row, col)), shape=shape)
_check_shape(coo.shape, shape)
csr = coo.tocsr()
return array(csr, ctx=ctx, dtype=dtype)
else:
# empty matrix with shape
_check_shape(arg1, shape)
return empty('csr', arg1, ctx=ctx, dtype=dtype)
elif arg_len == 3:
# data, indices, indptr
return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape,
ctx=ctx, dtype=dtype)
else:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
else:
# construct a csr matrix from a sparse / dense one
if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
# construct a csr matrix from scipy or CSRNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, RowSparseNDArray):
raise ValueError("Unexpected input type: RowSparseNDArray")
else:
# construct a csr matrix from a dense one
# prepare default ctx and dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('csr')
|
python
|
def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format.
The CSRNDArray can be instantiated in several ways:
- csr_matrix(D):
to construct a CSRNDArray with a dense 2D array ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix(S)
to construct a CSRNDArray with a sparse 2D array ``S``
- **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- csr_matrix((M, N))
to construct an empty CSRNDArray with shape ``(M, N)``
- **M** (*int*) - Number of rows in the matrix
- **N** (*int*) - Number of columns in the matrix
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- csr_matrix((data, indices, indptr))
to construct a CSRNDArray based on the definition of compressed sparse row format \
using three separate arrays, \
where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \
and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \
The column indices for a given row are expected to be **sorted in ascending order.** \
Duplicate column entries for the same row are not allowed.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in row-major order.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the column index for each non-zero element in ``data``.
- **indptr** (*array_like*) - An object exposing the array interface, which \
stores the offset into ``data`` of the first non-zero element number of each \
row of the matrix.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the indices and indptr arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix((data, (row, col)))
to construct a CSRNDArray based on the COOrdinate format \
using three seperate arrays, \
where ``row[i]`` is the row index of the element, \
``col[i]`` is the column index of the element \
and ``data[i]`` is the data corresponding to the element. All the missing \
elements in the input are taken to be zeroes.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in COO format.
- **row** (*array_like*) - An object exposing the array interface, which \
stores the row index for each non zero element in ``data``.
- **col** (*array_like*) - An object exposing the array interface, which \
stores the col index for each non zero element in ``data``.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``row`` and ``col`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \
scipy.sparse.coo_matrix, tuple of int or tuple of array_like
The argument to help instantiate the csr matrix. See above for further details.
shape : tuple of int, optional
The shape of the csr matrix.
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
See Also
--------
CSRNDArray : MXNet NDArray in compressed sparse row format.
"""
# construct a csr matrix from (M, N) or (data, indices, indptr)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len == 2:
# construct a sparse csr matrix from
# scipy coo matrix if input format is coo
if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
data, (row, col) = arg1
if isinstance(data, NDArray):
data = data.asnumpy()
if isinstance(row, NDArray):
row = row.asnumpy()
if isinstance(col, NDArray):
col = col.asnumpy()
coo = spsp.coo_matrix((data, (row, col)), shape=shape)
_check_shape(coo.shape, shape)
csr = coo.tocsr()
return array(csr, ctx=ctx, dtype=dtype)
else:
# empty matrix with shape
_check_shape(arg1, shape)
return empty('csr', arg1, ctx=ctx, dtype=dtype)
elif arg_len == 3:
# data, indices, indptr
return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape,
ctx=ctx, dtype=dtype)
else:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
else:
# construct a csr matrix from a sparse / dense one
if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
# construct a csr matrix from scipy or CSRNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, RowSparseNDArray):
raise ValueError("Unexpected input type: RowSparseNDArray")
else:
# construct a csr matrix from a dense one
# prepare default ctx and dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('csr')
|
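A minimal usage sketch (added for illustration, not part of the original entry): it exercises two of the construction forms the csr_matrix code above dispatches on, assuming only that ``mxnet`` is importable as ``mx`` and that ``scipy`` is available for the COO path.
import mxnet as mx
import scipy.sparse as spsp

# (data, indices, indptr): row i holds data[indptr[i]:indptr[i+1]]
a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))

# (data, (row, col)): routed through scipy.sparse.coo_matrix and tocsr() in the code above
b = mx.nd.sparse.csr_matrix(([1., 2.], ([0, 1], [2, 0])), shape=(2, 3))

# an existing scipy csr_matrix is copied directly
c = mx.nd.sparse.csr_matrix(spsp.csr_matrix([[0., 1.], [2., 0.]]))

print(a.asnumpy())
print(b.asnumpy())
print(c.asnumpy())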
[
"def",
"csr_matrix",
"(",
"arg1",
",",
"shape",
"=",
"None",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"# construct a csr matrix from (M, N) or (data, indices, indptr)",
"if",
"isinstance",
"(",
"arg1",
",",
"tuple",
")",
":",
"arg_len",
"=",
"len",
"(",
"arg1",
")",
"if",
"arg_len",
"==",
"2",
":",
"# construct a sparse csr matrix from",
"# scipy coo matrix if input format is coo",
"if",
"isinstance",
"(",
"arg1",
"[",
"1",
"]",
",",
"tuple",
")",
"and",
"len",
"(",
"arg1",
"[",
"1",
"]",
")",
"==",
"2",
":",
"data",
",",
"(",
"row",
",",
"col",
")",
"=",
"arg1",
"if",
"isinstance",
"(",
"data",
",",
"NDArray",
")",
":",
"data",
"=",
"data",
".",
"asnumpy",
"(",
")",
"if",
"isinstance",
"(",
"row",
",",
"NDArray",
")",
":",
"row",
"=",
"row",
".",
"asnumpy",
"(",
")",
"if",
"isinstance",
"(",
"col",
",",
"NDArray",
")",
":",
"col",
"=",
"col",
".",
"asnumpy",
"(",
")",
"coo",
"=",
"spsp",
".",
"coo_matrix",
"(",
"(",
"data",
",",
"(",
"row",
",",
"col",
")",
")",
",",
"shape",
"=",
"shape",
")",
"_check_shape",
"(",
"coo",
".",
"shape",
",",
"shape",
")",
"csr",
"=",
"coo",
".",
"tocsr",
"(",
")",
"return",
"array",
"(",
"csr",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"# empty matrix with shape",
"_check_shape",
"(",
"arg1",
",",
"shape",
")",
"return",
"empty",
"(",
"'csr'",
",",
"arg1",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"elif",
"arg_len",
"==",
"3",
":",
"# data, indices, indptr",
"return",
"_csr_matrix_from_definition",
"(",
"arg1",
"[",
"0",
"]",
",",
"arg1",
"[",
"1",
"]",
",",
"arg1",
"[",
"2",
"]",
",",
"shape",
"=",
"shape",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected length of input tuple: \"",
"+",
"str",
"(",
"arg_len",
")",
")",
"else",
":",
"# construct a csr matrix from a sparse / dense one",
"if",
"isinstance",
"(",
"arg1",
",",
"CSRNDArray",
")",
"or",
"(",
"spsp",
"and",
"isinstance",
"(",
"arg1",
",",
"spsp",
".",
"csr",
".",
"csr_matrix",
")",
")",
":",
"# construct a csr matrix from scipy or CSRNDArray",
"_check_shape",
"(",
"arg1",
".",
"shape",
",",
"shape",
")",
"return",
"array",
"(",
"arg1",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"elif",
"isinstance",
"(",
"arg1",
",",
"RowSparseNDArray",
")",
":",
"raise",
"ValueError",
"(",
"\"Unexpected input type: RowSparseNDArray\"",
")",
"else",
":",
"# construct a csr matrix from a dense one",
"# prepare default ctx and dtype since mx.nd.array doesn't use default values",
"# based on source_array",
"dtype",
"=",
"_prepare_default_dtype",
"(",
"arg1",
",",
"dtype",
")",
"# create dns array with provided dtype. ctx is not passed since copy across",
"# ctx requires dtype to be the same",
"dns",
"=",
"_array",
"(",
"arg1",
",",
"dtype",
"=",
"dtype",
")",
"if",
"ctx",
"is",
"not",
"None",
"and",
"dns",
".",
"context",
"!=",
"ctx",
":",
"dns",
"=",
"dns",
".",
"as_in_context",
"(",
"ctx",
")",
"_check_shape",
"(",
"dns",
".",
"shape",
",",
"shape",
")",
"return",
"dns",
".",
"tostype",
"(",
"'csr'",
")"
] |
Creates a `CSRNDArray`, a 2D array with compressed sparse row (CSR) format.
The CSRNDArray can be instantiated in several ways:
- csr_matrix(D):
to construct a CSRNDArray with a dense 2D array ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix(S)
to construct a CSRNDArray with a sparse 2D array ``S``
- **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- csr_matrix((M, N))
to construct an empty CSRNDArray with shape ``(M, N)``
- **M** (*int*) - Number of rows in the matrix
- **N** (*int*) - Number of columns in the matrix
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- csr_matrix((data, indices, indptr))
to construct a CSRNDArray based on the definition of compressed sparse row format \
using three separate arrays, \
where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \
and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \
The column indices for a given row are expected to be **sorted in ascending order.** \
Duplicate column entries for the same row are not allowed.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in row-major order.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the column index for each non-zero element in ``data``.
- **indptr** (*array_like*) - An object exposing the array interface, which \
stores the offset into ``data`` of the first non-zero element of each \
row of the matrix.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the indices and indptr arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix((data, (row, col)))
to construct a CSRNDArray based on the COOrdinate format \
using three separate arrays, \
where ``row[i]`` is the row index of the element, \
``col[i]`` is the column index of the element \
and ``data[i]`` is the data corresponding to the element. All the missing \
elements in the input are taken to be zeroes.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in COO format.
- **row** (*array_like*) - An object exposing the array interface, which \
stores the row index for each non zero element in ``data``.
- **col** (*array_like*) - An object exposing the array interface, which \
stores the col index for each non zero element in ``data``.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``row`` and ``col`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \
scipy.sparse.coo_matrix, tuple of int or tuple of array_like
The argument to help instantiate the csr matrix. See above for further details.
shape : tuple of int, optional
The shape of the csr matrix.
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
See Also
--------
CSRNDArray : MXNet NDArray in compressed sparse row format.
|
[
"Creates",
"a",
"CSRNDArray",
"an",
"2D",
"array",
"with",
"compressed",
"sparse",
"row",
"(",
"CSR",
")",
"format",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L825-L976
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
_csr_matrix_from_definition
|
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
dtype=None, indices_type=None, indptr_type=None):
"""Create a `CSRNDArray` based on data, indices and indptr"""
# pylint: disable= no-member, protected-access
storage_type = 'csr'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type
indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indptr = _prepare_src_array(indptr, indptr_type)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
if indices.shape[0] == 0:
raise ValueError('invalid shape')
shape = (len(indptr) - 1, op.max(indices).asscalar() + 1)
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \
indptr.shape[0] == 0 or len(shape) != 2:
raise ValueError('invalid shape')
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result
|
python
|
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
dtype=None, indices_type=None, indptr_type=None):
"""Create a `CSRNDArray` based on data, indices and indptr"""
# pylint: disable= no-member, protected-access
storage_type = 'csr'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type
indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indptr = _prepare_src_array(indptr, indptr_type)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
if indices.shape[0] == 0:
raise ValueError('invalid shape')
shape = (len(indptr) - 1, op.max(indices).asscalar() + 1)
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \
indptr.shape[0] == 0 or len(shape) != 2:
raise ValueError('invalid shape')
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result
|
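A short sketch of the shape inference performed by _csr_matrix_from_definition above when ``shape`` is omitted: rows come from ``len(indptr) - 1`` and columns from ``max(indices) + 1``. It goes through the public ``csr_matrix`` wrapper; the specific numbers are only illustrative.
import mxnet as mx

# indptr has 4 entries -> 3 rows; max(indices) is 3 -> 4 columns
a = mx.nd.sparse.csr_matrix(([1, 2, 3], [0, 3, 1], [0, 1, 2, 3]))
print(a.shape)  # (3, 4), inferred rather than supplied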
[
"def",
"_csr_matrix_from_definition",
"(",
"data",
",",
"indices",
",",
"indptr",
",",
"shape",
"=",
"None",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"indices_type",
"=",
"None",
",",
"indptr_type",
"=",
"None",
")",
":",
"# pylint: disable= no-member, protected-access",
"storage_type",
"=",
"'csr'",
"# context",
"ctx",
"=",
"current_context",
"(",
")",
"if",
"ctx",
"is",
"None",
"else",
"ctx",
"# types",
"dtype",
"=",
"_prepare_default_dtype",
"(",
"data",
",",
"dtype",
")",
"indptr_type",
"=",
"_STORAGE_AUX_TYPES",
"[",
"storage_type",
"]",
"[",
"0",
"]",
"if",
"indptr_type",
"is",
"None",
"else",
"indptr_type",
"indices_type",
"=",
"_STORAGE_AUX_TYPES",
"[",
"storage_type",
"]",
"[",
"1",
"]",
"if",
"indices_type",
"is",
"None",
"else",
"indices_type",
"# prepare src array and types",
"data",
"=",
"_prepare_src_array",
"(",
"data",
",",
"dtype",
")",
"indptr",
"=",
"_prepare_src_array",
"(",
"indptr",
",",
"indptr_type",
")",
"indices",
"=",
"_prepare_src_array",
"(",
"indices",
",",
"indices_type",
")",
"# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays",
"# if they are not for now. In the future, we should provide a c-api",
"# to accept np.ndarray types to copy from to result.data and aux_data",
"if",
"not",
"isinstance",
"(",
"data",
",",
"NDArray",
")",
":",
"data",
"=",
"_array",
"(",
"data",
",",
"ctx",
",",
"dtype",
")",
"if",
"not",
"isinstance",
"(",
"indptr",
",",
"NDArray",
")",
":",
"indptr",
"=",
"_array",
"(",
"indptr",
",",
"ctx",
",",
"indptr_type",
")",
"if",
"not",
"isinstance",
"(",
"indices",
",",
"NDArray",
")",
":",
"indices",
"=",
"_array",
"(",
"indices",
",",
"ctx",
",",
"indices_type",
")",
"if",
"shape",
"is",
"None",
":",
"if",
"indices",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'invalid shape'",
")",
"shape",
"=",
"(",
"len",
"(",
"indptr",
")",
"-",
"1",
",",
"op",
".",
"max",
"(",
"indices",
")",
".",
"asscalar",
"(",
")",
"+",
"1",
")",
"# verify shapes",
"aux_shapes",
"=",
"[",
"indptr",
".",
"shape",
",",
"indices",
".",
"shape",
"]",
"if",
"data",
".",
"ndim",
"!=",
"1",
"or",
"indptr",
".",
"ndim",
"!=",
"1",
"or",
"indices",
".",
"ndim",
"!=",
"1",
"or",
"indptr",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
"or",
"len",
"(",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'invalid shape'",
")",
"result",
"=",
"CSRNDArray",
"(",
"_new_alloc_handle",
"(",
"storage_type",
",",
"shape",
",",
"ctx",
",",
"False",
",",
"dtype",
",",
"[",
"indptr_type",
",",
"indices_type",
"]",
",",
"aux_shapes",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySyncCopyFromNDArray",
"(",
"result",
".",
"handle",
",",
"data",
".",
"handle",
",",
"ctypes",
".",
"c_int",
"(",
"-",
"1",
")",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySyncCopyFromNDArray",
"(",
"result",
".",
"handle",
",",
"indptr",
".",
"handle",
",",
"ctypes",
".",
"c_int",
"(",
"0",
")",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySyncCopyFromNDArray",
"(",
"result",
".",
"handle",
",",
"indices",
".",
"handle",
",",
"ctypes",
".",
"c_int",
"(",
"1",
")",
")",
")",
"return",
"result"
] |
Create a `CSRNDArray` based on data, indices and indptr
|
[
"Create",
"a",
"CSRNDArray",
"based",
"on",
"data",
"indices",
"and",
"indptr"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L978-L1017
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
row_sparse_array
|
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
tensor slices at given indices.
The RowSparseNDArray can be instantiated in several ways:
- row_sparse_array(D):
to construct a RowSparseNDArray with a dense ndarray ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- row_sparse_array(S)
to construct a RowSparseNDArray with a sparse ndarray ``S``
- **S** (*RowSparseNDArray*) - A sparse ndarray.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- row_sparse_array((D0, D1 .. Dn))
to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
- **D0, D1 .. Dn** (*int*) - The shape of the ndarray
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- row_sparse_array((data, indices))
to construct a RowSparseNDArray based on the definition of row sparse format \
using two separate arrays, \
where the `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has \
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
The row indices are expected to be **sorted in ascending order.** \
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero row slices of the array.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the row index for each row slice with non-zero elements.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``indices`` and ``data`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
The argument to help instantiate the row sparse ndarray. See above for further details.
shape : tuple of int, optional
The shape of the row sparse ndarray. (Default value = None)
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. (Default value = None)
Returns
-------
RowSparseNDArray
A `RowSparseNDArray` with the `row_sparse` storage representation.
Examples
--------
>>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
See Also
--------
RowSparseNDArray : MXNet NDArray in row sparse format.
"""
# construct a row sparse array from (D0, D1 ..) or (data, indices)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len < 2:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
elif arg_len > 2:
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# len(arg1) = 2, is either shape or (data, indices)
if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# data, indices
return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape,
ctx=ctx, dtype=dtype)
else:
# construct a row sparse ndarray from a dense / sparse array
if isinstance(arg1, RowSparseNDArray):
# construct a row sparse ndarray from RowSparseNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, CSRNDArray):
raise ValueError("Unexpected input type: CSRNDArray")
else:
# construct a row sparse ndarray from a dense one
# prepare default dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('row_sparse')
|
python
|
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
tensor slices at given indices.
The RowSparseNDArray can be instantiated in several ways:
- row_sparse_array(D):
to construct a RowSparseNDArray with a dense ndarray ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- row_sparse_array(S)
to construct a RowSparseNDArray with a sparse ndarray ``S``
- **S** (*RowSparseNDArray*) - A sparse ndarray.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- row_sparse_array((D0, D1 .. Dn))
to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
- **D0, D1 .. Dn** (*int*) - The shape of the ndarray
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- row_sparse_array((data, indices))
to construct a RowSparseNDArray based on the definition of row sparse format \
using two separate arrays, \
where the `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has \
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
The row indices are expected to be **sorted in ascending order.** \
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero row slices of the array.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the row index for each row slice with non-zero elements.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``indices`` and ``data`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
The argument to help instantiate the row sparse ndarray. See above for further details.
shape : tuple of int, optional
The shape of the row sparse ndarray. (Default value = None)
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. (Default value = None)
Returns
-------
RowSparseNDArray
A `RowSparseNDArray` with the `row_sparse` storage representation.
Examples
--------
>>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
See Also
--------
RowSparseNDArray : MXNet NDArray in row sparse format.
"""
# construct a row sparse array from (D0, D1 ..) or (data, indices)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len < 2:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
elif arg_len > 2:
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# len(arg1) = 2, is either shape or (data, indices)
if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# data, indices
return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape,
ctx=ctx, dtype=dtype)
else:
# construct a row sparse ndarray from a dense / sparse array
if isinstance(arg1, RowSparseNDArray):
# construct a row sparse ndarray from RowSparseNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, CSRNDArray):
raise ValueError("Unexpected input type: CSRNDArray")
else:
# construct a row sparse ndarray from a dense one
# prepare default dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('row_sparse')
|
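A brief sketch of the (data, indices) form documented above and of the row sparse layout it produces; it assumes nothing beyond the ``mx.nd.sparse`` namespace this module is exposed under.
import mxnet as mx

# two stored row slices, placed at rows 1 and 4 of a (6, 2) array
rsp = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
print(rsp.indices.asnumpy())              # [1 4]
print(rsp.data.asnumpy())                 # only the stored slices
print(rsp.tostype('default').asnumpy())   # dense view with the zero rows filled in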
[
"def",
"row_sparse_array",
"(",
"arg1",
",",
"shape",
"=",
"None",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"# construct a row sparse array from (D0, D1 ..) or (data, indices)",
"if",
"isinstance",
"(",
"arg1",
",",
"tuple",
")",
":",
"arg_len",
"=",
"len",
"(",
"arg1",
")",
"if",
"arg_len",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Unexpected length of input tuple: \"",
"+",
"str",
"(",
"arg_len",
")",
")",
"elif",
"arg_len",
">",
"2",
":",
"# empty ndarray with shape",
"_check_shape",
"(",
"arg1",
",",
"shape",
")",
"return",
"empty",
"(",
"'row_sparse'",
",",
"arg1",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"# len(arg1) = 2, is either shape or (data, indices)",
"if",
"isinstance",
"(",
"arg1",
"[",
"0",
"]",
",",
"integer_types",
")",
"and",
"isinstance",
"(",
"arg1",
"[",
"1",
"]",
",",
"integer_types",
")",
":",
"# empty ndarray with shape",
"_check_shape",
"(",
"arg1",
",",
"shape",
")",
"return",
"empty",
"(",
"'row_sparse'",
",",
"arg1",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"# data, indices, indptr",
"return",
"_row_sparse_ndarray_from_definition",
"(",
"arg1",
"[",
"0",
"]",
",",
"arg1",
"[",
"1",
"]",
",",
"shape",
"=",
"shape",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"# construct a row sparse ndarray from a dense / sparse array",
"if",
"isinstance",
"(",
"arg1",
",",
"RowSparseNDArray",
")",
":",
"# construct a row sparse ndarray from RowSparseNDArray",
"_check_shape",
"(",
"arg1",
".",
"shape",
",",
"shape",
")",
"return",
"array",
"(",
"arg1",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"elif",
"isinstance",
"(",
"arg1",
",",
"CSRNDArray",
")",
":",
"raise",
"ValueError",
"(",
"\"Unexpected input type: CSRNDArray\"",
")",
"else",
":",
"# construct a csr matrix from a dense one",
"# prepare default dtype since mx.nd.array doesn't use default values",
"# based on source_array",
"dtype",
"=",
"_prepare_default_dtype",
"(",
"arg1",
",",
"dtype",
")",
"# create dns array with provided dtype. ctx is not passed since copy across",
"# ctx requires dtype to be the same",
"dns",
"=",
"_array",
"(",
"arg1",
",",
"dtype",
"=",
"dtype",
")",
"if",
"ctx",
"is",
"not",
"None",
"and",
"dns",
".",
"context",
"!=",
"ctx",
":",
"dns",
"=",
"dns",
".",
"as_in_context",
"(",
"ctx",
")",
"_check_shape",
"(",
"dns",
".",
"shape",
",",
"shape",
")",
"return",
"dns",
".",
"tostype",
"(",
"'row_sparse'",
")"
] |
Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
tensor slices at given indices.
The RowSparseNDArray can be instantiated in several ways:
- row_sparse_array(D):
to construct a RowSparseNDArray with a dense ndarray ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- row_sparse_array(S)
to construct a RowSparseNDArray with a sparse ndarray ``S``
- **S** (*RowSparseNDArray*) - A sparse ndarray.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- row_sparse_array((D0, D1 .. Dn))
to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
- **D0, D1 .. Dn** (*int*) - The shape of the ndarray
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- row_sparse_array((data, indices))
to construct a RowSparseNDArray based on the definition of row sparse format \
using two separate arrays, \
where the `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has \
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
The row indices are expected to be **sorted in ascending order.** \
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero row slices of the array.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the row index for each row slice with non-zero elements.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``indices`` and ``data`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
The argument to help instantiate the row sparse ndarray. See above for further details.
shape : tuple of int, optional
The shape of the row sparse ndarray. (Default value = None)
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. (Default value = None)
Returns
-------
RowSparseNDArray
A `RowSparseNDArray` with the `row_sparse` storage representation.
Examples
--------
>>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
See Also
--------
RowSparseNDArray : MXNet NDArray in row sparse format.
|
[
"Creates",
"a",
"RowSparseNDArray",
"a",
"multidimensional",
"row",
"sparse",
"array",
"with",
"a",
"set",
"of",
"\\",
"tensor",
"slices",
"at",
"given",
"indices",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1020-L1140
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
_row_sparse_ndarray_from_definition
|
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
dtype=None, indices_type=None):
"""Create a `RowSparseNDArray` based on data and indices"""
storage_type = 'row_sparse'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
num_indices = indices.shape[0]
if num_indices == 0:
raise ValueError('invalid shape')
dim0 = indices[num_indices - 1].asscalar() + 1
shape = (dim0, ) + data.shape[1:]
# verify shapes
if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0:
raise ValueError("invalid shape")
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result
|
python
|
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
dtype=None, indices_type=None):
"""Create a `RowSparseNDArray` based on data and indices"""
storage_type = 'row_sparse'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
num_indices = indices.shape[0]
if num_indices == 0:
raise ValueError('invalid shape')
dim0 = indices[num_indices - 1].asscalar() + 1
shape = (dim0, ) + data.shape[1:]
# verify shapes
if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0:
raise ValueError("invalid shape")
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result
|
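A sketch of the shape inference in _row_sparse_ndarray_from_definition above: when ``shape`` is omitted, the first dimension becomes ``indices[-1] + 1`` and the trailing dimensions are taken from ``data``. Exercised here through the public wrapper; the values are illustrative.
import mxnet as mx

# last row index is 4 -> dim0 = 5; each data slice has length 2 -> shape (5, 2)
rsp = mx.nd.sparse.row_sparse_array(([[1., 1.], [2., 2.]], [0, 4]))
print(rsp.shape)  # (5, 2)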
[
"def",
"_row_sparse_ndarray_from_definition",
"(",
"data",
",",
"indices",
",",
"shape",
"=",
"None",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"indices_type",
"=",
"None",
")",
":",
"storage_type",
"=",
"'row_sparse'",
"# context",
"ctx",
"=",
"current_context",
"(",
")",
"if",
"ctx",
"is",
"None",
"else",
"ctx",
"# types",
"dtype",
"=",
"_prepare_default_dtype",
"(",
"data",
",",
"dtype",
")",
"indices_type",
"=",
"_STORAGE_AUX_TYPES",
"[",
"storage_type",
"]",
"[",
"0",
"]",
"if",
"indices_type",
"is",
"None",
"else",
"indices_type",
"# prepare src array and types",
"data",
"=",
"_prepare_src_array",
"(",
"data",
",",
"dtype",
")",
"indices",
"=",
"_prepare_src_array",
"(",
"indices",
",",
"indices_type",
")",
"# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays",
"# if they are not for now. In the future, we should provide a c-api",
"# to accept np.ndarray types to copy from to result.data and aux_data",
"if",
"not",
"isinstance",
"(",
"data",
",",
"NDArray",
")",
":",
"data",
"=",
"_array",
"(",
"data",
",",
"ctx",
",",
"dtype",
")",
"if",
"not",
"isinstance",
"(",
"indices",
",",
"NDArray",
")",
":",
"indices",
"=",
"_array",
"(",
"indices",
",",
"ctx",
",",
"indices_type",
")",
"if",
"shape",
"is",
"None",
":",
"num_indices",
"=",
"indices",
".",
"shape",
"[",
"0",
"]",
"if",
"num_indices",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'invalid shape'",
")",
"dim0",
"=",
"indices",
"[",
"num_indices",
"-",
"1",
"]",
".",
"asscalar",
"(",
")",
"+",
"1",
"shape",
"=",
"(",
"dim0",
",",
")",
"+",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"# verify shapes",
"if",
"data",
".",
"ndim",
"!=",
"len",
"(",
"shape",
")",
"or",
"indices",
".",
"ndim",
"!=",
"1",
"or",
"np",
".",
"prod",
"(",
"shape",
"[",
"1",
":",
"]",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid shape\"",
")",
"result",
"=",
"RowSparseNDArray",
"(",
"_new_alloc_handle",
"(",
"storage_type",
",",
"shape",
",",
"ctx",
",",
"False",
",",
"dtype",
",",
"[",
"indices_type",
"]",
",",
"[",
"indices",
".",
"shape",
"]",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySyncCopyFromNDArray",
"(",
"result",
".",
"handle",
",",
"data",
".",
"handle",
",",
"ctypes",
".",
"c_int",
"(",
"-",
"1",
")",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySyncCopyFromNDArray",
"(",
"result",
".",
"handle",
",",
"indices",
".",
"handle",
",",
"ctypes",
".",
"c_int",
"(",
"0",
")",
")",
")",
"return",
"result"
] |
Create a `RowSparseNDArray` based on data and indices
|
[
"Create",
"a",
"RowSparseNDArray",
"based",
"on",
"data",
"and",
"indices"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1142-L1175
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
add
|
def add(lhs, rhs):
"""Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_add(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be added.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be added.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise sum of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a+b).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c+d).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_add,
operator.add,
_internal._plus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_add,
operator.add,
_internal._plus_scalar,
None)
|
python
|
def add(lhs, rhs):
"""Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_add(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be added.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be added.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise sum of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a+b).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c+d).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_add,
operator.add,
_internal._plus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_add,
operator.add,
_internal._plus_scalar,
None)
|
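A hedged usage sketch of the dispatch in ``add`` above: matching shapes take the ``elemwise_add`` path, differing shapes the ``broadcast_add`` path. The broadcast case uses dense inputs to keep the sketch conservative.
import mxnet as mx

a = mx.nd.ones((2, 3)).tostype('csr')
b = mx.nd.ones((2, 3)).tostype('csr')
print(mx.nd.sparse.add(a, b).asnumpy())   # same shapes -> elemwise_add path

x = mx.nd.ones((2, 3))
y = mx.nd.arange(3).reshape((1, 3))
print(mx.nd.sparse.add(x, y).asnumpy())   # (2,3) + (1,3) -> broadcast_add path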
[
"def",
"add",
"(",
"lhs",
",",
"rhs",
")",
":",
"# pylint: disable= no-member, protected-access",
"if",
"isinstance",
"(",
"lhs",
",",
"NDArray",
")",
"and",
"isinstance",
"(",
"rhs",
",",
"NDArray",
")",
"and",
"lhs",
".",
"shape",
"==",
"rhs",
".",
"shape",
":",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"elemwise_add",
",",
"operator",
".",
"add",
",",
"_internal",
".",
"_plus_scalar",
",",
"None",
")",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"broadcast_add",
",",
"operator",
".",
"add",
",",
"_internal",
".",
"_plus_scalar",
",",
"None",
")"
] |
Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_add(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be added.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be added.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise sum of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a+b).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c+d).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
|
[
"Returns",
"element",
"-",
"wise",
"sum",
"of",
"the",
"input",
"arrays",
"with",
"broadcasting",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1193-L1261
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
subtract
|
def subtract(lhs, rhs):
"""Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_sub(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be subtracted.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be subtracted.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise difference of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a-b).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c-d).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_sub,
operator.sub,
_internal._minus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_sub,
operator.sub,
_internal._minus_scalar,
None)
|
python
|
def subtract(lhs, rhs):
"""Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_sub(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be subtracted.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be subtracted.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise difference of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a-b).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c-d).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_sub,
operator.sub,
_internal._minus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_sub,
operator.sub,
_internal._minus_scalar,
None)
|
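The same shape-based dispatch applies to ``subtract`` above; this small sketch also shows the scalar path, which goes through ``_minus_scalar``. A dense input is used for the scalar case to stay on well-trodden ground.
import mxnet as mx

c = mx.nd.ones((2, 3)).tostype('row_sparse')
d = mx.nd.ones((2, 3)).tostype('row_sparse')
print(mx.nd.sparse.subtract(c, d).asnumpy())                   # matching shapes -> elemwise_sub

print(mx.nd.sparse.subtract(mx.nd.ones((2, 3)), 1).asnumpy())  # scalar rhs -> _minus_scalar path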
[
"def",
"subtract",
"(",
"lhs",
",",
"rhs",
")",
":",
"# pylint: disable= no-member, protected-access",
"if",
"isinstance",
"(",
"lhs",
",",
"NDArray",
")",
"and",
"isinstance",
"(",
"rhs",
",",
"NDArray",
")",
"and",
"lhs",
".",
"shape",
"==",
"rhs",
".",
"shape",
":",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"elemwise_sub",
",",
"operator",
".",
"sub",
",",
"_internal",
".",
"_minus_scalar",
",",
"None",
")",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"broadcast_sub",
",",
"operator",
".",
"sub",
",",
"_internal",
".",
"_minus_scalar",
",",
"None",
")"
] |
Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_sub(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be subtracted.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be subtracted.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise difference of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a-b).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c-d).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
|
[
"Returns",
"element",
"-",
"wise",
"difference",
"of",
"the",
"input",
"arrays",
"with",
"broadcasting",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1265-L1333
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
multiply
|
def multiply(lhs, rhs):
"""Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be multiplied.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be multiplied.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise multiplication of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3)).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(3)
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> (x*2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x*y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> z = z.reshape((1, 3))
>>> z.asnumpy()
array([[ 0., 1., 2.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_mul,
operator.mul,
_internal._mul_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mul,
operator.mul,
_internal._mul_scalar,
None)
|
python
|
def multiply(lhs, rhs):
"""Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be multiplied.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be multiplied.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise multiplication of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3)).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(3)
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> (x*2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x*y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> z = z.reshape((1, 3))
>>> z.asnumpy()
array([[ 0., 1., 2.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_mul,
operator.mul,
_internal._mul_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mul,
operator.mul,
_internal._mul_scalar,
None)
|
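A compact sketch mirroring the cases the multiply docstring above already demonstrates: a scalar right-hand side is handled by ``_mul_scalar``, and a dense (2, 1) operand is broadcast against the (2, 3) CSR input.
import mxnet as mx

x = mx.nd.ones((2, 3)).tostype('csr')
y = mx.nd.arange(2).reshape((2, 1))
print(mx.nd.sparse.multiply(x, 2).asnumpy())  # scalar rhs -> _mul_scalar path
print(mx.nd.sparse.multiply(x, y).asnumpy())  # (2,3) * (2,1) -> broadcast_mul path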
[
"def",
"multiply",
"(",
"lhs",
",",
"rhs",
")",
":",
"# pylint: disable= no-member, protected-access",
"if",
"isinstance",
"(",
"lhs",
",",
"NDArray",
")",
"and",
"isinstance",
"(",
"rhs",
",",
"NDArray",
")",
"and",
"lhs",
".",
"shape",
"==",
"rhs",
".",
"shape",
":",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"elemwise_mul",
",",
"operator",
".",
"mul",
",",
"_internal",
".",
"_mul_scalar",
",",
"None",
")",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"broadcast_mul",
",",
"operator",
".",
"mul",
",",
"_internal",
".",
"_mul_scalar",
",",
"None",
")"
] |
Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be multiplied.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be multiplied.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise multiplication of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3)).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(3)
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> (x*2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x*y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> z = z.reshape((1, 3))
>>> z.asnumpy()
array([[ 0., 1., 2.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
|
[
"Returns",
"element",
"-",
"wise",
"product",
"of",
"the",
"input",
"arrays",
"with",
"broadcasting",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1337-L1417
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
divide
|
def divide(lhs, rhs):
"""Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array in division.
rhs : scalar or mxnet.ndarray.sparse.array
Second array in division.
The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise division of the input arrays.
Examples
--------
>>> x = (mx.nd.ones((2,3))*6).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1)) + 1
>>> z = mx.nd.arange(3) + 1
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> x/2
<NDArray 2x3 @cpu(0)>
>>> (x/3).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x/y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> mx.nd.sparse.divide(x,y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> z = z.reshape((1,3))
>>> z.asnumpy()
array([[ 1., 2., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_div,
operator.truediv,
_internal._div_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_div,
operator.truediv,
_internal._div_scalar,
None)
|
python
|
def divide(lhs, rhs):
"""Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array in division.
rhs : scalar or mxnet.ndarray.sparse.array
Second array in division.
The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise division of the input arrays.
Examples
--------
>>> x = (mx.nd.ones((2,3))*6).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1)) + 1
>>> z = mx.nd.arange(3) + 1
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> x/2
<NDArray 2x3 @cpu(0)>
>>> (x/3).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x/y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> mx.nd.sparse.divide(x,y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> z = z.reshape((1,3))
>>> z.asnumpy()
array([[ 1., 2., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_div,
operator.truediv,
_internal._div_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_div,
operator.truediv,
_internal._div_scalar,
None)
|
[
"def",
"divide",
"(",
"lhs",
",",
"rhs",
")",
":",
"# pylint: disable= no-member, protected-access",
"if",
"isinstance",
"(",
"lhs",
",",
"NDArray",
")",
"and",
"isinstance",
"(",
"rhs",
",",
"NDArray",
")",
"and",
"lhs",
".",
"shape",
"==",
"rhs",
".",
"shape",
":",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"elemwise_div",
",",
"operator",
".",
"truediv",
",",
"_internal",
".",
"_div_scalar",
",",
"None",
")",
"return",
"_ufunc_helper",
"(",
"lhs",
",",
"rhs",
",",
"op",
".",
"broadcast_div",
",",
"operator",
".",
"truediv",
",",
"_internal",
".",
"_div_scalar",
",",
"None",
")"
] |
Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array in division.
rhs : scalar or mxnet.ndarray.sparse.array
Second array in division.
The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise division of the input arrays.
Examples
--------
>>> x = (mx.nd.ones((2,3))*6).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1)) + 1
>>> z = mx.nd.arange(3) + 1
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> x/2
<NDArray 2x3 @cpu(0)>
>>> (x/3).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x/y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> mx.nd.sparse.divide(x,y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> z = z.reshape((1,3))
>>> z.asnumpy()
array([[ 1., 2., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
|
[
"Returns",
"element",
"-",
"wise",
"division",
"of",
"the",
"input",
"arrays",
"with",
"broadcasting",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1421-L1503
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
zeros
|
def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= no-member, protected-access
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
if stype in ('row_sparse', 'csr'):
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise ValueError("unknown storage type" + stype)
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
|
python
|
def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= no-member, protected-access
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
if stype in ('row_sparse', 'csr'):
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise ValueError("unknown storage type" + stype)
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
|
[
"def",
"zeros",
"(",
"stype",
",",
"shape",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable= no-member, protected-access",
"if",
"stype",
"==",
"'default'",
":",
"return",
"_zeros_ndarray",
"(",
"shape",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
",",
"*",
"*",
"kwargs",
")",
"if",
"ctx",
"is",
"None",
":",
"ctx",
"=",
"current_context",
"(",
")",
"dtype",
"=",
"mx_real_t",
"if",
"dtype",
"is",
"None",
"else",
"dtype",
"if",
"stype",
"in",
"(",
"'row_sparse'",
",",
"'csr'",
")",
":",
"aux_types",
"=",
"_STORAGE_AUX_TYPES",
"[",
"stype",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown storage type\"",
"+",
"stype",
")",
"out",
"=",
"_ndarray_cls",
"(",
"_new_alloc_handle",
"(",
"stype",
",",
"shape",
",",
"ctx",
",",
"True",
",",
"dtype",
",",
"aux_types",
")",
")",
"return",
"_internal",
".",
"_zeros",
"(",
"shape",
"=",
"shape",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
",",
"out",
"=",
"out",
",",
"*",
"*",
"kwargs",
")"
] |
Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
|
[
"Return",
"a",
"new",
"array",
"of",
"given",
"shape",
"and",
"type",
"filled",
"with",
"zeros",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1507-L1543
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
empty
|
def empty(stype, shape, ctx=None, dtype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = current_context()
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
if stype in ('csr', 'row_sparse'):
return zeros(stype, shape, ctx=ctx, dtype=dtype)
else:
raise Exception("unknown stype : " + str(stype))
|
python
|
def empty(stype, shape, ctx=None, dtype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = current_context()
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
if stype in ('csr', 'row_sparse'):
return zeros(stype, shape, ctx=ctx, dtype=dtype)
else:
raise Exception("unknown stype : " + str(stype))
|
[
"def",
"empty",
"(",
"stype",
",",
"shape",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"shape",
",",
"int",
")",
":",
"shape",
"=",
"(",
"shape",
",",
")",
"if",
"ctx",
"is",
"None",
":",
"ctx",
"=",
"current_context",
"(",
")",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"mx_real_t",
"assert",
"(",
"stype",
"is",
"not",
"None",
")",
"if",
"stype",
"in",
"(",
"'csr'",
",",
"'row_sparse'",
")",
":",
"return",
"zeros",
"(",
"stype",
",",
"shape",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"unknown stype : \"",
"+",
"str",
"(",
"stype",
")",
")"
] |
Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
|
[
"Returns",
"a",
"new",
"array",
"of",
"given",
"shape",
"and",
"type",
"without",
"initializing",
"entries",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1547-L1576
|
train
|
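A brief usage sketch for the sparse `empty` helper above, assuming it is imported directly from the `mxnet.ndarray.sparse` module. Because sparse arrays have no uninitialized layout, the result is the same as calling `zeros` with the matching storage type.

import mxnet as mx
from mxnet.ndarray.sparse import empty

# Allocate a 'row_sparse' and a 'csr' array; both come back zero-filled.
rsp = empty('row_sparse', (2, 3), ctx=mx.cpu(), dtype='float32')
csr = empty('csr', (2, 3))
print(type(rsp).__name__, rsp.shape)   # RowSparseNDArray (2, 3)
print(type(csr).__name__, csr.shape)   # CSRNDArray (2, 3)
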
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
array
|
def array(source_array, ctx=None, dtype=None):
"""Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
The default context is ``source_array.context`` if ``source_array`` is an NDArray. \
The current default context otherwise.
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \
`float32` otherwise.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as spsp
>>> csr = spsp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
"""
ctx = current_context() if ctx is None else ctx
if isinstance(source_array, NDArray):
assert(source_array.stype != 'default'), \
"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray"
# prepare dtype and ctx based on source_array, if not provided
dtype = _prepare_default_dtype(source_array, dtype)
# if both dtype and ctx are different from source_array, we cannot copy directly
if source_array.dtype != dtype and source_array.context != ctx:
arr = empty(source_array.stype, source_array.shape, dtype=dtype)
arr[:] = source_array
arr = arr.as_in_context(ctx)
else:
arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx)
arr[:] = source_array
return arr
elif spsp and isinstance(source_array, spsp.csr.csr_matrix):
# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
# preprocess scipy csr to canonical form
csr = source_array.sorted_indices()
csr.sum_duplicates()
dtype = _prepare_default_dtype(source_array, dtype)
return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \
dtype=dtype, ctx=ctx)
elif isinstance(source_array, (np.ndarray, np.generic)):
raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ",
type(source_array))
else:
raise ValueError("Unexpected source_array type: ", type(source_array))
|
python
|
def array(source_array, ctx=None, dtype=None):
"""Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
The default context is ``source_array.context`` if ``source_array`` is an NDArray. \
The current default context otherwise.
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \
`float32` otherwise.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as spsp
>>> csr = spsp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
"""
ctx = current_context() if ctx is None else ctx
if isinstance(source_array, NDArray):
assert(source_array.stype != 'default'), \
"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray"
# prepare dtype and ctx based on source_array, if not provided
dtype = _prepare_default_dtype(source_array, dtype)
# if both dtype and ctx are different from source_array, we cannot copy directly
if source_array.dtype != dtype and source_array.context != ctx:
arr = empty(source_array.stype, source_array.shape, dtype=dtype)
arr[:] = source_array
arr = arr.as_in_context(ctx)
else:
arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx)
arr[:] = source_array
return arr
elif spsp and isinstance(source_array, spsp.csr.csr_matrix):
# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
# preprocess scipy csr to canonical form
csr = source_array.sorted_indices()
csr.sum_duplicates()
dtype = _prepare_default_dtype(source_array, dtype)
return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \
dtype=dtype, ctx=ctx)
elif isinstance(source_array, (np.ndarray, np.generic)):
raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ",
type(source_array))
else:
raise ValueError("Unexpected source_array type: ", type(source_array))
|
[
"def",
"array",
"(",
"source_array",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"ctx",
"=",
"current_context",
"(",
")",
"if",
"ctx",
"is",
"None",
"else",
"ctx",
"if",
"isinstance",
"(",
"source_array",
",",
"NDArray",
")",
":",
"assert",
"(",
"source_array",
".",
"stype",
"!=",
"'default'",
")",
",",
"\"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray\"",
"# prepare dtype and ctx based on source_array, if not provided",
"dtype",
"=",
"_prepare_default_dtype",
"(",
"source_array",
",",
"dtype",
")",
"# if both dtype and ctx are different from source_array, we cannot copy directly",
"if",
"source_array",
".",
"dtype",
"!=",
"dtype",
"and",
"source_array",
".",
"context",
"!=",
"ctx",
":",
"arr",
"=",
"empty",
"(",
"source_array",
".",
"stype",
",",
"source_array",
".",
"shape",
",",
"dtype",
"=",
"dtype",
")",
"arr",
"[",
":",
"]",
"=",
"source_array",
"arr",
"=",
"arr",
".",
"as_in_context",
"(",
"ctx",
")",
"else",
":",
"arr",
"=",
"empty",
"(",
"source_array",
".",
"stype",
",",
"source_array",
".",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"ctx",
"=",
"ctx",
")",
"arr",
"[",
":",
"]",
"=",
"source_array",
"return",
"arr",
"elif",
"spsp",
"and",
"isinstance",
"(",
"source_array",
",",
"spsp",
".",
"csr",
".",
"csr_matrix",
")",
":",
"# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy",
"# preprocess scipy csr to canonical form",
"csr",
"=",
"source_array",
".",
"sorted_indices",
"(",
")",
"csr",
".",
"sum_duplicates",
"(",
")",
"dtype",
"=",
"_prepare_default_dtype",
"(",
"source_array",
",",
"dtype",
")",
"return",
"csr_matrix",
"(",
"(",
"csr",
".",
"data",
",",
"csr",
".",
"indices",
",",
"csr",
".",
"indptr",
")",
",",
"shape",
"=",
"csr",
".",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"ctx",
"=",
"ctx",
")",
"elif",
"isinstance",
"(",
"source_array",
",",
"(",
"np",
".",
"ndarray",
",",
"np",
".",
"generic",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Please use mx.nd.array to create an NDArray with source_array of type \"",
",",
"type",
"(",
"source_array",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected source_array type: \"",
",",
"type",
"(",
"source_array",
")",
")"
] |
Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
The default context is ``source_array.context`` if ``source_array`` is an NDArray. \
The current default context otherwise.
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \
`float32` otherwise.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as spsp
>>> csr = spsp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
|
[
"Creates",
"a",
"sparse",
"array",
"from",
"any",
"object",
"exposing",
"the",
"array",
"interface",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L1579-L1637
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
BaseSparseNDArray._aux_type
|
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
"""
aux_type = ctypes.c_int()
check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
return _DTYPE_MX_TO_NP[aux_type.value]
|
python
|
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
"""
aux_type = ctypes.c_int()
check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
return _DTYPE_MX_TO_NP[aux_type.value]
|
[
"def",
"_aux_type",
"(",
"self",
",",
"i",
")",
":",
"aux_type",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayGetAuxType",
"(",
"self",
".",
"handle",
",",
"i",
",",
"ctypes",
".",
"byref",
"(",
"aux_type",
")",
")",
")",
"return",
"_DTYPE_MX_TO_NP",
"[",
"aux_type",
".",
"value",
"]"
] |
Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
|
[
"Data",
"-",
"type",
"of",
"the",
"array",
"s",
"ith",
"aux",
"data",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L164-L174
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
BaseSparseNDArray._aux_types
|
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types
|
python
|
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types
|
[
"def",
"_aux_types",
"(",
"self",
")",
":",
"aux_types",
"=",
"[",
"]",
"num_aux",
"=",
"self",
".",
"_num_aux",
"for",
"i",
"in",
"range",
"(",
"num_aux",
")",
":",
"aux_types",
".",
"append",
"(",
"self",
".",
"_aux_type",
"(",
"i",
")",
")",
"return",
"aux_types"
] |
The data types of the aux data for the BaseSparseNDArray.
|
[
"The",
"data",
"types",
"of",
"the",
"aux",
"data",
"for",
"the",
"BaseSparseNDArray",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L183-L190
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
BaseSparseNDArray.astype
|
def astype(self, dtype, copy=True):
"""Return a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
if not copy and np.dtype(dtype) == self.dtype:
return self
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res
|
python
|
def astype(self, dtype, copy=True):
"""Return a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
if not copy and np.dtype(dtype) == self.dtype:
return self
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res
|
[
"def",
"astype",
"(",
"self",
",",
"dtype",
",",
"copy",
"=",
"True",
")",
":",
"if",
"not",
"copy",
"and",
"np",
".",
"dtype",
"(",
"dtype",
")",
"==",
"self",
".",
"dtype",
":",
"return",
"self",
"res",
"=",
"zeros",
"(",
"shape",
"=",
"self",
".",
"shape",
",",
"ctx",
"=",
"self",
".",
"context",
",",
"dtype",
"=",
"dtype",
",",
"stype",
"=",
"self",
".",
"stype",
")",
"self",
".",
"copyto",
"(",
"res",
")",
"return",
"res"
] |
Return a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
|
[
"Return",
"a",
"copy",
"of",
"the",
"array",
"after",
"casting",
"to",
"a",
"specified",
"type",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L197-L223
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
BaseSparseNDArray.check_format
|
def check_format(self, full_check=True):
"""Check whether the NDArray format is valid.
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))
|
python
|
def check_format(self, full_check=True):
"""Check whether the NDArray format is valid.
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))
|
[
"def",
"check_format",
"(",
"self",
",",
"full_check",
"=",
"True",
")",
":",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySyncCheckFormat",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"c_bool",
"(",
"full_check",
")",
")",
")"
] |
Check whether the NDArray format is valid.
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
|
[
"Check",
"whether",
"the",
"NDArray",
"format",
"is",
"valid",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L252-L261
|
train
|
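A small sketch of `check_format` on a hand-built CSR array; the data/indices/indptr values are illustration-only. A well-formed array passes silently, while a malformed one raises.

import mxnet as mx

# 2x3 CSR matrix: row 0 holds [1, 0, 2], row 1 holds [0, 3, 0]
data = [1.0, 2.0, 3.0]
indices = [0, 2, 1]
indptr = [0, 2, 3]
a = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=(2, 3))
a.check_format()                  # rigorous O(N) validation
a.check_format(full_check=False)  # cheap O(1) structural checks only
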
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
BaseSparseNDArray._data
|
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
|
python
|
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
|
[
"def",
"_data",
"(",
"self",
")",
":",
"self",
".",
"wait_to_read",
"(",
")",
"hdl",
"=",
"NDArrayHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayGetDataNDArray",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"NDArray",
"(",
"hdl",
")"
] |
A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
|
[
"A",
"deep",
"copy",
"NDArray",
"of",
"the",
"data",
"array",
"associated",
"with",
"the",
"BaseSparseNDArray",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L263-L271
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
BaseSparseNDArray._aux_data
|
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
return NDArray(hdl)
|
python
|
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
return NDArray(hdl)
|
[
"def",
"_aux_data",
"(",
"self",
",",
"i",
")",
":",
"self",
".",
"wait_to_read",
"(",
")",
"hdl",
"=",
"NDArrayHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayGetAuxNDArray",
"(",
"self",
".",
"handle",
",",
"i",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"NDArray",
"(",
"hdl",
")"
] |
Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
|
[
"Get",
"a",
"deep",
"copy",
"NDArray",
"of",
"the",
"i",
"-",
"th",
"aux",
"data",
"array",
"associated",
"with",
"the",
"BaseSparseNDArray",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L274-L283
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
CSRNDArray.asscipy
|
def asscipy(self):
"""Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format>
"""
data = self.data.asnumpy()
indices = self.indices.asnumpy()
indptr = self.indptr.asnumpy()
if not spsp:
raise ImportError("scipy is not available. \
Please check if the scipy python bindings are installed.")
return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)
|
python
|
def asscipy(self):
"""Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format>
"""
data = self.data.asnumpy()
indices = self.indices.asnumpy()
indptr = self.indptr.asnumpy()
if not spsp:
raise ImportError("scipy is not available. \
Please check if the scipy python bindings are installed.")
return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)
|
[
"def",
"asscipy",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"data",
".",
"asnumpy",
"(",
")",
"indices",
"=",
"self",
".",
"indices",
".",
"asnumpy",
"(",
")",
"indptr",
"=",
"self",
".",
"indptr",
".",
"asnumpy",
"(",
")",
"if",
"not",
"spsp",
":",
"raise",
"ImportError",
"(",
"\"scipy is not available. \\\n Please check if the scipy python bindings are installed.\"",
")",
"return",
"spsp",
".",
"csr_matrix",
"(",
"(",
"data",
",",
"indices",
",",
"indptr",
")",
",",
"shape",
"=",
"self",
".",
"shape",
",",
"dtype",
"=",
"self",
".",
"dtype",
")"
] |
Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format>
|
[
"Returns",
"a",
"scipy",
".",
"sparse",
".",
"csr",
".",
"csr_matrix",
"object",
"with",
"value",
"copied",
"from",
"this",
"array"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L539-L558
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
RowSparseNDArray.tostype
|
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
# pylint: disable= no-member, protected-access
if stype == 'csr':
raise ValueError("cast_storage from row_sparse to csr is not supported")
return op.cast_storage(self, stype=stype)
|
python
|
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
# pylint: disable= no-member, protected-access
if stype == 'csr':
raise ValueError("cast_storage from row_sparse to csr is not supported")
return op.cast_storage(self, stype=stype)
|
[
"def",
"tostype",
"(",
"self",
",",
"stype",
")",
":",
"# pylint: disable= no-member, protected-access",
"if",
"stype",
"==",
"'csr'",
":",
"raise",
"ValueError",
"(",
"\"cast_storage from row_sparse to csr is not supported\"",
")",
"return",
"op",
".",
"cast_storage",
"(",
"self",
",",
"stype",
"=",
"stype",
")"
] |
Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
|
[
"Return",
"a",
"copy",
"of",
"the",
"array",
"with",
"chosen",
"storage",
"type",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L740-L751
|
train
|
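A short sketch for `tostype` on a RowSparseNDArray: casting to 'default' densifies the array, while casting to 'csr' hits the guard above and raises.

import mxnet as mx

rsp = mx.nd.sparse.zeros('row_sparse', (2, 3))
dense = rsp.tostype('default')   # dense NDArray copy
print(dense.stype)               # 'default'
try:
    rsp.tostype('csr')
except ValueError as err:
    print(err)                   # cast_storage from row_sparse to csr is not supported
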
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
RowSparseNDArray.copyto
|
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype in ('default', 'row_sparse'):
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
|
python
|
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype in ('default', 'row_sparse'):
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
|
[
"def",
"copyto",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"Context",
")",
":",
"return",
"super",
"(",
"RowSparseNDArray",
",",
"self",
")",
".",
"copyto",
"(",
"other",
")",
"elif",
"isinstance",
"(",
"other",
",",
"NDArray",
")",
":",
"stype",
"=",
"other",
".",
"stype",
"if",
"stype",
"in",
"(",
"'default'",
",",
"'row_sparse'",
")",
":",
"return",
"super",
"(",
"RowSparseNDArray",
",",
"self",
")",
".",
"copyto",
"(",
"other",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'copyto does not support destination NDArray stype '",
"+",
"str",
"(",
"stype",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'copyto does not support type '",
"+",
"str",
"(",
"type",
"(",
"other",
")",
")",
")"
] |
Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
|
[
"Copies",
"the",
"value",
"of",
"this",
"array",
"to",
"another",
"array",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L754-L784
|
train
|
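A minimal sketch of both `copyto` forms accepted above: copying into a pre-allocated dense NDArray of the same shape, and copying to a Context, which allocates a fresh RowSparseNDArray there.

import mxnet as mx

src = mx.nd.sparse.zeros('row_sparse', (2, 3))

dst = mx.nd.empty((2, 3))        # dense destination with a matching shape
src.copyto(dst)                  # writes src's values into dst

cpu_copy = src.copyto(mx.cpu())  # new RowSparseNDArray on the given context
print(cpu_copy.stype)            # 'row_sparse'
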
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/export_model.py
|
export_model
|
def export_model(sym, params, input_shape, input_type=np.float32,
onnx_file_path='model.onnx', verbose=False):
"""Exports the MXNet model file, passed as a parameter, into ONNX model.
Accepts both symbol, parameter objects as well as json and params filepaths as input.
Operator support and coverage -
https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration
Parameters
----------
sym : str or symbol object
Path to the json file or Symbol object
params : str or dict
Path to the params file or params dictionary. (Including both arg_params and aux_params)
input_shape : List of tuple
Input shape of the model e.g [(1,3,224,224)]
input_type : data type
Input data type e.g. np.float32
onnx_file_path : str
Path where to save the generated onnx file
verbose : Boolean
If true will print logs of the model conversion
Returns
-------
onnx_file_path : str
Onnx file path
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
"""
try:
from onnx import helper, mapping
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
converter = MXNetGraph()
data_format = np.dtype(input_type)
# if input parameters are strings(file paths), load files and create symbol parameter objects
if isinstance(sym, string_types) and isinstance(params, string_types):
logging.info("Converting json and weight file to sym and params")
sym_obj, params_obj = load_module(sym, params)
onnx_graph = converter.create_onnx_graph_proto(sym_obj, params_obj, input_shape,
mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
verbose=verbose)
elif isinstance(sym, symbol.Symbol) and isinstance(params, dict):
onnx_graph = converter.create_onnx_graph_proto(sym, params, input_shape,
mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
verbose=verbose)
else:
raise ValueError("Input sym and params should either be files or objects")
# Create the model (ModelProto)
onnx_model = helper.make_model(onnx_graph)
# Save model on disk
with open(onnx_file_path, "wb") as file_handle:
serialized = onnx_model.SerializeToString()
file_handle.write(serialized)
logging.info("Input shape of the model %s ", input_shape)
logging.info("Exported ONNX file %s saved to disk", onnx_file_path)
return onnx_file_path
|
python
|
def export_model(sym, params, input_shape, input_type=np.float32,
onnx_file_path='model.onnx', verbose=False):
"""Exports the MXNet model file, passed as a parameter, into ONNX model.
Accepts both symbol, parameter objects as well as json and params filepaths as input.
Operator support and coverage -
https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration
Parameters
----------
sym : str or symbol object
Path to the json file or Symbol object
params : str or dict
Path to the params file or params dictionary. (Including both arg_params and aux_params)
input_shape : List of tuple
Input shape of the model e.g [(1,3,224,224)]
input_type : data type
Input data type e.g. np.float32
onnx_file_path : str
Path where to save the generated onnx file
verbose : Boolean
If true will print logs of the model conversion
Returns
-------
onnx_file_path : str
Onnx file path
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
"""
try:
from onnx import helper, mapping
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
converter = MXNetGraph()
data_format = np.dtype(input_type)
# if input parameters are strings(file paths), load files and create symbol parameter objects
if isinstance(sym, string_types) and isinstance(params, string_types):
logging.info("Converting json and weight file to sym and params")
sym_obj, params_obj = load_module(sym, params)
onnx_graph = converter.create_onnx_graph_proto(sym_obj, params_obj, input_shape,
mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
verbose=verbose)
elif isinstance(sym, symbol.Symbol) and isinstance(params, dict):
onnx_graph = converter.create_onnx_graph_proto(sym, params, input_shape,
mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
verbose=verbose)
else:
raise ValueError("Input sym and params should either be files or objects")
# Create the model (ModelProto)
onnx_model = helper.make_model(onnx_graph)
# Save model on disk
with open(onnx_file_path, "wb") as file_handle:
serialized = onnx_model.SerializeToString()
file_handle.write(serialized)
logging.info("Input shape of the model %s ", input_shape)
logging.info("Exported ONNX file %s saved to disk", onnx_file_path)
return onnx_file_path
|
[
"def",
"export_model",
"(",
"sym",
",",
"params",
",",
"input_shape",
",",
"input_type",
"=",
"np",
".",
"float32",
",",
"onnx_file_path",
"=",
"'model.onnx'",
",",
"verbose",
"=",
"False",
")",
":",
"try",
":",
"from",
"onnx",
"import",
"helper",
",",
"mapping",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Onnx and protobuf need to be installed. \"",
"+",
"\"Instructions to install - https://github.com/onnx/onnx\"",
")",
"converter",
"=",
"MXNetGraph",
"(",
")",
"data_format",
"=",
"np",
".",
"dtype",
"(",
"input_type",
")",
"# if input parameters are strings(file paths), load files and create symbol parameter objects",
"if",
"isinstance",
"(",
"sym",
",",
"string_types",
")",
"and",
"isinstance",
"(",
"params",
",",
"string_types",
")",
":",
"logging",
".",
"info",
"(",
"\"Converting json and weight file to sym and params\"",
")",
"sym_obj",
",",
"params_obj",
"=",
"load_module",
"(",
"sym",
",",
"params",
")",
"onnx_graph",
"=",
"converter",
".",
"create_onnx_graph_proto",
"(",
"sym_obj",
",",
"params_obj",
",",
"input_shape",
",",
"mapping",
".",
"NP_TYPE_TO_TENSOR_TYPE",
"[",
"data_format",
"]",
",",
"verbose",
"=",
"verbose",
")",
"elif",
"isinstance",
"(",
"sym",
",",
"symbol",
".",
"Symbol",
")",
"and",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"onnx_graph",
"=",
"converter",
".",
"create_onnx_graph_proto",
"(",
"sym",
",",
"params",
",",
"input_shape",
",",
"mapping",
".",
"NP_TYPE_TO_TENSOR_TYPE",
"[",
"data_format",
"]",
",",
"verbose",
"=",
"verbose",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Input sym and params should either be files or objects\"",
")",
"# Create the model (ModelProto)",
"onnx_model",
"=",
"helper",
".",
"make_model",
"(",
"onnx_graph",
")",
"# Save model on disk",
"with",
"open",
"(",
"onnx_file_path",
",",
"\"wb\"",
")",
"as",
"file_handle",
":",
"serialized",
"=",
"onnx_model",
".",
"SerializeToString",
"(",
")",
"file_handle",
".",
"write",
"(",
"serialized",
")",
"logging",
".",
"info",
"(",
"\"Input shape of the model %s \"",
",",
"input_shape",
")",
"logging",
".",
"info",
"(",
"\"Exported ONNX file %s saved to disk\"",
",",
"onnx_file_path",
")",
"return",
"onnx_file_path"
] |
Exports the MXNet model file, passed as a parameter, into ONNX model.
Accepts both symbol, parameter objects as well as json and params filepaths as input.
Operator support and coverage -
https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration
Parameters
----------
sym : str or symbol object
Path to the json file or Symbol object
params : str or dict
Path to the params file or params dictionary. (Including both arg_params and aux_params)
input_shape : List of tuple
Input shape of the model e.g [(1,3,224,224)]
input_type : data type
Input data type e.g. np.float32
onnx_file_path : str
Path where to save the generated onnx file
verbose : Boolean
If true will print logs of the model conversion
Returns
-------
onnx_file_path : str
Onnx file path
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
|
[
"Exports",
"the",
"MXNet",
"model",
"file",
"passed",
"as",
"a",
"parameter",
"into",
"ONNX",
"model",
".",
"Accepts",
"both",
"symbol",
"parameter",
"objects",
"as",
"well",
"as",
"json",
"and",
"params",
"filepaths",
"as",
"input",
".",
"Operator",
"support",
"and",
"coverage",
"-",
"https",
":",
"//",
"cwiki",
".",
"apache",
".",
"org",
"/",
"confluence",
"/",
"display",
"/",
"MXNET",
"/",
"MXNet",
"-",
"ONNX",
"+",
"Integration"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_model.py#L35-L101
|
train
|
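A hedged usage sketch for `export_model`. The checkpoint file names below ('resnet-symbol.json', 'resnet-0000.params') are placeholders for an existing MXNet checkpoint, and the onnx/protobuf packages must be installed for the import inside the function to succeed.

import numpy as np
import mxnet.contrib.onnx as onnx_mxnet

# Placeholder paths for a checkpoint saved with prefix 'resnet' at epoch 0.
sym_file = 'resnet-symbol.json'
params_file = 'resnet-0000.params'

onnx_path = onnx_mxnet.export_model(sym_file, params_file,
                                    [(1, 3, 224, 224)], np.float32,
                                    onnx_file_path='resnet.onnx', verbose=True)
print(onnx_path)   # 'resnet.onnx'
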
apache/incubator-mxnet
|
benchmark/python/sparse/memory_benchmark.py
|
bench_dot
|
def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
rhs_density, dot_func, trans_lhs, lhs_stype,
rhs_stype, only_storage, distribution="uniform"):
""" Benchmarking both storage and dot
"""
lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_dim), lhs_stype, density, distribution=distribution)
if not only_storage:
rhs_nd = rand_ndarray((lhs_col_dim, rhs_col_dim), rhs_stype,
density=rhs_density, distribution=distribution)
out = dot_func(lhs_nd, rhs_nd, trans_lhs)
mx.nd.waitall()
|
python
|
def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
rhs_density, dot_func, trans_lhs, lhs_stype,
rhs_stype, only_storage, distribution="uniform"):
""" Benchmarking both storage and dot
"""
lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_dim), lhs_stype, density, distribution=distribution)
if not only_storage:
rhs_nd = rand_ndarray((lhs_col_dim, rhs_col_dim), rhs_stype,
density=rhs_density, distribution=distribution)
out = dot_func(lhs_nd, rhs_nd, trans_lhs)
mx.nd.waitall()
|
[
"def",
"bench_dot",
"(",
"lhs_row_dim",
",",
"lhs_col_dim",
",",
"rhs_col_dim",
",",
"density",
",",
"rhs_density",
",",
"dot_func",
",",
"trans_lhs",
",",
"lhs_stype",
",",
"rhs_stype",
",",
"only_storage",
",",
"distribution",
"=",
"\"uniform\"",
")",
":",
"lhs_nd",
"=",
"rand_ndarray",
"(",
"(",
"lhs_row_dim",
",",
"lhs_col_dim",
")",
",",
"lhs_stype",
",",
"density",
",",
"distribution",
"=",
"distribution",
")",
"if",
"not",
"only_storage",
":",
"rhs_nd",
"=",
"rand_ndarray",
"(",
"(",
"lhs_col_dim",
",",
"rhs_col_dim",
")",
",",
"rhs_stype",
",",
"density",
"=",
"rhs_density",
",",
"distribution",
"=",
"distribution",
")",
"out",
"=",
"dot_func",
"(",
"lhs_nd",
",",
"rhs_nd",
",",
"trans_lhs",
")",
"mx",
".",
"nd",
".",
"waitall",
"(",
")"
] |
Benchmarking both storage and dot
|
[
"Benchmarking",
"both",
"storage",
"and",
"dot"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/benchmark/python/sparse/memory_benchmark.py#L79-L89
|
train
|
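`bench_dot` lives in a standalone benchmark script rather than an installed package, so the sketch below reproduces its core measurement inline: build the operands with `mxnet.test_utils.rand_ndarray` and time a sparse-dense dot. The shapes and density are arbitrary illustration values.

import time
import mxnet as mx
from mxnet.test_utils import rand_ndarray

lhs = rand_ndarray((1000, 500), 'csr', density=0.05)  # sparse lhs
rhs = rand_ndarray((500, 64), 'default')              # dense rhs

start = time.time()
out = mx.nd.dot(lhs, rhs)   # csr * dense dot product
mx.nd.waitall()             # block until the asynchronous execution finishes
print('dot took %.4f s, output shape %s' % (time.time() - start, out.shape))
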
apache/incubator-mxnet
|
tools/caffe_converter/convert_mean.py
|
convert_mean
|
def convert_mean(binaryproto_fname, output=None):
"""Convert caffe mean
Parameters
----------
binaryproto_fname : str
Filename of the mean
output : str, optional
Save the mean into mxnet's format
Returns
-------
NDArray
Mean in ndarray
"""
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(binaryproto_fname, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(
mean_blob.channels, mean_blob.height, mean_blob.width
)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
nd = mx.nd.array(img_mean_np)
if output is not None:
mx.nd.save(output, {"mean_image": nd})
return nd
|
python
|
def convert_mean(binaryproto_fname, output=None):
"""Convert caffe mean
Parameters
----------
binaryproto_fname : str
Filename of the mean
output : str, optional
Save the mean into mxnet's format
Returns
-------
NDArray
Mean in ndarray
"""
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(binaryproto_fname, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(
mean_blob.channels, mean_blob.height, mean_blob.width
)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
nd = mx.nd.array(img_mean_np)
if output is not None:
mx.nd.save(output, {"mean_image": nd})
return nd
|
[
"def",
"convert_mean",
"(",
"binaryproto_fname",
",",
"output",
"=",
"None",
")",
":",
"mean_blob",
"=",
"caffe_parser",
".",
"caffe_pb2",
".",
"BlobProto",
"(",
")",
"with",
"open",
"(",
"binaryproto_fname",
",",
"'rb'",
")",
"as",
"f",
":",
"mean_blob",
".",
"ParseFromString",
"(",
"f",
".",
"read",
"(",
")",
")",
"img_mean_np",
"=",
"np",
".",
"array",
"(",
"mean_blob",
".",
"data",
")",
"img_mean_np",
"=",
"img_mean_np",
".",
"reshape",
"(",
"mean_blob",
".",
"channels",
",",
"mean_blob",
".",
"height",
",",
"mean_blob",
".",
"width",
")",
"# swap channels from Caffe BGR to RGB",
"img_mean_np",
"[",
"[",
"0",
",",
"2",
"]",
",",
":",
",",
":",
"]",
"=",
"img_mean_np",
"[",
"[",
"2",
",",
"0",
"]",
",",
":",
",",
":",
"]",
"nd",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"img_mean_np",
")",
"if",
"output",
"is",
"not",
"None",
":",
"mx",
".",
"nd",
".",
"save",
"(",
"output",
",",
"{",
"\"mean_image\"",
":",
"nd",
"}",
")",
"return",
"nd"
] |
Convert caffe mean
Parameters
----------
binaryproto_fname : str
Filename of the mean
output : str, optional
Save the mean into mxnet's format
Returns
-------
NDArray
Mean in ndarray
|
[
"Convert",
"caffe",
"mean"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/convert_mean.py#L25-L53
|
train
|
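A hedged sketch of calling `convert_mean`; the module is a standalone script, so it is assumed to be run from tools/caffe_converter where it and its caffe_parser helper are importable, and 'mean.binaryproto' / 'mean.nd' are placeholder file names.

# Run from tools/caffe_converter so convert_mean and caffe_parser are importable.
from convert_mean import convert_mean

mean_nd = convert_mean('mean.binaryproto', output='mean.nd')  # placeholder paths
print(mean_nd.shape)   # (channels, height, width), e.g. (3, 256, 256)
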
apache/incubator-mxnet
|
python/mxnet/gluon/model_zoo/vision/densenet.py
|
get_densenet
|
def get_densenet(num_layers, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
num_init_features, growth_rate, block_config = densenet_spec[num_layers]
net = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx)
return net
|
python
|
def get_densenet(num_layers, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
num_init_features, growth_rate, block_config = densenet_spec[num_layers]
net = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx)
return net
|
[
"def",
"get_densenet",
"(",
"num_layers",
",",
"pretrained",
"=",
"False",
",",
"ctx",
"=",
"cpu",
"(",
")",
",",
"root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
".",
"data_dir",
"(",
")",
",",
"'models'",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"num_init_features",
",",
"growth_rate",
",",
"block_config",
"=",
"densenet_spec",
"[",
"num_layers",
"]",
"net",
"=",
"DenseNet",
"(",
"num_init_features",
",",
"growth_rate",
",",
"block_config",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"from",
".",
".",
"model_store",
"import",
"get_model_file",
"net",
".",
"load_parameters",
"(",
"get_model_file",
"(",
"'densenet%d'",
"%",
"(",
"num_layers",
")",
",",
"root",
"=",
"root",
")",
",",
"ctx",
"=",
"ctx",
")",
"return",
"net"
] |
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
|
[
"r",
"Densenet",
"-",
"BC",
"model",
"from",
"the",
"Densely",
"Connected",
"Convolutional",
"Networks",
"<https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1608",
".",
"06993",
".",
"pdf",
">",
"_",
"paper",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/model_zoo/vision/densenet.py#L125-L146
|
train
|
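A minimal usage sketch for the get_densenet entry above, assuming an MXNet installation; the layer count comes from the documented options (121, 161, 169, 201) and the model-zoo call mentioned in the comment is the usual public wrapper around this helper.

# Hedged sketch: build DenseNet-121 and load pretrained weights into the default CPU context.
net = get_densenet(121, pretrained=True)
# The same model is normally reached through the public model zoo, e.g.
# net = mxnet.gluon.model_zoo.vision.densenet121(pretrained=True)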
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/_export_helper.py
|
load_module
|
def load_module(sym_filepath, params_filepath):
"""Loads the MXNet model file and
returns MXNet symbol and params (weights).
Parameters
----------
    sym_filepath : str
        Path to the json file
    params_filepath : str
        Path to the params file
Returns
-------
sym : MXNet symbol
Model symbol object
params : params object
Model weights including both arg and aux params.
"""
if not (os.path.isfile(sym_filepath) and os.path.isfile(params_filepath)):
raise ValueError("Symbol and params files provided are invalid")
else:
try:
# reads symbol.json file from given path and
# retrieves model prefix and number of epochs
model_name = sym_filepath.rsplit('.', 1)[0].rsplit('-', 1)[0]
params_file_list = params_filepath.rsplit('.', 1)[0].rsplit('-', 1)
# Setting num_epochs to 0 if not present in filename
num_epochs = 0 if len(params_file_list) == 1 else int(params_file_list[1])
except IndexError:
logging.info("Model and params name should be in format: "
"prefix-symbol.json, prefix-epoch.params")
raise
sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, num_epochs)
# Merging arg and aux parameters
params = {}
params.update(arg_params)
params.update(aux_params)
return sym, params
|
python
|
def load_module(sym_filepath, params_filepath):
"""Loads the MXNet model file and
returns MXNet symbol and params (weights).
Parameters
----------
    sym_filepath : str
        Path to the json file
    params_filepath : str
        Path to the params file
Returns
-------
sym : MXNet symbol
Model symbol object
params : params object
Model weights including both arg and aux params.
"""
if not (os.path.isfile(sym_filepath) and os.path.isfile(params_filepath)):
raise ValueError("Symbol and params files provided are invalid")
else:
try:
# reads symbol.json file from given path and
# retrieves model prefix and number of epochs
model_name = sym_filepath.rsplit('.', 1)[0].rsplit('-', 1)[0]
params_file_list = params_filepath.rsplit('.', 1)[0].rsplit('-', 1)
# Setting num_epochs to 0 if not present in filename
num_epochs = 0 if len(params_file_list) == 1 else int(params_file_list[1])
except IndexError:
logging.info("Model and params name should be in format: "
"prefix-symbol.json, prefix-epoch.params")
raise
sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, num_epochs)
# Merging arg and aux parameters
params = {}
params.update(arg_params)
params.update(aux_params)
return sym, params
|
[
"def",
"load_module",
"(",
"sym_filepath",
",",
"params_filepath",
")",
":",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"sym_filepath",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"params_filepath",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Symbol and params files provided are invalid\"",
")",
"else",
":",
"try",
":",
"# reads symbol.json file from given path and",
"# retrieves model prefix and number of epochs",
"model_name",
"=",
"sym_filepath",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
".",
"rsplit",
"(",
"'-'",
",",
"1",
")",
"[",
"0",
"]",
"params_file_list",
"=",
"params_filepath",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
".",
"rsplit",
"(",
"'-'",
",",
"1",
")",
"# Setting num_epochs to 0 if not present in filename",
"num_epochs",
"=",
"0",
"if",
"len",
"(",
"params_file_list",
")",
"==",
"1",
"else",
"int",
"(",
"params_file_list",
"[",
"1",
"]",
")",
"except",
"IndexError",
":",
"logging",
".",
"info",
"(",
"\"Model and params name should be in format: \"",
"\"prefix-symbol.json, prefix-epoch.params\"",
")",
"raise",
"sym",
",",
"arg_params",
",",
"aux_params",
"=",
"mx",
".",
"model",
".",
"load_checkpoint",
"(",
"model_name",
",",
"num_epochs",
")",
"# Merging arg and aux parameters",
"params",
"=",
"{",
"}",
"params",
".",
"update",
"(",
"arg_params",
")",
"params",
".",
"update",
"(",
"aux_params",
")",
"return",
"sym",
",",
"params"
] |
Loads the MXNet model file and
returns MXNet symbol and params (weights).
Parameters
----------
    sym_filepath : str
        Path to the json file
    params_filepath : str
        Path to the params file
Returns
-------
sym : MXNet symbol
Model symbol object
params : params object
Model weights including both arg and aux params.
|
[
"Loads",
"the",
"MXNet",
"model",
"file",
"and",
"returns",
"MXNet",
"symbol",
"and",
"params",
"(",
"weights",
")",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/_export_helper.py#L24-L65
|
train
|
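A short usage sketch for load_module above; the checkpoint file names are hypothetical and only follow the prefix-symbol.json / prefix-epoch.params convention described in the docstring.

# Hypothetical checkpoint pair following the documented naming convention.
sym, params = load_module('resnet-18-symbol.json', 'resnet-18-0000.params')
# sym is the loaded symbol graph; params merges arg_params and aux_params into one dict.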
apache/incubator-mxnet
|
example/ssd/symbol/symbol_builder.py
|
import_module
|
def import_module(module_name):
"""Helper function to import module"""
import sys, os
import importlib
sys.path.append(os.path.dirname(__file__))
return importlib.import_module(module_name)
|
python
|
def import_module(module_name):
"""Helper function to import module"""
import sys, os
import importlib
sys.path.append(os.path.dirname(__file__))
return importlib.import_module(module_name)
|
[
"def",
"import_module",
"(",
"module_name",
")",
":",
"import",
"sys",
",",
"os",
"import",
"importlib",
"sys",
".",
"path",
".",
"append",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"return",
"importlib",
".",
"import_module",
"(",
"module_name",
")"
] |
Helper function to import module
|
[
"Helper",
"function",
"to",
"import",
"module"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/symbol_builder.py#L22-L27
|
train
|
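A one-line illustration of import_module, which the SSD symbol builders below use to resolve a base network by name; 'vgg16_reduced' is an assumed module name living next to symbol_builder.py.

# Hypothetical: resolves a sibling module such as example/ssd/symbol/vgg16_reduced.py.
vgg = import_module('vgg16_reduced')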
apache/incubator-mxnet
|
example/ssd/symbol/symbol_builder.py
|
get_symbol_train
|
def get_symbol_train(network, num_classes, from_layers, num_filters, strides, pads,
sizes, ratios, normalizations=-1, steps=[], min_filter=128,
nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""Build network symbol for training SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
        feature extraction layers, use '' for adding extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
        -1 indicates no normalization or scaling
steps : list
        specify steps for each MultiBoxPrior layer; leave empty and steps will be
        calculated according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
"""
label = mx.sym.Variable('label')
body = import_module(network).get_symbol(num_classes, **kwargs)
layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,
min_filter=min_filter)
loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_filters, clip=False, interm_layer=0, steps=steps)
tmp = mx.symbol.contrib.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det])
return out
|
python
|
def get_symbol_train(network, num_classes, from_layers, num_filters, strides, pads,
sizes, ratios, normalizations=-1, steps=[], min_filter=128,
nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""Build network symbol for training SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
        feature extraction layers, use '' for adding extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
        -1 indicates no normalization or scaling
steps : list
        specify steps for each MultiBoxPrior layer; leave empty and steps will be
        calculated according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
"""
label = mx.sym.Variable('label')
body = import_module(network).get_symbol(num_classes, **kwargs)
layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,
min_filter=min_filter)
loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_filters, clip=False, interm_layer=0, steps=steps)
tmp = mx.symbol.contrib.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det])
return out
|
[
"def",
"get_symbol_train",
"(",
"network",
",",
"num_classes",
",",
"from_layers",
",",
"num_filters",
",",
"strides",
",",
"pads",
",",
"sizes",
",",
"ratios",
",",
"normalizations",
"=",
"-",
"1",
",",
"steps",
"=",
"[",
"]",
",",
"min_filter",
"=",
"128",
",",
"nms_thresh",
"=",
"0.5",
",",
"force_suppress",
"=",
"False",
",",
"nms_topk",
"=",
"400",
",",
"*",
"*",
"kwargs",
")",
":",
"label",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'label'",
")",
"body",
"=",
"import_module",
"(",
"network",
")",
".",
"get_symbol",
"(",
"num_classes",
",",
"*",
"*",
"kwargs",
")",
"layers",
"=",
"multi_layer_feature",
"(",
"body",
",",
"from_layers",
",",
"num_filters",
",",
"strides",
",",
"pads",
",",
"min_filter",
"=",
"min_filter",
")",
"loc_preds",
",",
"cls_preds",
",",
"anchor_boxes",
"=",
"multibox_layer",
"(",
"layers",
",",
"num_classes",
",",
"sizes",
"=",
"sizes",
",",
"ratios",
"=",
"ratios",
",",
"normalization",
"=",
"normalizations",
",",
"num_channels",
"=",
"num_filters",
",",
"clip",
"=",
"False",
",",
"interm_layer",
"=",
"0",
",",
"steps",
"=",
"steps",
")",
"tmp",
"=",
"mx",
".",
"symbol",
".",
"contrib",
".",
"MultiBoxTarget",
"(",
"*",
"[",
"anchor_boxes",
",",
"label",
",",
"cls_preds",
"]",
",",
"overlap_threshold",
"=",
".5",
",",
"ignore_label",
"=",
"-",
"1",
",",
"negative_mining_ratio",
"=",
"3",
",",
"minimum_negative_samples",
"=",
"0",
",",
"negative_mining_thresh",
"=",
".5",
",",
"variances",
"=",
"(",
"0.1",
",",
"0.1",
",",
"0.2",
",",
"0.2",
")",
",",
"name",
"=",
"\"multibox_target\"",
")",
"loc_target",
"=",
"tmp",
"[",
"0",
"]",
"loc_target_mask",
"=",
"tmp",
"[",
"1",
"]",
"cls_target",
"=",
"tmp",
"[",
"2",
"]",
"cls_prob",
"=",
"mx",
".",
"symbol",
".",
"SoftmaxOutput",
"(",
"data",
"=",
"cls_preds",
",",
"label",
"=",
"cls_target",
",",
"ignore_label",
"=",
"-",
"1",
",",
"use_ignore",
"=",
"True",
",",
"grad_scale",
"=",
"1.",
",",
"multi_output",
"=",
"True",
",",
"normalization",
"=",
"'valid'",
",",
"name",
"=",
"\"cls_prob\"",
")",
"loc_loss_",
"=",
"mx",
".",
"symbol",
".",
"smooth_l1",
"(",
"name",
"=",
"\"loc_loss_\"",
",",
"data",
"=",
"loc_target_mask",
"*",
"(",
"loc_preds",
"-",
"loc_target",
")",
",",
"scalar",
"=",
"1.0",
")",
"loc_loss",
"=",
"mx",
".",
"symbol",
".",
"MakeLoss",
"(",
"loc_loss_",
",",
"grad_scale",
"=",
"1.",
",",
"normalization",
"=",
"'valid'",
",",
"name",
"=",
"\"loc_loss\"",
")",
"# monitoring training status",
"cls_label",
"=",
"mx",
".",
"symbol",
".",
"MakeLoss",
"(",
"data",
"=",
"cls_target",
",",
"grad_scale",
"=",
"0",
",",
"name",
"=",
"\"cls_label\"",
")",
"det",
"=",
"mx",
".",
"symbol",
".",
"contrib",
".",
"MultiBoxDetection",
"(",
"*",
"[",
"cls_prob",
",",
"loc_preds",
",",
"anchor_boxes",
"]",
",",
"name",
"=",
"\"detection\"",
",",
"nms_threshold",
"=",
"nms_thresh",
",",
"force_suppress",
"=",
"force_suppress",
",",
"variances",
"=",
"(",
"0.1",
",",
"0.1",
",",
"0.2",
",",
"0.2",
")",
",",
"nms_topk",
"=",
"nms_topk",
")",
"det",
"=",
"mx",
".",
"symbol",
".",
"MakeLoss",
"(",
"data",
"=",
"det",
",",
"grad_scale",
"=",
"0",
",",
"name",
"=",
"\"det_out\"",
")",
"# group output",
"out",
"=",
"mx",
".",
"symbol",
".",
"Group",
"(",
"[",
"cls_prob",
",",
"loc_loss",
",",
"cls_label",
",",
"det",
"]",
")",
"return",
"out"
] |
Build network symbol for training SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
        feature extraction layers, use '' for adding extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
        -1 indicates no normalization or scaling
steps : list
        specify steps for each MultiBoxPrior layer; leave empty and steps will be
        calculated according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
|
[
"Build",
"network",
"symbol",
"for",
"training",
"SSD"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/symbol_builder.py#L29-L116
|
train
|
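A hedged sketch of calling get_symbol_train, reusing the from_layers and num_filters examples from its own docstring; the base network name and the sizes/ratios/strides/pads/normalizations values are assumptions for illustration, not values taken from the repository configs.

# Hypothetical SSD-300-style invocation for a 20-class detector.
net = get_symbol_train(
    network='vgg16_reduced', num_classes=20,
    from_layers=['relu4_3', 'fc7', '', '', '', ''],
    num_filters=[512, -1, 512, 256, 256, 256],
    strides=[-1, -1, 2, 2, 2, 2], pads=[-1, -1, 1, 1, 1, 1],
    sizes=[[.1, .141], [.2, .272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]],
    ratios=[[1, 2, .5]] * 6,
    normalizations=[20, -1, -1, -1, -1, -1])
The inference-time builder get_symbol below takes the same set of arguments but returns the MultiBoxDetection output instead of the grouped training losses.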
apache/incubator-mxnet
|
example/ssd/symbol/symbol_builder.py
|
get_symbol
|
def get_symbol(network, num_classes, from_layers, num_filters, sizes, ratios,
strides, pads, normalizations=-1, steps=[], min_filter=128,
nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
        feature extraction layers, use '' for adding extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
        -1 indicates no normalization or scaling
steps : list
        specify steps for each MultiBoxPrior layer; leave empty and steps will be
        calculated according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
"""
body = import_module(network).get_symbol(num_classes, **kwargs)
layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,
min_filter=min_filter)
loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_filters, clip=False, interm_layer=0, steps=steps)
cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out
|
python
|
def get_symbol(network, num_classes, from_layers, num_filters, sizes, ratios,
strides, pads, normalizations=-1, steps=[], min_filter=128,
nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):
"""Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
        feature extraction layers, use '' for adding extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
        -1 indicates no normalization or scaling
steps : list
        specify steps for each MultiBoxPrior layer; leave empty and steps will be
        calculated according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
"""
body = import_module(network).get_symbol(num_classes, **kwargs)
layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,
min_filter=min_filter)
loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_filters, clip=False, interm_layer=0, steps=steps)
cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out
|
[
"def",
"get_symbol",
"(",
"network",
",",
"num_classes",
",",
"from_layers",
",",
"num_filters",
",",
"sizes",
",",
"ratios",
",",
"strides",
",",
"pads",
",",
"normalizations",
"=",
"-",
"1",
",",
"steps",
"=",
"[",
"]",
",",
"min_filter",
"=",
"128",
",",
"nms_thresh",
"=",
"0.5",
",",
"force_suppress",
"=",
"False",
",",
"nms_topk",
"=",
"400",
",",
"*",
"*",
"kwargs",
")",
":",
"body",
"=",
"import_module",
"(",
"network",
")",
".",
"get_symbol",
"(",
"num_classes",
",",
"*",
"*",
"kwargs",
")",
"layers",
"=",
"multi_layer_feature",
"(",
"body",
",",
"from_layers",
",",
"num_filters",
",",
"strides",
",",
"pads",
",",
"min_filter",
"=",
"min_filter",
")",
"loc_preds",
",",
"cls_preds",
",",
"anchor_boxes",
"=",
"multibox_layer",
"(",
"layers",
",",
"num_classes",
",",
"sizes",
"=",
"sizes",
",",
"ratios",
"=",
"ratios",
",",
"normalization",
"=",
"normalizations",
",",
"num_channels",
"=",
"num_filters",
",",
"clip",
"=",
"False",
",",
"interm_layer",
"=",
"0",
",",
"steps",
"=",
"steps",
")",
"cls_prob",
"=",
"mx",
".",
"symbol",
".",
"softmax",
"(",
"data",
"=",
"cls_preds",
",",
"axis",
"=",
"1",
",",
"name",
"=",
"'cls_prob'",
")",
"out",
"=",
"mx",
".",
"symbol",
".",
"contrib",
".",
"MultiBoxDetection",
"(",
"*",
"[",
"cls_prob",
",",
"loc_preds",
",",
"anchor_boxes",
"]",
",",
"name",
"=",
"\"detection\"",
",",
"nms_threshold",
"=",
"nms_thresh",
",",
"force_suppress",
"=",
"force_suppress",
",",
"variances",
"=",
"(",
"0.1",
",",
"0.1",
",",
"0.2",
",",
"0.2",
")",
",",
"nms_topk",
"=",
"nms_topk",
")",
"return",
"out"
] |
Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
        feature extraction layers, use '' for adding extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
        -1 indicates no normalization or scaling
steps : list
        specify steps for each MultiBoxPrior layer; leave empty and steps will be
        calculated according to layer dimensions
min_filter : int
minimum number of filters used in 1x1 convolution
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
        whether to suppress objects of different classes
nms_topk : int
apply NMS to top K detections
Returns
-------
mx.Symbol
|
[
"Build",
"network",
"for",
"testing",
"SSD"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/symbol_builder.py#L118-L182
|
train
|
apache/incubator-mxnet
|
docs/tutorial_utils/vision/cnn_visualization/gradcam.py
|
_get_grad
|
def _get_grad(net, image, class_id=None, conv_layer_name=None, image_grad=False):
"""This is an internal helper function that can be used for either of these
but not both at the same time:
1. Record the output and gradient of output of an intermediate convolutional layer.
2. Record the gradients of the image.
Parameters
----------
image : NDArray
        Image to visualize. This is an NDArray with the preprocessed image.
class_id : int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
        Name of the convolutional layer whose output and output's gradients need to be captured.
image_grad: bool
Whether to capture gradients of the image."""
if image_grad:
image.attach_grad()
Conv2D.capture_layer_name = None
Activation.set_guided_backprop(True)
else:
# Tell convviz.Conv2D which layer's output and gradient needs to be recorded
Conv2D.capture_layer_name = conv_layer_name
Activation.set_guided_backprop(False)
# Run the network
with autograd.record(train_mode=False):
out = net(image)
# If user didn't provide a class id, we'll use the class that the network predicted
    if class_id is None:
model_output = out.asnumpy()
class_id = np.argmax(model_output)
# Create a one-hot target with class_id and backprop with the created target
one_hot_target = mx.nd.one_hot(mx.nd.array([class_id]), 1000)
out.backward(one_hot_target, train_mode=False)
if image_grad:
return image.grad[0].asnumpy()
else:
# Return the recorded convolution output and gradient
conv_out = Conv2D.conv_output
return conv_out[0].asnumpy(), conv_out.grad[0].asnumpy()
|
python
|
def _get_grad(net, image, class_id=None, conv_layer_name=None, image_grad=False):
"""This is an internal helper function that can be used for either of these
but not both at the same time:
1. Record the output and gradient of output of an intermediate convolutional layer.
2. Record the gradients of the image.
Parameters
----------
image : NDArray
        Image to visualize. This is an NDArray with the preprocessed image.
class_id : int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
        Name of the convolutional layer whose output and output's gradients need to be captured.
image_grad: bool
Whether to capture gradients of the image."""
if image_grad:
image.attach_grad()
Conv2D.capture_layer_name = None
Activation.set_guided_backprop(True)
else:
# Tell convviz.Conv2D which layer's output and gradient needs to be recorded
Conv2D.capture_layer_name = conv_layer_name
Activation.set_guided_backprop(False)
# Run the network
with autograd.record(train_mode=False):
out = net(image)
# If user didn't provide a class id, we'll use the class that the network predicted
    if class_id is None:
model_output = out.asnumpy()
class_id = np.argmax(model_output)
# Create a one-hot target with class_id and backprop with the created target
one_hot_target = mx.nd.one_hot(mx.nd.array([class_id]), 1000)
out.backward(one_hot_target, train_mode=False)
if image_grad:
return image.grad[0].asnumpy()
else:
# Return the recorded convolution output and gradient
conv_out = Conv2D.conv_output
return conv_out[0].asnumpy(), conv_out.grad[0].asnumpy()
|
[
"def",
"_get_grad",
"(",
"net",
",",
"image",
",",
"class_id",
"=",
"None",
",",
"conv_layer_name",
"=",
"None",
",",
"image_grad",
"=",
"False",
")",
":",
"if",
"image_grad",
":",
"image",
".",
"attach_grad",
"(",
")",
"Conv2D",
".",
"capture_layer_name",
"=",
"None",
"Activation",
".",
"set_guided_backprop",
"(",
"True",
")",
"else",
":",
"# Tell convviz.Conv2D which layer's output and gradient needs to be recorded",
"Conv2D",
".",
"capture_layer_name",
"=",
"conv_layer_name",
"Activation",
".",
"set_guided_backprop",
"(",
"False",
")",
"# Run the network",
"with",
"autograd",
".",
"record",
"(",
"train_mode",
"=",
"False",
")",
":",
"out",
"=",
"net",
"(",
"image",
")",
"# If user didn't provide a class id, we'll use the class that the network predicted",
"if",
"class_id",
"==",
"None",
":",
"model_output",
"=",
"out",
".",
"asnumpy",
"(",
")",
"class_id",
"=",
"np",
".",
"argmax",
"(",
"model_output",
")",
"# Create a one-hot target with class_id and backprop with the created target",
"one_hot_target",
"=",
"mx",
".",
"nd",
".",
"one_hot",
"(",
"mx",
".",
"nd",
".",
"array",
"(",
"[",
"class_id",
"]",
")",
",",
"1000",
")",
"out",
".",
"backward",
"(",
"one_hot_target",
",",
"train_mode",
"=",
"False",
")",
"if",
"image_grad",
":",
"return",
"image",
".",
"grad",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
"else",
":",
"# Return the recorded convolution output and gradient",
"conv_out",
"=",
"Conv2D",
".",
"conv_output",
"return",
"conv_out",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
",",
"conv_out",
".",
"grad",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")"
] |
This is an internal helper function that can be used for either of these
but not both at the same time:
1. Record the output and gradient of output of an intermediate convolutional layer.
2. Record the gradients of the image.
Parameters
----------
image : NDArray
        Image to visualize. This is an NDArray with the preprocessed image.
class_id : int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
        Name of the convolutional layer whose output and output's gradients need to be captured.
image_grad: bool
Whether to capture gradients of the image.
|
[
"This",
"is",
"an",
"internal",
"helper",
"function",
"that",
"can",
"be",
"used",
"for",
"either",
"of",
"these",
"but",
"not",
"both",
"at",
"the",
"same",
"time",
":",
"1",
".",
"Record",
"the",
"output",
"and",
"gradient",
"of",
"output",
"of",
"an",
"intermediate",
"convolutional",
"layer",
".",
"2",
".",
"Record",
"the",
"gradients",
"of",
"the",
"image",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/tutorial_utils/vision/cnn_visualization/gradcam.py#L122-L167
|
train
|
apache/incubator-mxnet
|
docs/tutorial_utils/vision/cnn_visualization/gradcam.py
|
get_conv_out_grad
|
def get_conv_out_grad(net, image, class_id=None, conv_layer_name=None):
"""Get the output and gradients of output of a convolutional layer.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
        Name of the convolutional layer whose output and output's gradients need to be captured."""
return _get_grad(net, image, class_id, conv_layer_name, image_grad=False)
|
python
|
def get_conv_out_grad(net, image, class_id=None, conv_layer_name=None):
"""Get the output and gradients of output of a convolutional layer.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
        Name of the convolutional layer whose output and output's gradients need to be captured."""
return _get_grad(net, image, class_id, conv_layer_name, image_grad=False)
|
[
"def",
"get_conv_out_grad",
"(",
"net",
",",
"image",
",",
"class_id",
"=",
"None",
",",
"conv_layer_name",
"=",
"None",
")",
":",
"return",
"_get_grad",
"(",
"net",
",",
"image",
",",
"class_id",
",",
"conv_layer_name",
",",
"image_grad",
"=",
"False",
")"
] |
Get the output and gradients of output of a convolutional layer.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used.
conv_layer_name: str
        Name of the convolutional layer whose output and output's gradients need to be captured.
|
[
"Get",
"the",
"output",
"and",
"gradients",
"of",
"output",
"of",
"a",
"convolutional",
"layer",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/tutorial_utils/vision/cnn_visualization/gradcam.py#L169-L183
|
train
|
apache/incubator-mxnet
|
docs/tutorial_utils/vision/cnn_visualization/gradcam.py
|
get_image_grad
|
def get_image_grad(net, image, class_id=None):
"""Get the gradients of the image.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used."""
return _get_grad(net, image, class_id, image_grad=True)
|
python
|
def get_image_grad(net, image, class_id=None):
"""Get the gradients of the image.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used."""
return _get_grad(net, image, class_id, image_grad=True)
|
[
"def",
"get_image_grad",
"(",
"net",
",",
"image",
",",
"class_id",
"=",
"None",
")",
":",
"return",
"_get_grad",
"(",
"net",
",",
"image",
",",
"class_id",
",",
"image_grad",
"=",
"True",
")"
] |
Get the gradients of the image.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used.
|
[
"Get",
"the",
"gradients",
"of",
"the",
"image",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/tutorial_utils/vision/cnn_visualization/gradcam.py#L185-L197
|
train
|
apache/incubator-mxnet
|
docs/tutorial_utils/vision/cnn_visualization/gradcam.py
|
grad_to_image
|
def grad_to_image(gradient):
"""Convert gradients of image obtained using `get_image_grad`
    into image. This shows parts of the image that are most strongly activating
the output neurons."""
gradient = gradient - gradient.min()
gradient /= gradient.max()
gradient = np.uint8(gradient * 255).transpose(1, 2, 0)
gradient = gradient[..., ::-1]
return gradient
|
python
|
def grad_to_image(gradient):
"""Convert gradients of image obtained using `get_image_grad`
    into image. This shows parts of the image that are most strongly activating
the output neurons."""
gradient = gradient - gradient.min()
gradient /= gradient.max()
gradient = np.uint8(gradient * 255).transpose(1, 2, 0)
gradient = gradient[..., ::-1]
return gradient
|
[
"def",
"grad_to_image",
"(",
"gradient",
")",
":",
"gradient",
"=",
"gradient",
"-",
"gradient",
".",
"min",
"(",
")",
"gradient",
"/=",
"gradient",
".",
"max",
"(",
")",
"gradient",
"=",
"np",
".",
"uint8",
"(",
"gradient",
"*",
"255",
")",
".",
"transpose",
"(",
"1",
",",
"2",
",",
"0",
")",
"gradient",
"=",
"gradient",
"[",
"...",
",",
":",
":",
"-",
"1",
"]",
"return",
"gradient"
] |
Convert gradients of image obtained using `get_image_grad`
    into image. This shows parts of the image that are most strongly activating
the output neurons.
|
[
"Convert",
"gradients",
"of",
"image",
"obtained",
"using",
"get_image_grad",
"into",
"image",
".",
"This",
"shows",
"parts",
"of",
"the",
"image",
"that",
"is",
"most",
"strongly",
"activating",
"the",
"output",
"neurons",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/tutorial_utils/vision/cnn_visualization/gradcam.py#L199-L207
|
train
|
apache/incubator-mxnet
|
docs/tutorial_utils/vision/cnn_visualization/gradcam.py
|
get_cam
|
def get_cam(imggrad, conv_out):
"""Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details"""
weights = np.mean(imggrad, axis=(1, 2))
cam = np.ones(conv_out.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * conv_out[i, :, :]
cam = cv2.resize(cam, (imggrad.shape[1], imggrad.shape[2]))
cam = np.maximum(cam, 0)
cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))
cam = np.uint8(cam * 255)
return cam
|
python
|
def get_cam(imggrad, conv_out):
"""Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details"""
weights = np.mean(imggrad, axis=(1, 2))
cam = np.ones(conv_out.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * conv_out[i, :, :]
cam = cv2.resize(cam, (imggrad.shape[1], imggrad.shape[2]))
cam = np.maximum(cam, 0)
cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))
cam = np.uint8(cam * 255)
return cam
|
[
"def",
"get_cam",
"(",
"imggrad",
",",
"conv_out",
")",
":",
"weights",
"=",
"np",
".",
"mean",
"(",
"imggrad",
",",
"axis",
"=",
"(",
"1",
",",
"2",
")",
")",
"cam",
"=",
"np",
".",
"ones",
"(",
"conv_out",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"weights",
")",
":",
"cam",
"+=",
"w",
"*",
"conv_out",
"[",
"i",
",",
":",
",",
":",
"]",
"cam",
"=",
"cv2",
".",
"resize",
"(",
"cam",
",",
"(",
"imggrad",
".",
"shape",
"[",
"1",
"]",
",",
"imggrad",
".",
"shape",
"[",
"2",
"]",
")",
")",
"cam",
"=",
"np",
".",
"maximum",
"(",
"cam",
",",
"0",
")",
"cam",
"=",
"(",
"cam",
"-",
"np",
".",
"min",
"(",
"cam",
")",
")",
"/",
"(",
"np",
".",
"max",
"(",
"cam",
")",
"-",
"np",
".",
"min",
"(",
"cam",
")",
")",
"cam",
"=",
"np",
".",
"uint8",
"(",
"cam",
"*",
"255",
")",
"return",
"cam"
] |
Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details
|
[
"Compute",
"CAM",
".",
"Refer",
"section",
"3",
"of",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1610",
".",
"02391",
"for",
"details"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/tutorial_utils/vision/cnn_visualization/gradcam.py#L209-L219
|
train
|
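The Grad-CAM helpers above are meant to be chained; the sketch below is a hedged reading of that flow, assuming net is a network built from this tutorial's Conv2D/Activation blocks, image is the preprocessed NDArray, orig_img is the original image array, the layer name is hypothetical, and the gradient of the convolutional output is used as the weighting input per the Grad-CAM paper cited in the docstring (the tutorial's exact wiring may differ).

# Hypothetical pipeline built from the helpers defined in this tutorial file.
conv_out, conv_grad = get_conv_out_grad(net, image, conv_layer_name='vgg0_conv12')
cam = get_cam(conv_grad, conv_out)                              # uint8 class activation map
cam = cv2.resize(cam, (orig_img.shape[1], orig_img.shape[0]))   # upscale to the original image size
overlay = get_img_heatmap(orig_img, cam)                        # heatmap blended over the image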
apache/incubator-mxnet
|
docs/tutorial_utils/vision/cnn_visualization/gradcam.py
|
get_img_heatmap
|
def get_img_heatmap(orig_img, activation_map):
"""Draw a heatmap on top of the original image using intensities from activation_map"""
heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL)
heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
img_heatmap = np.float32(heatmap) + np.float32(orig_img)
img_heatmap = img_heatmap / np.max(img_heatmap)
img_heatmap *= 255
return img_heatmap.astype(int)
|
python
|
def get_img_heatmap(orig_img, activation_map):
"""Draw a heatmap on top of the original image using intensities from activation_map"""
heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL)
heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
img_heatmap = np.float32(heatmap) + np.float32(orig_img)
img_heatmap = img_heatmap / np.max(img_heatmap)
img_heatmap *= 255
return img_heatmap.astype(int)
|
[
"def",
"get_img_heatmap",
"(",
"orig_img",
",",
"activation_map",
")",
":",
"heatmap",
"=",
"cv2",
".",
"applyColorMap",
"(",
"activation_map",
",",
"cv2",
".",
"COLORMAP_COOL",
")",
"heatmap",
"=",
"cv2",
".",
"cvtColor",
"(",
"heatmap",
",",
"cv2",
".",
"COLOR_BGR2RGB",
")",
"img_heatmap",
"=",
"np",
".",
"float32",
"(",
"heatmap",
")",
"+",
"np",
".",
"float32",
"(",
"orig_img",
")",
"img_heatmap",
"=",
"img_heatmap",
"/",
"np",
".",
"max",
"(",
"img_heatmap",
")",
"img_heatmap",
"*=",
"255",
"return",
"img_heatmap",
".",
"astype",
"(",
"int",
")"
] |
Draw a heatmap on top of the original image using intensities from activation_map
|
[
"Draw",
"a",
"heatmap",
"on",
"top",
"of",
"the",
"original",
"image",
"using",
"intensities",
"from",
"activation_map"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/tutorial_utils/vision/cnn_visualization/gradcam.py#L225-L232
|
train
|
apache/incubator-mxnet
|
docs/tutorial_utils/vision/cnn_visualization/gradcam.py
|
to_grayscale
|
def to_grayscale(cv2im):
"""Convert gradients to grayscale. This gives a saliency map."""
# How strongly does each position activate the output
grayscale_im = np.sum(np.abs(cv2im), axis=0)
# Normalize between min and 99th percentile
im_max = np.percentile(grayscale_im, 99)
im_min = np.min(grayscale_im)
grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)
grayscale_im = np.expand_dims(grayscale_im, axis=0)
return grayscale_im
|
python
|
def to_grayscale(cv2im):
"""Convert gradients to grayscale. This gives a saliency map."""
# How strongly does each position activate the output
grayscale_im = np.sum(np.abs(cv2im), axis=0)
# Normalize between min and 99th percentile
im_max = np.percentile(grayscale_im, 99)
im_min = np.min(grayscale_im)
grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)
grayscale_im = np.expand_dims(grayscale_im, axis=0)
return grayscale_im
|
[
"def",
"to_grayscale",
"(",
"cv2im",
")",
":",
"# How strongly does each position activate the output",
"grayscale_im",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"cv2im",
")",
",",
"axis",
"=",
"0",
")",
"# Normalize between min and 99th percentile",
"im_max",
"=",
"np",
".",
"percentile",
"(",
"grayscale_im",
",",
"99",
")",
"im_min",
"=",
"np",
".",
"min",
"(",
"grayscale_im",
")",
"grayscale_im",
"=",
"np",
".",
"clip",
"(",
"(",
"grayscale_im",
"-",
"im_min",
")",
"/",
"(",
"im_max",
"-",
"im_min",
")",
",",
"0",
",",
"1",
")",
"grayscale_im",
"=",
"np",
".",
"expand_dims",
"(",
"grayscale_im",
",",
"axis",
"=",
"0",
")",
"return",
"grayscale_im"
] |
Convert gradients to grayscale. This gives a saliency map.
|
[
"Convert",
"gradients",
"to",
"grayscale",
".",
"This",
"gives",
"a",
"saliency",
"map",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/tutorial_utils/vision/cnn_visualization/gradcam.py#L234-L245
|
train
|
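A hedged sketch tying get_image_grad, grad_to_image and to_grayscale from the entries above into the saliency flow these helpers support; the variable names are assumptions.

# Hypothetical saliency flow using the helpers from this tutorial file.
imggrad = get_image_grad(net, image)      # gradients w.r.t. the input image, shape (C, H, W)
saliency = to_grayscale(imggrad)          # single-channel map, clipped to [0, 1]
grad_img = grad_to_image(imggrad)         # gradients rendered back into an image (channel order reversed)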
apache/incubator-mxnet
|
python/mxnet/metric.py
|
check_label_shapes
|
def check_label_shapes(labels, preds, wrap=False, shape=False):
"""Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boolean
If True, check the shape of labels and preds;
Otherwise only check their length.
"""
if not shape:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of "
"predictions {}".format(label_shape, pred_shape))
if wrap:
if isinstance(labels, ndarray.ndarray.NDArray):
labels = [labels]
if isinstance(preds, ndarray.ndarray.NDArray):
preds = [preds]
return labels, preds
|
python
|
def check_label_shapes(labels, preds, wrap=False, shape=False):
"""Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boolean
If True, check the shape of labels and preds;
Otherwise only check their length.
"""
if not shape:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of "
"predictions {}".format(label_shape, pred_shape))
if wrap:
if isinstance(labels, ndarray.ndarray.NDArray):
labels = [labels]
if isinstance(preds, ndarray.ndarray.NDArray):
preds = [preds]
return labels, preds
|
[
"def",
"check_label_shapes",
"(",
"labels",
",",
"preds",
",",
"wrap",
"=",
"False",
",",
"shape",
"=",
"False",
")",
":",
"if",
"not",
"shape",
":",
"label_shape",
",",
"pred_shape",
"=",
"len",
"(",
"labels",
")",
",",
"len",
"(",
"preds",
")",
"else",
":",
"label_shape",
",",
"pred_shape",
"=",
"labels",
".",
"shape",
",",
"preds",
".",
"shape",
"if",
"label_shape",
"!=",
"pred_shape",
":",
"raise",
"ValueError",
"(",
"\"Shape of labels {} does not match shape of \"",
"\"predictions {}\"",
".",
"format",
"(",
"label_shape",
",",
"pred_shape",
")",
")",
"if",
"wrap",
":",
"if",
"isinstance",
"(",
"labels",
",",
"ndarray",
".",
"ndarray",
".",
"NDArray",
")",
":",
"labels",
"=",
"[",
"labels",
"]",
"if",
"isinstance",
"(",
"preds",
",",
"ndarray",
".",
"ndarray",
".",
"NDArray",
")",
":",
"preds",
"=",
"[",
"preds",
"]",
"return",
"labels",
",",
"preds"
] |
Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boolean
If True, check the shape of labels and preds;
Otherwise only check their length.
|
[
"Helper",
"function",
"for",
"checking",
"shape",
"of",
"label",
"and",
"prediction"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L33-L66
|
train
|
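A small, hedged illustration of check_label_shapes; it assumes mxnet is importable as mx and shows the wrap behaviour described in the docstring.

# Single NDArrays of equal length pass the length check and get wrapped into lists when wrap=True.
labels = mx.nd.array([1, 0, 1])
preds = mx.nd.array([0.9, 0.1, 0.8])
labels, preds = check_label_shapes(labels, preds, wrap=True)   # now [labels], [preds]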
apache/incubator-mxnet
|
python/mxnet/metric.py
|
create
|
def create(metric, *args, **kwargs):
"""Creates evaluation metric from metric names or instances of EvalMetric
or a custom metric function.
Parameters
----------
metric : str or callable
Specifies the metric to create.
This argument must be one of the below:
- Name of a metric.
- An instance of `EvalMetric`.
- A list, each element of which is a metric or a metric name.
- An evaluation function that computes custom metric for a given batch of
labels and predictions.
*args : list
Additional arguments to metric constructor.
Only used when metric is str.
**kwargs : dict
Additional arguments to metric constructor.
Only used when metric is str
Examples
--------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label - pred))
...
>>> metric1 = mx.metric.create('acc')
>>> metric2 = mx.metric.create(custom_metric)
>>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])
"""
if callable(metric):
return CustomMetric(metric, *args, **kwargs)
elif isinstance(metric, list):
composite_metric = CompositeEvalMetric()
for child_metric in metric:
composite_metric.add(create(child_metric, *args, **kwargs))
return composite_metric
return _create(metric, *args, **kwargs)
|
python
|
def create(metric, *args, **kwargs):
"""Creates evaluation metric from metric names or instances of EvalMetric
or a custom metric function.
Parameters
----------
metric : str or callable
Specifies the metric to create.
This argument must be one of the below:
- Name of a metric.
- An instance of `EvalMetric`.
- A list, each element of which is a metric or a metric name.
- An evaluation function that computes custom metric for a given batch of
labels and predictions.
*args : list
Additional arguments to metric constructor.
Only used when metric is str.
**kwargs : dict
Additional arguments to metric constructor.
Only used when metric is str
Examples
--------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label - pred))
...
>>> metric1 = mx.metric.create('acc')
>>> metric2 = mx.metric.create(custom_metric)
>>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])
"""
if callable(metric):
return CustomMetric(metric, *args, **kwargs)
elif isinstance(metric, list):
composite_metric = CompositeEvalMetric()
for child_metric in metric:
composite_metric.add(create(child_metric, *args, **kwargs))
return composite_metric
return _create(metric, *args, **kwargs)
|
[
"def",
"create",
"(",
"metric",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"callable",
"(",
"metric",
")",
":",
"return",
"CustomMetric",
"(",
"metric",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"isinstance",
"(",
"metric",
",",
"list",
")",
":",
"composite_metric",
"=",
"CompositeEvalMetric",
"(",
")",
"for",
"child_metric",
"in",
"metric",
":",
"composite_metric",
".",
"add",
"(",
"create",
"(",
"child_metric",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"composite_metric",
"return",
"_create",
"(",
"metric",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Creates evaluation metric from metric names or instances of EvalMetric
or a custom metric function.
Parameters
----------
metric : str or callable
Specifies the metric to create.
This argument must be one of the below:
- Name of a metric.
- An instance of `EvalMetric`.
- A list, each element of which is a metric or a metric name.
- An evaluation function that computes custom metric for a given batch of
labels and predictions.
*args : list
Additional arguments to metric constructor.
Only used when metric is str.
**kwargs : dict
Additional arguments to metric constructor.
Only used when metric is str
Examples
--------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label - pred))
...
>>> metric1 = mx.metric.create('acc')
>>> metric2 = mx.metric.create(custom_metric)
>>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])
|
[
"Creates",
"evaluation",
"metric",
"from",
"metric",
"names",
"or",
"instances",
"of",
"EvalMetric",
"or",
"a",
"custom",
"metric",
"function",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L234-L273
|
train
|
apache/incubator-mxnet
|
python/mxnet/metric.py
|
np
|
def np(numpy_feval, name=None, allow_extra_outputs=False):
"""Creates a custom evaluation metric that receives its inputs as numpy arrays.
Parameters
----------
numpy_feval : callable(label, pred)
Custom evaluation function that receives labels and predictions for a minibatch
as numpy arrays and returns the corresponding custom metric as a floating point number.
name : str, optional
Name of the custom metric.
allow_extra_outputs : bool, optional
Whether prediction output is allowed to have extra outputs. This is useful in cases
like RNN where states are also part of output which can then be fed back to the RNN
in the next step. By default, extra outputs are not allowed.
Returns
-------
float
Custom metric corresponding to the provided labels and predictions.
Example
-------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label-pred))
...
>>> metric = mx.metric.np(custom_metric)
"""
def feval(label, pred):
"""Internal eval function."""
return numpy_feval(label, pred)
feval.__name__ = numpy_feval.__name__
return CustomMetric(feval, name, allow_extra_outputs)
|
python
|
def np(numpy_feval, name=None, allow_extra_outputs=False):
"""Creates a custom evaluation metric that receives its inputs as numpy arrays.
Parameters
----------
numpy_feval : callable(label, pred)
Custom evaluation function that receives labels and predictions for a minibatch
as numpy arrays and returns the corresponding custom metric as a floating point number.
name : str, optional
Name of the custom metric.
allow_extra_outputs : bool, optional
Whether prediction output is allowed to have extra outputs. This is useful in cases
like RNN where states are also part of output which can then be fed back to the RNN
in the next step. By default, extra outputs are not allowed.
Returns
-------
float
Custom metric corresponding to the provided labels and predictions.
Example
-------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label-pred))
...
>>> metric = mx.metric.np(custom_metric)
"""
def feval(label, pred):
"""Internal eval function."""
return numpy_feval(label, pred)
feval.__name__ = numpy_feval.__name__
return CustomMetric(feval, name, allow_extra_outputs)
|
[
"def",
"np",
"(",
"numpy_feval",
",",
"name",
"=",
"None",
",",
"allow_extra_outputs",
"=",
"False",
")",
":",
"def",
"feval",
"(",
"label",
",",
"pred",
")",
":",
"\"\"\"Internal eval function.\"\"\"",
"return",
"numpy_feval",
"(",
"label",
",",
"pred",
")",
"feval",
".",
"__name__",
"=",
"numpy_feval",
".",
"__name__",
"return",
"CustomMetric",
"(",
"feval",
",",
"name",
",",
"allow_extra_outputs",
")"
] |
Creates a custom evaluation metric that receives its inputs as numpy arrays.
Parameters
----------
numpy_feval : callable(label, pred)
Custom evaluation function that receives labels and predictions for a minibatch
as numpy arrays and returns the corresponding custom metric as a floating point number.
name : str, optional
Name of the custom metric.
allow_extra_outputs : bool, optional
Whether prediction output is allowed to have extra outputs. This is useful in cases
like RNN where states are also part of output which can then be fed back to the RNN
in the next step. By default, extra outputs are not allowed.
Returns
-------
float
Custom metric corresponding to the provided labels and predictions.
Example
-------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label-pred))
...
>>> metric = mx.metric.np(custom_metric)
|
[
"Creates",
"a",
"custom",
"evaluation",
"metric",
"that",
"receives",
"its",
"inputs",
"as",
"numpy",
"arrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L1747-L1778
|
train
|
apache/incubator-mxnet
|
python/mxnet/metric.py
|
EvalMetric.get_config
|
def get_config(self):
"""Save configurations of metric. Can be recreated
from configs with metric.create(``**config``)
"""
config = self._kwargs.copy()
config.update({
'metric': self.__class__.__name__,
'name': self.name,
'output_names': self.output_names,
'label_names': self.label_names})
return config
|
python
|
def get_config(self):
"""Save configurations of metric. Can be recreated
from configs with metric.create(``**config``)
"""
config = self._kwargs.copy()
config.update({
'metric': self.__class__.__name__,
'name': self.name,
'output_names': self.output_names,
'label_names': self.label_names})
return config
|
[
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"self",
".",
"_kwargs",
".",
"copy",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'metric'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'output_names'",
":",
"self",
".",
"output_names",
",",
"'label_names'",
":",
"self",
".",
"label_names",
"}",
")",
"return",
"config"
] |
Save configurations of metric. Can be recreated
from configs with metric.create(``**config``)
|
[
"Save",
"configurations",
"of",
"metric",
".",
"Can",
"be",
"recreated",
"from",
"configs",
"with",
"metric",
".",
"create",
"(",
"**",
"config",
")"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L100-L110
|
train
|
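A short sketch of the round trip that the get_config docstring above describes, assuming mxnet is importable as mx.

import mxnet as mx

acc = mx.metric.Accuracy()
config = acc.get_config()
# config records the class name plus constructor kwargs,
# so an equivalent metric can be rebuilt from it
acc_copy = mx.metric.create(**config)
print(acc_copy.name)   # 'accuracy'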
apache/incubator-mxnet
|
python/mxnet/metric.py
|
EvalMetric.update_dict
|
def update_dict(self, label, pred):
"""Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
"""
if self.output_names is not None:
pred = [pred[name] for name in self.output_names]
else:
pred = list(pred.values())
if self.label_names is not None:
label = [label[name] for name in self.label_names]
else:
label = list(label.values())
self.update(label, pred)
|
python
|
def update_dict(self, label, pred):
"""Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
"""
if self.output_names is not None:
pred = [pred[name] for name in self.output_names]
else:
pred = list(pred.values())
if self.label_names is not None:
label = [label[name] for name in self.label_names]
else:
label = list(label.values())
self.update(label, pred)
|
[
"def",
"update_dict",
"(",
"self",
",",
"label",
",",
"pred",
")",
":",
"if",
"self",
".",
"output_names",
"is",
"not",
"None",
":",
"pred",
"=",
"[",
"pred",
"[",
"name",
"]",
"for",
"name",
"in",
"self",
".",
"output_names",
"]",
"else",
":",
"pred",
"=",
"list",
"(",
"pred",
".",
"values",
"(",
")",
")",
"if",
"self",
".",
"label_names",
"is",
"not",
"None",
":",
"label",
"=",
"[",
"label",
"[",
"name",
"]",
"for",
"name",
"in",
"self",
".",
"label_names",
"]",
"else",
":",
"label",
"=",
"list",
"(",
"label",
".",
"values",
"(",
")",
")",
"self",
".",
"update",
"(",
"label",
",",
"pred",
")"
] |
Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
|
[
"Update",
"the",
"internal",
"evaluation",
"with",
"named",
"label",
"and",
"pred"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L112-L133
|
train
|
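A sketch of update_dict with name-to-NDArray mappings, assuming mxnet is importable as mx; the output and label names are illustrative.

import mxnet as mx
from collections import OrderedDict

acc = mx.metric.Accuracy()
labels = OrderedDict([('softmax_label', mx.nd.array([0, 1, 1]))])
preds = OrderedDict([('softmax_output',
                      mx.nd.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]]))])
# with output_names and label_names left as None, all dict values are used
acc.update_dict(labels, preds)
print(acc.get())   # ('accuracy', 0.666...)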
apache/incubator-mxnet
|
python/mxnet/metric.py
|
EvalMetric.reset
|
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0
self.sum_metric = 0.0
self.global_num_inst = 0
self.global_sum_metric = 0.0
|
python
|
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0
self.sum_metric = 0.0
self.global_num_inst = 0
self.global_sum_metric = 0.0
|
[
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"num_inst",
"=",
"0",
"self",
".",
"sum_metric",
"=",
"0.0",
"self",
".",
"global_num_inst",
"=",
"0",
"self",
".",
"global_sum_metric",
"=",
"0.0"
] |
Resets the internal evaluation result to initial state.
|
[
"Resets",
"the",
"internal",
"evaluation",
"result",
"to",
"initial",
"state",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L148-L153
|
train
|
apache/incubator-mxnet
|
python/mxnet/metric.py
|
EvalMetric.get
|
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
|
python
|
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
|
[
"def",
"get",
"(",
"self",
")",
":",
"if",
"self",
".",
"num_inst",
"==",
"0",
":",
"return",
"(",
"self",
".",
"name",
",",
"float",
"(",
"'nan'",
")",
")",
"else",
":",
"return",
"(",
"self",
".",
"name",
",",
"self",
".",
"sum_metric",
"/",
"self",
".",
"num_inst",
")"
] |
Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
|
[
"Gets",
"the",
"current",
"evaluation",
"result",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L161-L174
|
train
|
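A sketch of the behaviour described above: get() reports NaN until at least one update has been seen. Assumes mxnet is importable as mx; the toy values are illustrative.

import mxnet as mx

mae = mx.metric.MAE()
print(mae.get())   # ('mae', nan) before any update
mae.update([mx.nd.array([2.0])], [mx.nd.array([3.0])])
print(mae.get())   # ('mae', 1.0)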
apache/incubator-mxnet
|
python/mxnet/metric.py
|
EvalMetric.get_global
|
def get_global(self):
"""Gets the current global evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self._has_global_stats:
if self.global_num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.global_sum_metric / self.global_num_inst)
else:
return self.get()
|
python
|
def get_global(self):
"""Gets the current global evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self._has_global_stats:
if self.global_num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.global_sum_metric / self.global_num_inst)
else:
return self.get()
|
[
"def",
"get_global",
"(",
"self",
")",
":",
"if",
"self",
".",
"_has_global_stats",
":",
"if",
"self",
".",
"global_num_inst",
"==",
"0",
":",
"return",
"(",
"self",
".",
"name",
",",
"float",
"(",
"'nan'",
")",
")",
"else",
":",
"return",
"(",
"self",
".",
"name",
",",
"self",
".",
"global_sum_metric",
"/",
"self",
".",
"global_num_inst",
")",
"else",
":",
"return",
"self",
".",
"get",
"(",
")"
] |
Gets the current global evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
|
[
"Gets",
"the",
"current",
"global",
"evaluation",
"result",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L176-L192
|
train
|
apache/incubator-mxnet
|
python/mxnet/metric.py
|
EvalMetric.get_name_value
|
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
|
python
|
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
|
[
"def",
"get_name_value",
"(",
"self",
")",
":",
"name",
",",
"value",
"=",
"self",
".",
"get",
"(",
")",
"if",
"not",
"isinstance",
"(",
"name",
",",
"list",
")",
":",
"name",
"=",
"[",
"name",
"]",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"[",
"value",
"]",
"return",
"list",
"(",
"zip",
"(",
"name",
",",
"value",
")",
")"
] |
Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
|
[
"Returns",
"zipped",
"name",
"and",
"value",
"pairs",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L194-L207
|
train
|
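A sketch showing how get_name_value pairs names with values, which is convenient for composite metrics; assumes mxnet is importable as mx and uses toy data.

import mxnet as mx

metric = mx.metric.CompositeEvalMetric([mx.metric.Accuracy(), mx.metric.F1()])
labels = [mx.nd.array([0, 1])]
preds = [mx.nd.array([[0.8, 0.2], [0.3, 0.7]])]
metric.update(labels, preds)
for name, value in metric.get_name_value():
    print(name, value)   # accuracy 1.0, then f1 1.0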
apache/incubator-mxnet
|
python/mxnet/metric.py
|
EvalMetric.get_global_name_value
|
def get_global_name_value(self):
"""Returns zipped name and value pairs for global results.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
if self._has_global_stats:
name, value = self.get_global()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
else:
return self.get_name_value()
|
python
|
def get_global_name_value(self):
"""Returns zipped name and value pairs for global results.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
if self._has_global_stats:
name, value = self.get_global()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
else:
return self.get_name_value()
|
[
"def",
"get_global_name_value",
"(",
"self",
")",
":",
"if",
"self",
".",
"_has_global_stats",
":",
"name",
",",
"value",
"=",
"self",
".",
"get_global",
"(",
")",
"if",
"not",
"isinstance",
"(",
"name",
",",
"list",
")",
":",
"name",
"=",
"[",
"name",
"]",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"[",
"value",
"]",
"return",
"list",
"(",
"zip",
"(",
"name",
",",
"value",
")",
")",
"else",
":",
"return",
"self",
".",
"get_name_value",
"(",
")"
] |
Returns zipped name and value pairs for global results.
Returns
-------
list of tuples
A (name, value) tuple list.
|
[
"Returns",
"zipped",
"name",
"and",
"value",
"pairs",
"for",
"global",
"results",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L209-L225
|
train
|
apache/incubator-mxnet
|
python/mxnet/metric.py
|
_BinaryClassificationMetrics.update_binary_stats
|
def update_binary_stats(self, label, pred):
"""
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
"""
pred = pred.asnumpy()
label = label.asnumpy().astype('int32')
pred_label = numpy.argmax(pred, axis=1)
check_label_shapes(label, pred)
if len(numpy.unique(label)) > 2:
raise ValueError("%s currently only supports binary classification."
% self.__class__.__name__)
pred_true = (pred_label == 1)
pred_false = 1 - pred_true
label_true = (label == 1)
label_false = 1 - label_true
true_pos = (pred_true * label_true).sum()
false_pos = (pred_true * label_false).sum()
false_neg = (pred_false * label_true).sum()
true_neg = (pred_false * label_false).sum()
self.true_positives += true_pos
self.global_true_positives += true_pos
self.false_positives += false_pos
self.global_false_positives += false_pos
self.false_negatives += false_neg
self.global_false_negatives += false_neg
self.true_negatives += true_neg
self.global_true_negatives += true_neg
|
python
|
def update_binary_stats(self, label, pred):
"""
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
"""
pred = pred.asnumpy()
label = label.asnumpy().astype('int32')
pred_label = numpy.argmax(pred, axis=1)
check_label_shapes(label, pred)
if len(numpy.unique(label)) > 2:
raise ValueError("%s currently only supports binary classification."
% self.__class__.__name__)
pred_true = (pred_label == 1)
pred_false = 1 - pred_true
label_true = (label == 1)
label_false = 1 - label_true
true_pos = (pred_true * label_true).sum()
false_pos = (pred_true * label_false).sum()
false_neg = (pred_false * label_true).sum()
true_neg = (pred_false * label_false).sum()
self.true_positives += true_pos
self.global_true_positives += true_pos
self.false_positives += false_pos
self.global_false_positives += false_pos
self.false_negatives += false_neg
self.global_false_negatives += false_neg
self.true_negatives += true_neg
self.global_true_negatives += true_neg
|
[
"def",
"update_binary_stats",
"(",
"self",
",",
"label",
",",
"pred",
")",
":",
"pred",
"=",
"pred",
".",
"asnumpy",
"(",
")",
"label",
"=",
"label",
".",
"asnumpy",
"(",
")",
".",
"astype",
"(",
"'int32'",
")",
"pred_label",
"=",
"numpy",
".",
"argmax",
"(",
"pred",
",",
"axis",
"=",
"1",
")",
"check_label_shapes",
"(",
"label",
",",
"pred",
")",
"if",
"len",
"(",
"numpy",
".",
"unique",
"(",
"label",
")",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"%s currently only supports binary classification.\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")",
"pred_true",
"=",
"(",
"pred_label",
"==",
"1",
")",
"pred_false",
"=",
"1",
"-",
"pred_true",
"label_true",
"=",
"(",
"label",
"==",
"1",
")",
"label_false",
"=",
"1",
"-",
"label_true",
"true_pos",
"=",
"(",
"pred_true",
"*",
"label_true",
")",
".",
"sum",
"(",
")",
"false_pos",
"=",
"(",
"pred_true",
"*",
"label_false",
")",
".",
"sum",
"(",
")",
"false_neg",
"=",
"(",
"pred_false",
"*",
"label_true",
")",
".",
"sum",
"(",
")",
"true_neg",
"=",
"(",
"pred_false",
"*",
"label_false",
")",
".",
"sum",
"(",
")",
"self",
".",
"true_positives",
"+=",
"true_pos",
"self",
".",
"global_true_positives",
"+=",
"true_pos",
"self",
".",
"false_positives",
"+=",
"false_pos",
"self",
".",
"global_false_positives",
"+=",
"false_pos",
"self",
".",
"false_negatives",
"+=",
"false_neg",
"self",
".",
"global_false_negatives",
"+=",
"false_neg",
"self",
".",
"true_negatives",
"+=",
"true_neg",
"self",
".",
"global_true_negatives",
"+=",
"true_neg"
] |
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
|
[
"Update",
"various",
"binary",
"classification",
"counts",
"for",
"a",
"single",
"(",
"label",
"pred",
")",
"pair",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L612-L649
|
train
|
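A plain numpy sketch that mirrors the counting logic of update_binary_stats on a toy batch; the values are made up for illustration.

import numpy as np

label = np.array([1, 0, 1, 1], dtype='int32')      # ground truth
pred = np.array([[0.2, 0.8],    # predicted 1, label 1 -> true positive
                 [0.4, 0.6],    # predicted 1, label 0 -> false positive
                 [0.7, 0.3],    # predicted 0, label 1 -> false negative
                 [0.1, 0.9]])   # predicted 1, label 1 -> true positive
pred_label = np.argmax(pred, axis=1)
true_pos = int(((pred_label == 1) & (label == 1)).sum())    # 2
false_pos = int(((pred_label == 1) & (label == 0)).sum())   # 1
false_neg = int(((pred_label == 0) & (label == 1)).sum())   # 1
true_neg = int(((pred_label == 0) & (label == 0)).sum())    # 0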
apache/incubator-mxnet
|
python/mxnet/metric.py
|
_BinaryClassificationMetrics.matthewscc
|
def matthewscc(self, use_global=False):
"""
Calculate the Matthew's Correlation Coefficient
"""
if use_global:
if not self.global_total_examples:
return 0.
true_pos = float(self.global_true_positives)
false_pos = float(self.global_false_positives)
false_neg = float(self.global_false_negatives)
true_neg = float(self.global_true_negatives)
else:
if not self.total_examples:
return 0.
true_pos = float(self.true_positives)
false_pos = float(self.false_positives)
false_neg = float(self.false_negatives)
true_neg = float(self.true_negatives)
terms = [(true_pos + false_pos),
(true_pos + false_neg),
(true_neg + false_pos),
(true_neg + false_neg)]
denom = 1.
for t in filter(lambda t: t != 0., terms):
denom *= t
return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)
|
python
|
def matthewscc(self, use_global=False):
"""
Calculate the Matthew's Correlation Coefficient
"""
if use_global:
if not self.global_total_examples:
return 0.
true_pos = float(self.global_true_positives)
false_pos = float(self.global_false_positives)
false_neg = float(self.global_false_negatives)
true_neg = float(self.global_true_negatives)
else:
if not self.total_examples:
return 0.
true_pos = float(self.true_positives)
false_pos = float(self.false_positives)
false_neg = float(self.false_negatives)
true_neg = float(self.true_negatives)
terms = [(true_pos + false_pos),
(true_pos + false_neg),
(true_neg + false_pos),
(true_neg + false_neg)]
denom = 1.
for t in filter(lambda t: t != 0., terms):
denom *= t
return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)
|
[
"def",
"matthewscc",
"(",
"self",
",",
"use_global",
"=",
"False",
")",
":",
"if",
"use_global",
":",
"if",
"not",
"self",
".",
"global_total_examples",
":",
"return",
"0.",
"true_pos",
"=",
"float",
"(",
"self",
".",
"global_true_positives",
")",
"false_pos",
"=",
"float",
"(",
"self",
".",
"global_false_positives",
")",
"false_neg",
"=",
"float",
"(",
"self",
".",
"global_false_negatives",
")",
"true_neg",
"=",
"float",
"(",
"self",
".",
"global_true_negatives",
")",
"else",
":",
"if",
"not",
"self",
".",
"total_examples",
":",
"return",
"0.",
"true_pos",
"=",
"float",
"(",
"self",
".",
"true_positives",
")",
"false_pos",
"=",
"float",
"(",
"self",
".",
"false_positives",
")",
"false_neg",
"=",
"float",
"(",
"self",
".",
"false_negatives",
")",
"true_neg",
"=",
"float",
"(",
"self",
".",
"true_negatives",
")",
"terms",
"=",
"[",
"(",
"true_pos",
"+",
"false_pos",
")",
",",
"(",
"true_pos",
"+",
"false_neg",
")",
",",
"(",
"true_neg",
"+",
"false_pos",
")",
",",
"(",
"true_neg",
"+",
"false_neg",
")",
"]",
"denom",
"=",
"1.",
"for",
"t",
"in",
"filter",
"(",
"lambda",
"t",
":",
"t",
"!=",
"0.",
",",
"terms",
")",
":",
"denom",
"*=",
"t",
"return",
"(",
"(",
"true_pos",
"*",
"true_neg",
")",
"-",
"(",
"false_pos",
"*",
"false_neg",
")",
")",
"/",
"math",
".",
"sqrt",
"(",
"denom",
")"
] |
Calculate the Matthew's Correlation Coefficient
|
[
"Calculate",
"the",
"Matthew",
"s",
"Correlation",
"Coefficent"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L693-L721
|
train
|
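The function above implements MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)), with zero factors in the denominator skipped (left as 1) so the division never blows up. A small standalone check on the toy counts from the previous sketch:

import math

tp, fp, fn, tn = 2., 1., 1., 0.
terms = [tp + fp, tp + fn, tn + fp, tn + fn]   # 3, 3, 1, 1
denom = 1.
for t in terms:
    if t != 0.:
        denom *= t
mcc = ((tp * tn) - (fp * fn)) / math.sqrt(denom)
print(mcc)   # -1 / 3, about -0.333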
apache/incubator-mxnet
|
python/mxnet/gluon/data/dataset.py
|
Dataset.transform
|
def transform(self, fn, lazy=True):
"""Returns a new dataset with each sample transformed by the
transformer function `fn`.
Parameters
----------
fn : callable
A transformer function that takes a sample as input and
returns the transformed sample.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
"""
trans = _LazyTransformDataset(self, fn)
if lazy:
return trans
return SimpleDataset([i for i in trans])
|
python
|
def transform(self, fn, lazy=True):
"""Returns a new dataset with each sample transformed by the
transformer function `fn`.
Parameters
----------
fn : callable
A transformer function that takes a sample as input and
returns the transformed sample.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
"""
trans = _LazyTransformDataset(self, fn)
if lazy:
return trans
return SimpleDataset([i for i in trans])
|
[
"def",
"transform",
"(",
"self",
",",
"fn",
",",
"lazy",
"=",
"True",
")",
":",
"trans",
"=",
"_LazyTransformDataset",
"(",
"self",
",",
"fn",
")",
"if",
"lazy",
":",
"return",
"trans",
"return",
"SimpleDataset",
"(",
"[",
"i",
"for",
"i",
"in",
"trans",
"]",
")"
] |
Returns a new dataset with each sample transformed by the
transformer function `fn`.
Parameters
----------
fn : callable
A transformer function that takes a sample as input and
returns the transformed sample.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
|
[
"Returns",
"a",
"new",
"dataset",
"with",
"each",
"sample",
"transformed",
"by",
"the",
"transformer",
"function",
"fn",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/data/dataset.py#L43-L66
|
train
|
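A gluon Dataset.transform sketch, assuming mxnet is installed; SimpleDataset and the toy values are used only for illustration.

from mxnet.gluon.data import SimpleDataset

dataset = SimpleDataset([1, 2, 3, 4])
# lazy by default: the function runs when an item is accessed
doubled = dataset.transform(lambda x: x * 2)
print(doubled[1])            # 4
# lazy=False applies the function to every sample up front
doubled_eager = dataset.transform(lambda x: x * 2, lazy=False)
print(list(doubled_eager))   # [2, 4, 6, 8]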
apache/incubator-mxnet
|
python/mxnet/gluon/data/dataset.py
|
Dataset.transform_first
|
def transform_first(self, fn, lazy=True):
"""Returns a new dataset with the first element of each sample
transformed by the transformer function `fn`.
This is useful, for example, when you only want to transform data
while keeping label as is.
Parameters
----------
fn : callable
A transformer function that takes the first element of a sample
as input and returns the transformed element.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
"""
return self.transform(_TransformFirstClosure(fn), lazy)
|
python
|
def transform_first(self, fn, lazy=True):
"""Returns a new dataset with the first element of each sample
transformed by the transformer function `fn`.
This is useful, for example, when you only want to transform data
while keeping label as is.
Parameters
----------
fn : callable
A transformer function that takes the first element of a sample
as input and returns the transformed element.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
"""
return self.transform(_TransformFirstClosure(fn), lazy)
|
[
"def",
"transform_first",
"(",
"self",
",",
"fn",
",",
"lazy",
"=",
"True",
")",
":",
"return",
"self",
".",
"transform",
"(",
"_TransformFirstClosure",
"(",
"fn",
")",
",",
"lazy",
")"
] |
Returns a new dataset with the first element of each sample
transformed by the transformer function `fn`.
This is useful, for example, when you only want to transform data
while keeping label as is.
Parameters
----------
fn : callable
A transformer function that takes the first element of a sample
as input and returns the transformed element.
lazy : bool, default True
If False, transforms all samples at once. Otherwise,
transforms each sample on demand. Note that if `fn`
is stochastic, you must set lazy to True or you will
get the same result on all epochs.
Returns
-------
Dataset
The transformed dataset.
|
[
"Returns",
"a",
"new",
"dataset",
"with",
"the",
"first",
"element",
"of",
"each",
"sample",
"transformed",
"by",
"the",
"transformer",
"function",
"fn",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/data/dataset.py#L68-L91
|
train
|
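A transform_first sketch on (data, label) pairs: the data element is transformed while the label is left untouched. Assumes mxnet is installed; the toy pairs are illustrative.

from mxnet.gluon.data import SimpleDataset

pairs = SimpleDataset([(1, 'a'), (2, 'b'), (3, 'c')])
scaled = pairs.transform_first(lambda x: x * 10)
print(scaled[0])   # (10, 'a')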
apache/incubator-mxnet
|
example/ctc/ocr_predict.py
|
lstm_ocr_model.forward_ocr
|
def forward_ocr(self, img_):
"""Forward the image through the LSTM network model
Parameters
----------
img_: int of array
Returns
----------
label_list: string of list
"""
img_ = cv2.resize(img_, (80, 30))
img_ = img_.transpose(1, 0)
print(img_.shape)
img_ = img_.reshape((1, 80, 30))
print(img_.shape)
# img_ = img_.reshape((80 * 30))
img_ = np.multiply(img_, 1 / 255.0)
self.predictor.forward(data=img_, **self.init_state_dict)
prob = self.predictor.get_output(0)
label_list = []
for p in prob:
print(np.argsort(p))
max_index = np.argsort(p)[::-1][0]
label_list.append(max_index)
return self.__get_string(label_list)
|
python
|
def forward_ocr(self, img_):
"""Forward the image through the LSTM network model
Parameters
----------
img_: int of array
Returns
----------
label_list: string of list
"""
img_ = cv2.resize(img_, (80, 30))
img_ = img_.transpose(1, 0)
print(img_.shape)
img_ = img_.reshape((1, 80, 30))
print(img_.shape)
# img_ = img_.reshape((80 * 30))
img_ = np.multiply(img_, 1 / 255.0)
self.predictor.forward(data=img_, **self.init_state_dict)
prob = self.predictor.get_output(0)
label_list = []
for p in prob:
print(np.argsort(p))
max_index = np.argsort(p)[::-1][0]
label_list.append(max_index)
return self.__get_string(label_list)
|
[
"def",
"forward_ocr",
"(",
"self",
",",
"img_",
")",
":",
"img_",
"=",
"cv2",
".",
"resize",
"(",
"img_",
",",
"(",
"80",
",",
"30",
")",
")",
"img_",
"=",
"img_",
".",
"transpose",
"(",
"1",
",",
"0",
")",
"print",
"(",
"img_",
".",
"shape",
")",
"img_",
"=",
"img_",
".",
"reshape",
"(",
"(",
"1",
",",
"80",
",",
"30",
")",
")",
"print",
"(",
"img_",
".",
"shape",
")",
"# img_ = img_.reshape((80 * 30))",
"img_",
"=",
"np",
".",
"multiply",
"(",
"img_",
",",
"1",
"/",
"255.0",
")",
"self",
".",
"predictor",
".",
"forward",
"(",
"data",
"=",
"img_",
",",
"*",
"*",
"self",
".",
"init_state_dict",
")",
"prob",
"=",
"self",
".",
"predictor",
".",
"get_output",
"(",
"0",
")",
"label_list",
"=",
"[",
"]",
"for",
"p",
"in",
"prob",
":",
"print",
"(",
"np",
".",
"argsort",
"(",
"p",
")",
")",
"max_index",
"=",
"np",
".",
"argsort",
"(",
"p",
")",
"[",
":",
":",
"-",
"1",
"]",
"[",
"0",
"]",
"label_list",
".",
"append",
"(",
"max_index",
")",
"return",
"self",
".",
"__get_string",
"(",
"label_list",
")"
] |
Forward the image through the LSTM network model
Parameters
----------
img_: int of array
Returns
----------
label_list: string of list
|
[
"Forward",
"the",
"image",
"through",
"the",
"LSTM",
"network",
"model"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/ocr_predict.py#L63-L88
|
train
|
apache/incubator-mxnet
|
tools/caffe_converter/caffe_parser.py
|
read_prototxt
|
def read_prototxt(fname):
"""Return a caffe_pb2.NetParameter object that defined in a prototxt file
"""
proto = caffe_pb2.NetParameter()
with open(fname, 'r') as f:
text_format.Merge(str(f.read()), proto)
return proto
|
python
|
def read_prototxt(fname):
"""Return a caffe_pb2.NetParameter object that defined in a prototxt file
"""
proto = caffe_pb2.NetParameter()
with open(fname, 'r') as f:
text_format.Merge(str(f.read()), proto)
return proto
|
[
"def",
"read_prototxt",
"(",
"fname",
")",
":",
"proto",
"=",
"caffe_pb2",
".",
"NetParameter",
"(",
")",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"f",
":",
"text_format",
".",
"Merge",
"(",
"str",
"(",
"f",
".",
"read",
"(",
")",
")",
",",
"proto",
")",
"return",
"proto"
] |
Return a caffe_pb2.NetParameter object that defined in a prototxt file
|
[
"Return",
"a",
"caffe_pb2",
".",
"NetParameter",
"object",
"that",
"defined",
"in",
"a",
"prototxt",
"file"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/caffe_parser.py#L34-L40
|
train
|
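A sketch of the same protobuf text_format approach used by read_prototxt; it assumes the generated caffe_pb2 module is importable and 'deploy.prototxt' is a placeholder path.

from google.protobuf import text_format
import caffe_pb2   # generated from caffe.proto; assumed to be on the path

proto = caffe_pb2.NetParameter()
with open('deploy.prototxt', 'r') as f:
    text_format.Merge(f.read(), proto)
print(proto.name, len(proto.layer))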
apache/incubator-mxnet
|
tools/caffe_converter/caffe_parser.py
|
get_layers
|
def get_layers(proto):
"""Returns layers in a caffe_pb2.NetParameter object
"""
if len(proto.layer):
return proto.layer
elif len(proto.layers):
return proto.layers
else:
raise ValueError('Invalid proto file.')
|
python
|
def get_layers(proto):
"""Returns layers in a caffe_pb2.NetParameter object
"""
if len(proto.layer):
return proto.layer
elif len(proto.layers):
return proto.layers
else:
raise ValueError('Invalid proto file.')
|
[
"def",
"get_layers",
"(",
"proto",
")",
":",
"if",
"len",
"(",
"proto",
".",
"layer",
")",
":",
"return",
"proto",
".",
"layer",
"elif",
"len",
"(",
"proto",
".",
"layers",
")",
":",
"return",
"proto",
".",
"layers",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid proto file.'",
")"
] |
Returns layers in a caffe_pb2.NetParameter object
|
[
"Returns",
"layers",
"in",
"a",
"caffe_pb2",
".",
"NetParameter",
"object"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/caffe_parser.py#L42-L50
|
train
|
apache/incubator-mxnet
|
tools/caffe_converter/caffe_parser.py
|
read_caffemodel
|
def read_caffemodel(prototxt_fname, caffemodel_fname):
"""Return a caffe_pb2.NetParameter object that defined in a binary
caffemodel file
"""
if use_caffe:
caffe.set_mode_cpu()
net = caffe.Net(prototxt_fname, caffemodel_fname, caffe.TEST)
layer_names = net._layer_names
layers = net.layers
return (layers, layer_names)
else:
proto = caffe_pb2.NetParameter()
with open(caffemodel_fname, 'rb') as f:
proto.ParseFromString(f.read())
return (get_layers(proto), None)
|
python
|
def read_caffemodel(prototxt_fname, caffemodel_fname):
"""Return a caffe_pb2.NetParameter object that defined in a binary
caffemodel file
"""
if use_caffe:
caffe.set_mode_cpu()
net = caffe.Net(prototxt_fname, caffemodel_fname, caffe.TEST)
layer_names = net._layer_names
layers = net.layers
return (layers, layer_names)
else:
proto = caffe_pb2.NetParameter()
with open(caffemodel_fname, 'rb') as f:
proto.ParseFromString(f.read())
return (get_layers(proto), None)
|
[
"def",
"read_caffemodel",
"(",
"prototxt_fname",
",",
"caffemodel_fname",
")",
":",
"if",
"use_caffe",
":",
"caffe",
".",
"set_mode_cpu",
"(",
")",
"net",
"=",
"caffe",
".",
"Net",
"(",
"prototxt_fname",
",",
"caffemodel_fname",
",",
"caffe",
".",
"TEST",
")",
"layer_names",
"=",
"net",
".",
"_layer_names",
"layers",
"=",
"net",
".",
"layers",
"return",
"(",
"layers",
",",
"layer_names",
")",
"else",
":",
"proto",
"=",
"caffe_pb2",
".",
"NetParameter",
"(",
")",
"with",
"open",
"(",
"caffemodel_fname",
",",
"'rb'",
")",
"as",
"f",
":",
"proto",
".",
"ParseFromString",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"(",
"get_layers",
"(",
"proto",
")",
",",
"None",
")"
] |
Return a caffe_pb2.NetParameter object that defined in a binary
caffemodel file
|
[
"Return",
"a",
"caffe_pb2",
".",
"NetParameter",
"object",
"that",
"defined",
"in",
"a",
"binary",
"caffemodel",
"file"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/caffe_parser.py#L52-L66
|
train
|
apache/incubator-mxnet
|
tools/caffe_converter/caffe_parser.py
|
layer_iter
|
def layer_iter(layers, layer_names):
"""Iterate over all layers"""
if use_caffe:
for layer_idx, layer in enumerate(layers):
layer_name = re.sub('[-/]', '_', layer_names[layer_idx])
layer_type = layer.type
layer_blobs = layer.blobs
yield (layer_name, layer_type, layer_blobs)
else:
for layer in layers:
layer_name = re.sub('[-/]', '_', layer.name)
layer_type = layer.type
layer_blobs = layer.blobs
yield (layer_name, layer_type, layer_blobs)
|
python
|
def layer_iter(layers, layer_names):
"""Iterate over all layers"""
if use_caffe:
for layer_idx, layer in enumerate(layers):
layer_name = re.sub('[-/]', '_', layer_names[layer_idx])
layer_type = layer.type
layer_blobs = layer.blobs
yield (layer_name, layer_type, layer_blobs)
else:
for layer in layers:
layer_name = re.sub('[-/]', '_', layer.name)
layer_type = layer.type
layer_blobs = layer.blobs
yield (layer_name, layer_type, layer_blobs)
|
[
"def",
"layer_iter",
"(",
"layers",
",",
"layer_names",
")",
":",
"if",
"use_caffe",
":",
"for",
"layer_idx",
",",
"layer",
"in",
"enumerate",
"(",
"layers",
")",
":",
"layer_name",
"=",
"re",
".",
"sub",
"(",
"'[-/]'",
",",
"'_'",
",",
"layer_names",
"[",
"layer_idx",
"]",
")",
"layer_type",
"=",
"layer",
".",
"type",
"layer_blobs",
"=",
"layer",
".",
"blobs",
"yield",
"(",
"layer_name",
",",
"layer_type",
",",
"layer_blobs",
")",
"else",
":",
"for",
"layer",
"in",
"layers",
":",
"layer_name",
"=",
"re",
".",
"sub",
"(",
"'[-/]'",
",",
"'_'",
",",
"layer",
".",
"name",
")",
"layer_type",
"=",
"layer",
".",
"type",
"layer_blobs",
"=",
"layer",
".",
"blobs",
"yield",
"(",
"layer_name",
",",
"layer_type",
",",
"layer_blobs",
")"
] |
Iterate over all layers
|
[
"Iterate",
"over",
"all",
"layers"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/caffe_parser.py#L68-L81
|
train
|
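A sketch combining read_caffemodel and layer_iter from this module to list the layers of a model; the file names are placeholders.

from caffe_parser import read_caffemodel, layer_iter

layers, layer_names = read_caffemodel('deploy.prototxt', 'weights.caffemodel')
for name, layer_type, blobs in layer_iter(layers, layer_names):
    # blobs holds the learned parameters (e.g. weights and bias) of the layer
    print(name, layer_type, len(blobs))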
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
set_config
|
def set_config(**kwargs):
"""Set up the configure of profiler (only accepts keyword arguments).
Parameters
----------
filename : string,
output file for profile data
profile_all : boolean,
all profile types enabled
profile_symbolic : boolean,
whether to profile symbolic operators
profile_imperative : boolean,
whether to profile imperative operators
profile_memory : boolean,
whether to profile memory usage
profile_api : boolean,
whether to profile the C API
contiguous_dump : boolean,
whether to periodically dump profiling data to file
dump_period : float,
seconds between profile data dumps
aggregate_stats : boolean,
whether to maintain aggregate stats in memory for console
dump. Has some negative performance impact.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
kk = kwargs.keys()
vv = kwargs.values()
check_call(_LIB.MXSetProcessProfilerConfig(len(kwargs),
c_str_array([key for key in kk]),
c_str_array([str(val) for val in vv]),
profiler_kvstore_handle))
|
python
|
def set_config(**kwargs):
"""Set up the configure of profiler (only accepts keyword arguments).
Parameters
----------
filename : string,
output file for profile data
profile_all : boolean,
all profile types enabled
profile_symbolic : boolean,
whether to profile symbolic operators
profile_imperative : boolean,
whether to profile imperative operators
profile_memory : boolean,
whether to profile memory usage
profile_api : boolean,
whether to profile the C API
contiguous_dump : boolean,
whether to periodically dump profiling data to file
dump_period : float,
seconds between profile data dumps
aggregate_stats : boolean,
whether to maintain aggregate stats in memory for console
dump. Has some negative performance impact.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
kk = kwargs.keys()
vv = kwargs.values()
check_call(_LIB.MXSetProcessProfilerConfig(len(kwargs),
c_str_array([key for key in kk]),
c_str_array([str(val) for val in vv]),
profiler_kvstore_handle))
|
[
"def",
"set_config",
"(",
"*",
"*",
"kwargs",
")",
":",
"kk",
"=",
"kwargs",
".",
"keys",
"(",
")",
"vv",
"=",
"kwargs",
".",
"values",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXSetProcessProfilerConfig",
"(",
"len",
"(",
"kwargs",
")",
",",
"c_str_array",
"(",
"[",
"key",
"for",
"key",
"in",
"kk",
"]",
")",
",",
"c_str_array",
"(",
"[",
"str",
"(",
"val",
")",
"for",
"val",
"in",
"vv",
"]",
")",
",",
"profiler_kvstore_handle",
")",
")"
] |
Set up the configure of profiler (only accepts keyword arguments).
Parameters
----------
filename : string,
output file for profile data
profile_all : boolean,
all profile types enabled
profile_symbolic : boolean,
whether to profile symbolic operators
profile_imperative : boolean,
whether to profile imperative operators
profile_memory : boolean,
whether to profile memory usage
profile_api : boolean,
whether to profile the C API
contiguous_dump : boolean,
whether to periodically dump profiling data to file
dump_period : float,
seconds between profile data dumps
aggregate_stats : boolean,
whether to maintain aggregate stats in memory for console
dump. Has some negative performance impact.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
|
[
"Set",
"up",
"the",
"configure",
"of",
"profiler",
"(",
"only",
"accepts",
"keyword",
"arguments",
")",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L33-L67
|
train
|
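A typical profiler configuration sketch using the keyword arguments documented above; the output file name is illustrative.

import mxnet as mx

mx.profiler.set_config(profile_all=True,
                       aggregate_stats=True,
                       filename='profile_output.json')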
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
profiler_set_config
|
def profiler_set_config(mode='symbolic', filename='profile.json'):
"""Set up the configure of profiler (Deprecated).
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
"""
warnings.warn('profiler.profiler_set_config() is deprecated. '
'Please use profiler.set_config() instead')
keys = c_str_array([key for key in ["profile_" + mode, "filename"]])
values = c_str_array([str(val) for val in [True, filename]])
assert len(keys) == len(values)
check_call(_LIB.MXSetProcessProfilerConfig(len(keys), keys, values, profiler_kvstore_handle))
|
python
|
def profiler_set_config(mode='symbolic', filename='profile.json'):
"""Set up the configure of profiler (Deprecated).
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
"""
warnings.warn('profiler.profiler_set_config() is deprecated. '
'Please use profiler.set_config() instead')
keys = c_str_array([key for key in ["profile_" + mode, "filename"]])
values = c_str_array([str(val) for val in [True, filename]])
assert len(keys) == len(values)
check_call(_LIB.MXSetProcessProfilerConfig(len(keys), keys, values, profiler_kvstore_handle))
|
[
"def",
"profiler_set_config",
"(",
"mode",
"=",
"'symbolic'",
",",
"filename",
"=",
"'profile.json'",
")",
":",
"warnings",
".",
"warn",
"(",
"'profiler.profiler_set_config() is deprecated. '",
"'Please use profiler.set_config() instead'",
")",
"keys",
"=",
"c_str_array",
"(",
"[",
"key",
"for",
"key",
"in",
"[",
"\"profile_\"",
"+",
"mode",
",",
"\"filename\"",
"]",
"]",
")",
"values",
"=",
"c_str_array",
"(",
"[",
"str",
"(",
"val",
")",
"for",
"val",
"in",
"[",
"True",
",",
"filename",
"]",
"]",
")",
"assert",
"len",
"(",
"keys",
")",
"==",
"len",
"(",
"values",
")",
"check_call",
"(",
"_LIB",
".",
"MXSetProcessProfilerConfig",
"(",
"len",
"(",
"keys",
")",
",",
"keys",
",",
"values",
",",
"profiler_kvstore_handle",
")",
")"
] |
Set up the configure of profiler (Deprecated).
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
|
[
"Set",
"up",
"the",
"configure",
"of",
"profiler",
"(",
"Deprecated",
")",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L70-L86
|
train
|
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
set_state
|
def set_state(state='stop', profile_process='worker'):
"""Set up the profiler state to 'run' or 'stop'.
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
state2int = {'stop': 0, 'run': 1}
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXSetProcessProfilerState(ctypes.c_int(state2int[state]),
profile_process2int[profile_process],
profiler_kvstore_handle))
|
python
|
def set_state(state='stop', profile_process='worker'):
"""Set up the profiler state to 'run' or 'stop'.
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
state2int = {'stop': 0, 'run': 1}
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXSetProcessProfilerState(ctypes.c_int(state2int[state]),
profile_process2int[profile_process],
profiler_kvstore_handle))
|
[
"def",
"set_state",
"(",
"state",
"=",
"'stop'",
",",
"profile_process",
"=",
"'worker'",
")",
":",
"state2int",
"=",
"{",
"'stop'",
":",
"0",
",",
"'run'",
":",
"1",
"}",
"profile_process2int",
"=",
"{",
"'worker'",
":",
"0",
",",
"'server'",
":",
"1",
"}",
"check_call",
"(",
"_LIB",
".",
"MXSetProcessProfilerState",
"(",
"ctypes",
".",
"c_int",
"(",
"state2int",
"[",
"state",
"]",
")",
",",
"profile_process2int",
"[",
"profile_process",
"]",
",",
"profiler_kvstore_handle",
")",
")"
] |
Set up the profiler state to 'run' or 'stop'.
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
|
[
"Set",
"up",
"the",
"profiler",
"state",
"to",
"run",
"or",
"stop",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L89-L106
|
train
|
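A sketch that brackets the code of interest with set_state, assuming the profiler was configured first; the workload is a placeholder.

import mxnet as mx

mx.profiler.set_state('run')
x = mx.nd.random.uniform(shape=(1000, 1000))
mx.nd.dot(x, x).wait_to_read()   # force the asynchronous work to finish
mx.profiler.set_state('stop')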
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
dump
|
def dump(finished=True, profile_process='worker'):
"""Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally.
Parameters
----------
finished : boolean
Indicates whether to stop statistic output (dumping) after this dump.
Default is True
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
fin = 1 if finished is True else 0
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXDumpProcessProfile(fin,
profile_process2int[profile_process],
profiler_kvstore_handle))
|
python
|
def dump(finished=True, profile_process='worker'):
"""Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally.
Parameters
----------
finished : boolean
Indicates whether to stop statistic output (dumping) after this dump.
Default is True
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
fin = 1 if finished is True else 0
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXDumpProcessProfile(fin,
profile_process2int[profile_process],
profiler_kvstore_handle))
|
[
"def",
"dump",
"(",
"finished",
"=",
"True",
",",
"profile_process",
"=",
"'worker'",
")",
":",
"fin",
"=",
"1",
"if",
"finished",
"is",
"True",
"else",
"0",
"profile_process2int",
"=",
"{",
"'worker'",
":",
"0",
",",
"'server'",
":",
"1",
"}",
"check_call",
"(",
"_LIB",
".",
"MXDumpProcessProfile",
"(",
"fin",
",",
"profile_process2int",
"[",
"profile_process",
"]",
",",
"profiler_kvstore_handle",
")",
")"
] |
Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally.
Parameters
----------
finished : boolean
Indicates whether to stop statistic output (dumping) after this dump.
Default is True
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
|
[
"Dump",
"profile",
"and",
"stop",
"profiler",
".",
"Use",
"this",
"to",
"save",
"profile",
"in",
"advance",
"in",
"case",
"your",
"program",
"cannot",
"exit",
"normally",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L122-L140
|
train
|
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
dumps
|
def dumps(reset=False):
"""Return a printable string of aggregate profile stats.
Parameters
----------
reset: boolean
Indicates whether to clean aggregate statistical data collected up to this point
"""
debug_str = ctypes.c_char_p()
do_reset = 1 if reset is True else 0
check_call(_LIB.MXAggregateProfileStatsPrint(ctypes.byref(debug_str), int(do_reset)))
return py_str(debug_str.value)
|
python
|
def dumps(reset=False):
"""Return a printable string of aggregate profile stats.
Parameters
----------
reset: boolean
Indicates whether to clean aggregate statistical data collected up to this point
"""
debug_str = ctypes.c_char_p()
do_reset = 1 if reset is True else 0
check_call(_LIB.MXAggregateProfileStatsPrint(ctypes.byref(debug_str), int(do_reset)))
return py_str(debug_str.value)
|
[
"def",
"dumps",
"(",
"reset",
"=",
"False",
")",
":",
"debug_str",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"do_reset",
"=",
"1",
"if",
"reset",
"is",
"True",
"else",
"0",
"check_call",
"(",
"_LIB",
".",
"MXAggregateProfileStatsPrint",
"(",
"ctypes",
".",
"byref",
"(",
"debug_str",
")",
",",
"int",
"(",
"do_reset",
")",
")",
")",
"return",
"py_str",
"(",
"debug_str",
".",
"value",
")"
] |
Return a printable string of aggregate profile stats.
Parameters
----------
reset: boolean
Indicates whether to clean aggregate statistical data collected up to this point
|
[
"Return",
"a",
"printable",
"string",
"of",
"aggregate",
"profile",
"stats",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L151-L162
|
train
|
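A dumps sketch; the console summary is only available when aggregate_stats=True was passed to set_config. The workload and file name are placeholders.

import mxnet as mx

mx.profiler.set_config(profile_all=True, aggregate_stats=True,
                       filename='profile_output.json')
mx.profiler.set_state('run')
mx.nd.ones((100, 100)).wait_to_read()
mx.profiler.set_state('stop')
print(mx.profiler.dumps())   # pass reset=True to clear the aggregate stats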
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
pause
|
def pause(profile_process='worker'):
"""Pause profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(1),
profile_process2int[profile_process],
profiler_kvstore_handle))
|
python
|
def pause(profile_process='worker'):
"""Pause profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(1),
profile_process2int[profile_process],
profiler_kvstore_handle))
|
[
"def",
"pause",
"(",
"profile_process",
"=",
"'worker'",
")",
":",
"profile_process2int",
"=",
"{",
"'worker'",
":",
"0",
",",
"'server'",
":",
"1",
"}",
"check_call",
"(",
"_LIB",
".",
"MXProcessProfilePause",
"(",
"int",
"(",
"1",
")",
",",
"profile_process2int",
"[",
"profile_process",
"]",
",",
"profiler_kvstore_handle",
")",
")"
] |
Pause profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
|
[
"Pause",
"profiling",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L165-L178
|
train
|
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
resume
|
def resume(profile_process='worker'):
"""
Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(0),
profile_process2int[profile_process],
profiler_kvstore_handle))
|
python
|
def resume(profile_process='worker'):
"""
Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(0),
profile_process2int[profile_process],
profiler_kvstore_handle))
|
[
"def",
"resume",
"(",
"profile_process",
"=",
"'worker'",
")",
":",
"profile_process2int",
"=",
"{",
"'worker'",
":",
"0",
",",
"'server'",
":",
"1",
"}",
"check_call",
"(",
"_LIB",
".",
"MXProcessProfilePause",
"(",
"int",
"(",
"0",
")",
",",
"profile_process2int",
"[",
"profile_process",
"]",
",",
"profiler_kvstore_handle",
")",
")"
] |
Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
|
[
"Resume",
"paused",
"profiling",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L181-L195
|
train
|
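A pause/resume sketch for excluding a warm-up phase from the profile; the workloads are placeholders.

import mxnet as mx

mx.profiler.set_state('run')
mx.profiler.pause()    # work done here is not recorded
mx.nd.ones((10, 10)).wait_to_read()
mx.profiler.resume()   # recording continues from here
mx.nd.dot(mx.nd.ones((500, 500)), mx.nd.ones((500, 500))).wait_to_read()
mx.profiler.set_state('stop')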
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
Counter.set_value
|
def set_value(self, value):
"""Set counter value.
Parameters
----------
value : int
Value for the counter
"""
check_call(_LIB.MXProfileSetCounter(self.handle, int(value)))
|
python
|
def set_value(self, value):
"""Set counter value.
Parameters
----------
value : int
Value for the counter
"""
check_call(_LIB.MXProfileSetCounter(self.handle, int(value)))
|
[
"def",
"set_value",
"(",
"self",
",",
"value",
")",
":",
"check_call",
"(",
"_LIB",
".",
"MXProfileSetCounter",
"(",
"self",
".",
"handle",
",",
"int",
"(",
"value",
")",
")",
")"
] |
Set counter value.
Parameters
----------
value : int
Value for the counter
|
[
"Set",
"counter",
"value",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L405-L413
|
train
|
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
Counter.increment
|
def increment(self, delta=1):
"""Increment counter value.
Parameters
----------
value_change : int
Amount by which to add to the counter
"""
check_call(_LIB.MXProfileAdjustCounter(self.handle, int(delta)))
|
python
|
def increment(self, delta=1):
"""Increment counter value.
Parameters
----------
value_change : int
Amount by which to add to the counter
"""
check_call(_LIB.MXProfileAdjustCounter(self.handle, int(delta)))
|
[
"def",
"increment",
"(",
"self",
",",
"delta",
"=",
"1",
")",
":",
"check_call",
"(",
"_LIB",
".",
"MXProfileAdjustCounter",
"(",
"self",
".",
"handle",
",",
"int",
"(",
"delta",
")",
")",
")"
] |
Increment counter value.
Parameters
----------
value_change : int
Amount by which to add to the counter
|
[
"Increment",
"counter",
"value",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L415-L423
|
train
|
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
Counter.decrement
|
def decrement(self, delta=1):
"""Decrement counter value.
Parameters
----------
value_change : int
Amount by which to subtract from the counter
"""
check_call(_LIB.MXProfileAdjustCounter(self.handle, -int(delta)))
|
python
|
def decrement(self, delta=1):
"""Decrement counter value.
Parameters
----------
value_change : int
Amount by which to subtract from the counter
"""
check_call(_LIB.MXProfileAdjustCounter(self.handle, -int(delta)))
|
[
"def",
"decrement",
"(",
"self",
",",
"delta",
"=",
"1",
")",
":",
"check_call",
"(",
"_LIB",
".",
"MXProfileAdjustCounter",
"(",
"self",
".",
"handle",
",",
"-",
"int",
"(",
"delta",
")",
")",
")"
] |
Decrement counter value.
Parameters
----------
value_change : int
Amount by which to subtract from the counter
|
[
"Decrement",
"counter",
"value",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L425-L433
|
train
|
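A Counter sketch covering set_value, increment and decrement; it assumes Counter is constructed from a profiler Domain plus a name and an optional initial value, and the domain and counter names are made up.

import mxnet as mx

domain = mx.profiler.Domain('my_app')
counter = mx.profiler.Counter(domain, 'samples_seen', 0)
counter.increment(32)
counter.increment(32)
counter.decrement(1)    # counter now reads 63
counter.set_value(100)  # overwrite the value directly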
apache/incubator-mxnet
|
python/mxnet/profiler.py
|
Marker.mark
|
def mark(self, scope='process'):
"""Set up the profiler state to record operator.
Parameters
----------
scope : string, optional
Indicates what scope the marker should refer to.
Can be 'global', 'process', 'thread', 'task', and 'marker'
Default is `process`.
"""
check_call(_LIB.MXProfileSetMarker(self.domain.handle, c_str(self.name), c_str(scope)))
|
python
|
def mark(self, scope='process'):
"""Set up the profiler state to record operator.
Parameters
----------
scope : string, optional
Indicates what scope the marker should refer to.
Can be 'global', 'process', 'thread', 'task', and 'marker'
Default is `process`.
"""
check_call(_LIB.MXProfileSetMarker(self.domain.handle, c_str(self.name), c_str(scope)))
|
[
"def",
"mark",
"(",
"self",
",",
"scope",
"=",
"'process'",
")",
":",
"check_call",
"(",
"_LIB",
".",
"MXProfileSetMarker",
"(",
"self",
".",
"domain",
".",
"handle",
",",
"c_str",
"(",
"self",
".",
"name",
")",
",",
"c_str",
"(",
"scope",
")",
")",
")"
] |
Set up the profiler state to record operator.
Parameters
----------
scope : string, optional
Indicates what scope the marker should refer to.
Can be 'global', 'process', 'thread', 'task', and 'marker'
Default is `process`.
|
[
"Set",
"up",
"the",
"profiler",
"state",
"to",
"record",
"operator",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/profiler.py#L463-L473
|
train
|
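A Marker.mark sketch; it assumes Marker is constructed from a profiler Domain and a name, and the names are illustrative.

import mxnet as mx

domain = mx.profiler.Domain('my_app')
marker = mx.profiler.Marker(domain, 'data_loaded')
marker.mark('process')   # records an instant event in the trace at process scope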
apache/incubator-mxnet
|
python/mxnet/rtc.py
|
CudaModule.get_kernel
|
def get_kernel(self, name, signature):
r"""Get CUDA kernel from compiled module.
Parameters
----------
name : str
String name of the kernel.
signature : str
Function signature for the kernel. For example, if a kernel is
declared as::
extern "C" __global__ void axpy(const float *x, double *y, int alpha)
Then its signature should be::
const float *x, double *y, int alpha
or::
const float *, double *, int
Note that `*` in signature marks an argument as array and
`const` marks an argument as constant (input) array.
Returns
-------
CudaKernel
CUDA kernels that can be launched on GPUs.
"""
hdl = CudaKernelHandle()
is_ndarray = []
is_const = []
dtypes = []
pattern = re.compile(r"""^\s*(const)?\s*([\w_]+)\s*(\*)?\s*([\w_]+)?\s*$""")
args = re.sub(r"\s+", " ", signature).split(",")
for arg in args:
match = pattern.match(arg)
if not match or match.groups()[1] == 'const':
raise ValueError(
'Invalid function prototype "%s". Must be in the '
'form of "(const) type (*) (name)"'%arg)
is_const.append(bool(match.groups()[0]))
dtype = match.groups()[1]
is_ndarray.append(bool(match.groups()[2]))
if dtype not in _DTYPE_CPP_TO_NP:
raise TypeError(
"Unsupported kernel argument type %s. Supported types are: %s."%(
arg, ','.join(_DTYPE_CPP_TO_NP.keys())))
dtypes.append(_DTYPE_NP_TO_MX[_DTYPE_CPP_TO_NP[dtype]])
check_call(_LIB.MXRtcCudaKernelCreate(
self.handle,
c_str(name),
len(dtypes),
c_array_buf(ctypes.c_int, array('i', is_ndarray)),
c_array_buf(ctypes.c_int, array('i', is_const)),
c_array_buf(ctypes.c_int, array('i', dtypes)),
ctypes.byref(hdl)))
return CudaKernel(hdl, name, is_ndarray, dtypes)
|
python
|
def get_kernel(self, name, signature):
r"""Get CUDA kernel from compiled module.
Parameters
----------
name : str
String name of the kernel.
signature : str
Function signature for the kernel. For example, if a kernel is
declared as::
extern "C" __global__ void axpy(const float *x, double *y, int alpha)
Then its signature should be::
const float *x, double *y, int alpha
or::
const float *, double *, int
Note that `*` in signature marks an argument as array and
`const` marks an argument as constant (input) array.
Returns
-------
CudaKernel
CUDA kernels that can be launched on GPUs.
"""
hdl = CudaKernelHandle()
is_ndarray = []
is_const = []
dtypes = []
pattern = re.compile(r"""^\s*(const)?\s*([\w_]+)\s*(\*)?\s*([\w_]+)?\s*$""")
args = re.sub(r"\s+", " ", signature).split(",")
for arg in args:
match = pattern.match(arg)
if not match or match.groups()[1] == 'const':
raise ValueError(
'Invalid function prototype "%s". Must be in the '
'form of "(const) type (*) (name)"'%arg)
is_const.append(bool(match.groups()[0]))
dtype = match.groups()[1]
is_ndarray.append(bool(match.groups()[2]))
if dtype not in _DTYPE_CPP_TO_NP:
raise TypeError(
"Unsupported kernel argument type %s. Supported types are: %s."%(
arg, ','.join(_DTYPE_CPP_TO_NP.keys())))
dtypes.append(_DTYPE_NP_TO_MX[_DTYPE_CPP_TO_NP[dtype]])
check_call(_LIB.MXRtcCudaKernelCreate(
self.handle,
c_str(name),
len(dtypes),
c_array_buf(ctypes.c_int, array('i', is_ndarray)),
c_array_buf(ctypes.c_int, array('i', is_const)),
c_array_buf(ctypes.c_int, array('i', dtypes)),
ctypes.byref(hdl)))
return CudaKernel(hdl, name, is_ndarray, dtypes)
|
[
"def",
"get_kernel",
"(",
"self",
",",
"name",
",",
"signature",
")",
":",
"hdl",
"=",
"CudaKernelHandle",
"(",
")",
"is_ndarray",
"=",
"[",
"]",
"is_const",
"=",
"[",
"]",
"dtypes",
"=",
"[",
"]",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r\"\"\"^\\s*(const)?\\s*([\\w_]+)\\s*(\\*)?\\s*([\\w_]+)?\\s*$\"\"\"",
")",
"args",
"=",
"re",
".",
"sub",
"(",
"r\"\\s+\"",
",",
"\" \"",
",",
"signature",
")",
".",
"split",
"(",
"\",\"",
")",
"for",
"arg",
"in",
"args",
":",
"match",
"=",
"pattern",
".",
"match",
"(",
"arg",
")",
"if",
"not",
"match",
"or",
"match",
".",
"groups",
"(",
")",
"[",
"1",
"]",
"==",
"'const'",
":",
"raise",
"ValueError",
"(",
"'Invalid function prototype \"%s\". Must be in the '",
"'form of \"(const) type (*) (name)\"'",
"%",
"arg",
")",
"is_const",
".",
"append",
"(",
"bool",
"(",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
")",
"dtype",
"=",
"match",
".",
"groups",
"(",
")",
"[",
"1",
"]",
"is_ndarray",
".",
"append",
"(",
"bool",
"(",
"match",
".",
"groups",
"(",
")",
"[",
"2",
"]",
")",
")",
"if",
"dtype",
"not",
"in",
"_DTYPE_CPP_TO_NP",
":",
"raise",
"TypeError",
"(",
"\"Unsupported kernel argument type %s. Supported types are: %s.\"",
"%",
"(",
"arg",
",",
"','",
".",
"join",
"(",
"_DTYPE_CPP_TO_NP",
".",
"keys",
"(",
")",
")",
")",
")",
"dtypes",
".",
"append",
"(",
"_DTYPE_NP_TO_MX",
"[",
"_DTYPE_CPP_TO_NP",
"[",
"dtype",
"]",
"]",
")",
"check_call",
"(",
"_LIB",
".",
"MXRtcCudaKernelCreate",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"name",
")",
",",
"len",
"(",
"dtypes",
")",
",",
"c_array_buf",
"(",
"ctypes",
".",
"c_int",
",",
"array",
"(",
"'i'",
",",
"is_ndarray",
")",
")",
",",
"c_array_buf",
"(",
"ctypes",
".",
"c_int",
",",
"array",
"(",
"'i'",
",",
"is_const",
")",
")",
",",
"c_array_buf",
"(",
"ctypes",
".",
"c_int",
",",
"array",
"(",
"'i'",
",",
"dtypes",
")",
")",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"CudaKernel",
"(",
"hdl",
",",
"name",
",",
"is_ndarray",
",",
"dtypes",
")"
] |
r"""Get CUDA kernel from compiled module.
Parameters
----------
name : str
String name of the kernel.
signature : str
Function signature for the kernel. For example, if a kernel is
declared as::
extern "C" __global__ void axpy(const float *x, double *y, int alpha)
Then its signature should be::
const float *x, double *y, int alpha
or::
const float *, double *, int
Note that `*` in signature marks an argument as array and
`const` marks an argument as constant (input) array.
Returns
-------
CudaKernel
CUDA kernels that can be launched on GPUs.
|
[
"r",
"Get",
"CUDA",
"kernel",
"from",
"compiled",
"module",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/rtc.py#L112-L171
|
train
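A minimal, self-contained sketch of how get_kernel parses a signature string. It needs no MXNet: it reuses the same regular expression and reports, for each argument of the docstring's hypothetical axpy signature, whether it is const, whether it is an array (pointer), and its base type; the mapping to MXNet dtypes is omitted.
import re

# Same pattern get_kernel uses: optional 'const', a base type, an optional
# '*' (marks the argument as an array/NDArray) and an optional name.
pattern = re.compile(r"""^\s*(const)?\s*([\w_]+)\s*(\*)?\s*([\w_]+)?\s*$""")

signature = "const float *x, double *y, int alpha"
for arg in re.sub(r"\s+", " ", signature).split(","):
    const, dtype, star, name = pattern.match(arg).groups()
    print(arg.strip(), "->",
          {"is_const": bool(const), "dtype": dtype, "is_ndarray": bool(star)})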
|
apache/incubator-mxnet
|
python/mxnet/rtc.py
|
CudaKernel.launch
|
def launch(self, args, ctx, grid_dims, block_dims, shared_mem=0):
"""Launch cuda kernel.
Parameters
----------
args : tuple of NDArray or numbers
List of arguments for kernel. NDArrays are expected for pointer
types (e.g. `float*`, `double*`) while numbers are expected for
non-pointer types (e.g. `int`, `float`).
ctx : Context
The context to launch kernel on. Must be GPU context.
grid_dims : tuple of 3 integers
Grid dimensions for CUDA kernel.
block_dims : tuple of 3 integers
Block dimensions for CUDA kernel.
shared_mem : integer, optional
Size of dynamically allocated shared memory. Defaults to 0.
"""
assert ctx.device_type == 'gpu', "Cuda kernel can only be launched on GPU"
assert len(grid_dims) == 3, "grid_dims must be a tuple of 3 integers"
assert len(block_dims) == 3, "block_dims must be a tuple of 3 integers"
assert len(args) == len(self._dtypes), \
"CudaKernel(%s) expects %d arguments but got %d"%(
self._name, len(self._dtypes), len(args))
void_args = []
ref_holder = []
for i, (arg, is_nd, dtype) in enumerate(zip(args, self._is_ndarray, self._dtypes)):
if is_nd:
assert isinstance(arg, NDArray), \
"The %d-th argument is expected to be a NDArray but got %s"%(
i, type(arg))
void_args.append(arg.handle)
else:
assert isinstance(arg, numeric_types), \
"The %d-th argument is expected to be a number, but got %s"%(
i, type(arg))
ref_holder.append(np.array(arg, dtype=dtype))
void_args.append(ref_holder[-1].ctypes.data_as(ctypes.c_void_p))
check_call(_LIB.MXRtcCudaKernelCall(
self.handle,
ctx.device_id,
c_array(ctypes.c_void_p, void_args),
mx_uint(grid_dims[0]), mx_uint(grid_dims[1]), mx_uint(grid_dims[2]),
mx_uint(block_dims[0]), mx_uint(block_dims[1]), mx_uint(block_dims[2]),
mx_uint(shared_mem)))
|
python
|
def launch(self, args, ctx, grid_dims, block_dims, shared_mem=0):
"""Launch cuda kernel.
Parameters
----------
args : tuple of NDArray or numbers
List of arguments for kernel. NDArrays are expected for pointer
types (e.g. `float*`, `double*`) while numbers are expected for
non-pointer types (e.g. `int`, `float`).
ctx : Context
The context to launch kernel on. Must be GPU context.
grid_dims : tuple of 3 integers
Grid dimensions for CUDA kernel.
block_dims : tuple of 3 integers
Block dimensions for CUDA kernel.
shared_mem : integer, optional
Size of dynamically allocated shared memory. Defaults to 0.
"""
assert ctx.device_type == 'gpu', "Cuda kernel can only be launched on GPU"
assert len(grid_dims) == 3, "grid_dims must be a tuple of 3 integers"
assert len(block_dims) == 3, "block_dims must be a tuple of 3 integers"
assert len(args) == len(self._dtypes), \
"CudaKernel(%s) expects %d arguments but got %d"%(
self._name, len(self._dtypes), len(args))
void_args = []
ref_holder = []
for i, (arg, is_nd, dtype) in enumerate(zip(args, self._is_ndarray, self._dtypes)):
if is_nd:
assert isinstance(arg, NDArray), \
"The %d-th argument is expected to be a NDArray but got %s"%(
i, type(arg))
void_args.append(arg.handle)
else:
assert isinstance(arg, numeric_types), \
"The %d-th argument is expected to be a number, but got %s"%(
i, type(arg))
ref_holder.append(np.array(arg, dtype=dtype))
void_args.append(ref_holder[-1].ctypes.data_as(ctypes.c_void_p))
check_call(_LIB.MXRtcCudaKernelCall(
self.handle,
ctx.device_id,
c_array(ctypes.c_void_p, void_args),
mx_uint(grid_dims[0]), mx_uint(grid_dims[1]), mx_uint(grid_dims[2]),
mx_uint(block_dims[0]), mx_uint(block_dims[1]), mx_uint(block_dims[2]),
mx_uint(shared_mem)))
|
[
"def",
"launch",
"(",
"self",
",",
"args",
",",
"ctx",
",",
"grid_dims",
",",
"block_dims",
",",
"shared_mem",
"=",
"0",
")",
":",
"assert",
"ctx",
".",
"device_type",
"==",
"'gpu'",
",",
"\"Cuda kernel can only be launched on GPU\"",
"assert",
"len",
"(",
"grid_dims",
")",
"==",
"3",
",",
"\"grid_dims must be a tuple of 3 integers\"",
"assert",
"len",
"(",
"block_dims",
")",
"==",
"3",
",",
"\"grid_dims must be a tuple of 3 integers\"",
"assert",
"len",
"(",
"args",
")",
"==",
"len",
"(",
"self",
".",
"_dtypes",
")",
",",
"\"CudaKernel(%s) expects %d arguments but got %d\"",
"%",
"(",
"self",
".",
"_name",
",",
"len",
"(",
"self",
".",
"_dtypes",
")",
",",
"len",
"(",
"args",
")",
")",
"void_args",
"=",
"[",
"]",
"ref_holder",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"arg",
",",
"is_nd",
",",
"dtype",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"args",
",",
"self",
".",
"_is_ndarray",
",",
"self",
".",
"_dtypes",
")",
")",
":",
"if",
"is_nd",
":",
"assert",
"isinstance",
"(",
"arg",
",",
"NDArray",
")",
",",
"\"The %d-th argument is expected to be a NDArray but got %s\"",
"%",
"(",
"i",
",",
"type",
"(",
"arg",
")",
")",
"void_args",
".",
"append",
"(",
"arg",
".",
"handle",
")",
"else",
":",
"assert",
"isinstance",
"(",
"arg",
",",
"numeric_types",
")",
",",
"\"The %d-th argument is expected to be a number, but got %s\"",
"%",
"(",
"i",
",",
"type",
"(",
"arg",
")",
")",
"ref_holder",
".",
"append",
"(",
"np",
".",
"array",
"(",
"arg",
",",
"dtype",
"=",
"dtype",
")",
")",
"void_args",
".",
"append",
"(",
"ref_holder",
"[",
"-",
"1",
"]",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"c_void_p",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXRtcCudaKernelCall",
"(",
"self",
".",
"handle",
",",
"ctx",
".",
"device_id",
",",
"c_array",
"(",
"ctypes",
".",
"c_void_p",
",",
"void_args",
")",
",",
"mx_uint",
"(",
"grid_dims",
"[",
"0",
"]",
")",
",",
"mx_uint",
"(",
"grid_dims",
"[",
"1",
"]",
")",
",",
"mx_uint",
"(",
"grid_dims",
"[",
"2",
"]",
")",
",",
"mx_uint",
"(",
"block_dims",
"[",
"0",
"]",
")",
",",
"mx_uint",
"(",
"block_dims",
"[",
"1",
"]",
")",
",",
"mx_uint",
"(",
"block_dims",
"[",
"2",
"]",
")",
",",
"mx_uint",
"(",
"shared_mem",
")",
")",
")"
] |
Launch cuda kernel.
Parameters
----------
args : tuple of NDArray or numbers
List of arguments for kernel. NDArrays are expected for pointer
types (e.g. `float*`, `double*`) while numbers are expected for
non-pointer types (e.g. `int`, `float`).
ctx : Context
The context to launch kernel on. Must be GPU context.
grid_dims : tuple of 3 integers
Grid dimensions for CUDA kernel.
block_dims : tuple of 3 integers
Block dimensions for CUDA kernel.
shared_mem : integer, optional
Size of dynamically allocated shared memory. Defaults to 0.
|
[
"Launch",
"cuda",
"kernel",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/rtc.py#L185-L230
|
train
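A hedged end-to-end sketch of the rtc API shown above. It assumes an MXNet build with CUDA, at least one GPU, and that mx.rtc.CudaModule is available as in MXNet 1.x; the axpy kernel itself is written only for this example.
import mxnet as mx

source = r'''
extern "C" __global__ void axpy(const float *x, float *y, int n, float alpha) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] += alpha * x[i];
}
'''
module = mx.rtc.CudaModule(source, exports=['axpy'])
kernel = module.get_kernel('axpy', 'const float *x, float *y, int n, float alpha')

ctx = mx.gpu(0)
x = mx.nd.ones((1024,), ctx=ctx)
y = mx.nd.zeros((1024,), ctx=ctx)
# 4 blocks of 256 threads cover the 1024 elements.
kernel.launch([x, y, 1024, 2.0], ctx, grid_dims=(4, 1, 1), block_dims=(256, 1, 1))
print(y.asnumpy()[:4])   # expected: [2. 2. 2. 2.]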
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_metric.py
|
MApMetric.reset
|
def reset(self):
"""Clear the internal statistics to initial state."""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
self.records = dict()
self.counts = dict()
|
python
|
def reset(self):
"""Clear the internal statistics to initial state."""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
self.records = dict()
self.counts = dict()
|
[
"def",
"reset",
"(",
"self",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'num'",
",",
"None",
")",
"is",
"None",
":",
"self",
".",
"num_inst",
"=",
"0",
"self",
".",
"sum_metric",
"=",
"0.0",
"else",
":",
"self",
".",
"num_inst",
"=",
"[",
"0",
"]",
"*",
"self",
".",
"num",
"self",
".",
"sum_metric",
"=",
"[",
"0.0",
"]",
"*",
"self",
".",
"num",
"self",
".",
"records",
"=",
"dict",
"(",
")",
"self",
".",
"counts",
"=",
"dict",
"(",
")"
] |
Clear the internal statistics to initial state.
|
[
"Clear",
"the",
"internal",
"statistics",
"to",
"initial",
"state",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L53-L62
|
train
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_metric.py
|
MApMetric.update
|
def update(self, labels, preds):
"""
Update internal records. This function now only updates the internal buffer;
sum_metric and num_inst are updated in _update() function instead when
get() is called to return results.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])
preds: mx.nd.array (m * 6)
2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax)
"""
def iou(x, ys):
"""
Calculate intersection-over-union overlap
Params:
----------
x : numpy.array
single box [xmin, ymin ,xmax, ymax]
ys : numpy.array
multiple box [[xmin, ymin, xmax, ymax], [...], ]
Returns:
-----------
numpy.array
[iou1, iou2, ...], size == ys.shape[0]
"""
ixmin = np.maximum(ys[:, 0], x[0])
iymin = np.maximum(ys[:, 1], x[1])
ixmax = np.minimum(ys[:, 2], x[2])
iymax = np.minimum(ys[:, 3], x[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \
(ys[:, 3] - ys[:, 1]) - inters
ious = inters / uni
ious[uni < 1e-12] = 0 # in case bad boxes
return ious
# independent execution for each image
for i in range(labels[0].shape[0]):
# get as numpy arrays
label = labels[0][i].asnumpy()
if np.sum(label[:, 0] >= 0) < 1:
continue
pred = preds[self.pred_idx][i].asnumpy()
# calculate for each class
while (pred.shape[0] > 0):
cid = int(pred[0, 0])
indices = np.where(pred[:, 0].astype(int) == cid)[0]
if cid < 0:
pred = np.delete(pred, indices, axis=0)
continue
dets = pred[indices]
pred = np.delete(pred, indices, axis=0)
# sort by score, descending
dets = dets[dets[:,1].argsort()[::-1]]
records = np.hstack((dets[:, 1][:, np.newaxis], np.zeros((dets.shape[0], 1))))
# ground-truths
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
gts = label[label_indices, :]
label = np.delete(label, label_indices, axis=0)
if gts.size > 0:
found = [False] * gts.shape[0]
for j in range(dets.shape[0]):
# compute overlaps
ious = iou(dets[j, 2:], gts[:, 1:5])
ovargmax = np.argmax(ious)
ovmax = ious[ovargmax]
if ovmax > self.ovp_thresh:
if (not self.use_difficult and
gts.shape[1] >= 6 and
gts[ovargmax, 5] > 0):
pass
else:
if not found[ovargmax]:
records[j, -1] = 1 # tp
found[ovargmax] = True
else:
# duplicate
records[j, -1] = 2 # fp
else:
records[j, -1] = 2 # fp
else:
# no gt, mark all fp
records[:, -1] = 2
# ground truth count
if (not self.use_difficult and gts.shape[1] >= 6):
gt_count = np.sum(gts[:, 5] < 1)
else:
gt_count = gts.shape[0]
# now we push records to buffer
# first column: score, second column: tp/fp
# 0: not set(matched to difficult or something), 1: tp, 2: fp
records = records[np.where(records[:, -1] > 0)[0], :]
if records.size > 0:
self._insert(cid, records, gt_count)
# add missing class if not present in prediction
while (label.shape[0] > 0):
cid = int(label[0, 0])
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
label = np.delete(label, label_indices, axis=0)
if cid < 0:
continue
gt_count = label_indices.size
self._insert(cid, np.array([[0, 0]]), gt_count)
|
python
|
def update(self, labels, preds):
"""
Update internal records. This function now only updates the internal buffer;
sum_metric and num_inst are updated in _update() function instead when
get() is called to return results.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])
preds: mx.nd.array (m * 6)
2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax)
"""
def iou(x, ys):
"""
Calculate intersection-over-union overlap
Params:
----------
x : numpy.array
single box [xmin, ymin ,xmax, ymax]
ys : numpy.array
multiple box [[xmin, ymin, xmax, ymax], [...], ]
Returns:
-----------
numpy.array
[iou1, iou2, ...], size == ys.shape[0]
"""
ixmin = np.maximum(ys[:, 0], x[0])
iymin = np.maximum(ys[:, 1], x[1])
ixmax = np.minimum(ys[:, 2], x[2])
iymax = np.minimum(ys[:, 3], x[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \
(ys[:, 3] - ys[:, 1]) - inters
ious = inters / uni
ious[uni < 1e-12] = 0 # in case bad boxes
return ious
# independent execution for each image
for i in range(labels[0].shape[0]):
# get as numpy arrays
label = labels[0][i].asnumpy()
if np.sum(label[:, 0] >= 0) < 1:
continue
pred = preds[self.pred_idx][i].asnumpy()
# calculate for each class
while (pred.shape[0] > 0):
cid = int(pred[0, 0])
indices = np.where(pred[:, 0].astype(int) == cid)[0]
if cid < 0:
pred = np.delete(pred, indices, axis=0)
continue
dets = pred[indices]
pred = np.delete(pred, indices, axis=0)
# sort by score, descending
dets = dets[dets[:,1].argsort()[::-1]]
records = np.hstack((dets[:, 1][:, np.newaxis], np.zeros((dets.shape[0], 1))))
# ground-truths
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
gts = label[label_indices, :]
label = np.delete(label, label_indices, axis=0)
if gts.size > 0:
found = [False] * gts.shape[0]
for j in range(dets.shape[0]):
# compute overlaps
ious = iou(dets[j, 2:], gts[:, 1:5])
ovargmax = np.argmax(ious)
ovmax = ious[ovargmax]
if ovmax > self.ovp_thresh:
if (not self.use_difficult and
gts.shape[1] >= 6 and
gts[ovargmax, 5] > 0):
pass
else:
if not found[ovargmax]:
records[j, -1] = 1 # tp
found[ovargmax] = True
else:
# duplicate
records[j, -1] = 2 # fp
else:
records[j, -1] = 2 # fp
else:
# no gt, mark all fp
records[:, -1] = 2
# ground truth count
if (not self.use_difficult and gts.shape[1] >= 6):
gt_count = np.sum(gts[:, 5] < 1)
else:
gt_count = gts.shape[0]
# now we push records to buffer
# first column: score, second column: tp/fp
# 0: not set(matched to difficult or something), 1: tp, 2: fp
records = records[np.where(records[:, -1] > 0)[0], :]
if records.size > 0:
self._insert(cid, records, gt_count)
# add missing class if not present in prediction
while (label.shape[0] > 0):
cid = int(label[0, 0])
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
label = np.delete(label, label_indices, axis=0)
if cid < 0:
continue
gt_count = label_indices.size
self._insert(cid, np.array([[0, 0]]), gt_count)
|
[
"def",
"update",
"(",
"self",
",",
"labels",
",",
"preds",
")",
":",
"def",
"iou",
"(",
"x",
",",
"ys",
")",
":",
"\"\"\"\n Calculate intersection-over-union overlap\n Params:\n ----------\n x : numpy.array\n single box [xmin, ymin ,xmax, ymax]\n ys : numpy.array\n multiple box [[xmin, ymin, xmax, ymax], [...], ]\n Returns:\n -----------\n numpy.array\n [iou1, iou2, ...], size == ys.shape[0]\n \"\"\"",
"ixmin",
"=",
"np",
".",
"maximum",
"(",
"ys",
"[",
":",
",",
"0",
"]",
",",
"x",
"[",
"0",
"]",
")",
"iymin",
"=",
"np",
".",
"maximum",
"(",
"ys",
"[",
":",
",",
"1",
"]",
",",
"x",
"[",
"1",
"]",
")",
"ixmax",
"=",
"np",
".",
"minimum",
"(",
"ys",
"[",
":",
",",
"2",
"]",
",",
"x",
"[",
"2",
"]",
")",
"iymax",
"=",
"np",
".",
"minimum",
"(",
"ys",
"[",
":",
",",
"3",
"]",
",",
"x",
"[",
"3",
"]",
")",
"iw",
"=",
"np",
".",
"maximum",
"(",
"ixmax",
"-",
"ixmin",
",",
"0.",
")",
"ih",
"=",
"np",
".",
"maximum",
"(",
"iymax",
"-",
"iymin",
",",
"0.",
")",
"inters",
"=",
"iw",
"*",
"ih",
"uni",
"=",
"(",
"x",
"[",
"2",
"]",
"-",
"x",
"[",
"0",
"]",
")",
"*",
"(",
"x",
"[",
"3",
"]",
"-",
"x",
"[",
"1",
"]",
")",
"+",
"(",
"ys",
"[",
":",
",",
"2",
"]",
"-",
"ys",
"[",
":",
",",
"0",
"]",
")",
"*",
"(",
"ys",
"[",
":",
",",
"3",
"]",
"-",
"ys",
"[",
":",
",",
"1",
"]",
")",
"-",
"inters",
"ious",
"=",
"inters",
"/",
"uni",
"ious",
"[",
"uni",
"<",
"1e-12",
"]",
"=",
"0",
"# in case bad boxes",
"return",
"ious",
"# independant execution for each image",
"for",
"i",
"in",
"range",
"(",
"labels",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# get as numpy arrays",
"label",
"=",
"labels",
"[",
"0",
"]",
"[",
"i",
"]",
".",
"asnumpy",
"(",
")",
"if",
"np",
".",
"sum",
"(",
"label",
"[",
":",
",",
"0",
"]",
">=",
"0",
")",
"<",
"1",
":",
"continue",
"pred",
"=",
"preds",
"[",
"self",
".",
"pred_idx",
"]",
"[",
"i",
"]",
".",
"asnumpy",
"(",
")",
"# calculate for each class",
"while",
"(",
"pred",
".",
"shape",
"[",
"0",
"]",
">",
"0",
")",
":",
"cid",
"=",
"int",
"(",
"pred",
"[",
"0",
",",
"0",
"]",
")",
"indices",
"=",
"np",
".",
"where",
"(",
"pred",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"cid",
")",
"[",
"0",
"]",
"if",
"cid",
"<",
"0",
":",
"pred",
"=",
"np",
".",
"delete",
"(",
"pred",
",",
"indices",
",",
"axis",
"=",
"0",
")",
"continue",
"dets",
"=",
"pred",
"[",
"indices",
"]",
"pred",
"=",
"np",
".",
"delete",
"(",
"pred",
",",
"indices",
",",
"axis",
"=",
"0",
")",
"# sort by score, desceding",
"dets",
"=",
"dets",
"[",
"dets",
"[",
":",
",",
"1",
"]",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"]",
"records",
"=",
"np",
".",
"hstack",
"(",
"(",
"dets",
"[",
":",
",",
"1",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"np",
".",
"zeros",
"(",
"(",
"dets",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
")",
")",
"# ground-truths",
"label_indices",
"=",
"np",
".",
"where",
"(",
"label",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"cid",
")",
"[",
"0",
"]",
"gts",
"=",
"label",
"[",
"label_indices",
",",
":",
"]",
"label",
"=",
"np",
".",
"delete",
"(",
"label",
",",
"label_indices",
",",
"axis",
"=",
"0",
")",
"if",
"gts",
".",
"size",
">",
"0",
":",
"found",
"=",
"[",
"False",
"]",
"*",
"gts",
".",
"shape",
"[",
"0",
"]",
"for",
"j",
"in",
"range",
"(",
"dets",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# compute overlaps",
"ious",
"=",
"iou",
"(",
"dets",
"[",
"j",
",",
"2",
":",
"]",
",",
"gts",
"[",
":",
",",
"1",
":",
"5",
"]",
")",
"ovargmax",
"=",
"np",
".",
"argmax",
"(",
"ious",
")",
"ovmax",
"=",
"ious",
"[",
"ovargmax",
"]",
"if",
"ovmax",
">",
"self",
".",
"ovp_thresh",
":",
"if",
"(",
"not",
"self",
".",
"use_difficult",
"and",
"gts",
".",
"shape",
"[",
"1",
"]",
">=",
"6",
"and",
"gts",
"[",
"ovargmax",
",",
"5",
"]",
">",
"0",
")",
":",
"pass",
"else",
":",
"if",
"not",
"found",
"[",
"ovargmax",
"]",
":",
"records",
"[",
"j",
",",
"-",
"1",
"]",
"=",
"1",
"# tp",
"found",
"[",
"ovargmax",
"]",
"=",
"True",
"else",
":",
"# duplicate",
"records",
"[",
"j",
",",
"-",
"1",
"]",
"=",
"2",
"# fp",
"else",
":",
"records",
"[",
"j",
",",
"-",
"1",
"]",
"=",
"2",
"# fp",
"else",
":",
"# no gt, mark all fp",
"records",
"[",
":",
",",
"-",
"1",
"]",
"=",
"2",
"# ground truth count",
"if",
"(",
"not",
"self",
".",
"use_difficult",
"and",
"gts",
".",
"shape",
"[",
"1",
"]",
">=",
"6",
")",
":",
"gt_count",
"=",
"np",
".",
"sum",
"(",
"gts",
"[",
":",
",",
"5",
"]",
"<",
"1",
")",
"else",
":",
"gt_count",
"=",
"gts",
".",
"shape",
"[",
"0",
"]",
"# now we push records to buffer",
"# first column: score, second column: tp/fp",
"# 0: not set(matched to difficult or something), 1: tp, 2: fp",
"records",
"=",
"records",
"[",
"np",
".",
"where",
"(",
"records",
"[",
":",
",",
"-",
"1",
"]",
">",
"0",
")",
"[",
"0",
"]",
",",
":",
"]",
"if",
"records",
".",
"size",
">",
"0",
":",
"self",
".",
"_insert",
"(",
"cid",
",",
"records",
",",
"gt_count",
")",
"# add missing class if not present in prediction",
"while",
"(",
"label",
".",
"shape",
"[",
"0",
"]",
">",
"0",
")",
":",
"cid",
"=",
"int",
"(",
"label",
"[",
"0",
",",
"0",
"]",
")",
"label_indices",
"=",
"np",
".",
"where",
"(",
"label",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"cid",
")",
"[",
"0",
"]",
"label",
"=",
"np",
".",
"delete",
"(",
"label",
",",
"label_indices",
",",
"axis",
"=",
"0",
")",
"if",
"cid",
"<",
"0",
":",
"continue",
"gt_count",
"=",
"label_indices",
".",
"size",
"self",
".",
"_insert",
"(",
"cid",
",",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
"]",
"]",
")",
",",
"gt_count",
")"
] |
Update internal records. This function now only update internal buffer,
sum_metric and num_inst are updated in _update() function instead when
get() is called to return results.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])
preds: mx.nd.array (m * 6)
2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax)
|
[
"Update",
"internal",
"records",
".",
"This",
"function",
"now",
"only",
"update",
"internal",
"buffer",
"sum_metric",
"and",
"num_inst",
"are",
"updated",
"in",
"_update",
"()",
"function",
"instead",
"when",
"get",
"()",
"is",
"called",
"to",
"return",
"results",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L86-L195
|
train
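The nested iou() helper above is the core of the matching step. The following stand-alone NumPy sketch restates it and checks it on two toy boxes; the values are chosen only for illustration.
import numpy as np

def iou(x, ys):
    # x: one box [xmin, ymin, xmax, ymax]; ys: (N, 4) array of boxes.
    ixmin = np.maximum(ys[:, 0], x[0])
    iymin = np.maximum(ys[:, 1], x[1])
    ixmax = np.minimum(ys[:, 2], x[2])
    iymax = np.minimum(ys[:, 3], x[3])
    iw = np.maximum(ixmax - ixmin, 0.)
    ih = np.maximum(iymax - iymin, 0.)
    inters = iw * ih
    uni = (x[2] - x[0]) * (x[3] - x[1]) \
        + (ys[:, 2] - ys[:, 0]) * (ys[:, 3] - ys[:, 1]) - inters
    ious = inters / uni
    ious[uni < 1e-12] = 0   # guard against degenerate boxes
    return ious

gt = np.array([0., 0., 2., 2.])                        # 2x2 ground-truth box
dets = np.array([[1., 1., 3., 3.], [0., 0., 2., 2.]])  # two detections
print(iou(gt, dets))   # ~[0.1429, 1.0]: 1/7 overlap, then a perfect match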
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_metric.py
|
MApMetric._update
|
def _update(self):
""" update num_inst and sum_metric """
aps = []
for k, v in self.records.items():
recall, prec = self._recall_prec(v, self.counts[k])
ap = self._average_precision(recall, prec)
aps.append(ap)
if self.num is not None and k < (self.num - 1):
self.sum_metric[k] = ap
self.num_inst[k] = 1
if self.num is None:
self.num_inst = 1
self.sum_metric = np.mean(aps)
else:
self.num_inst[-1] = 1
self.sum_metric[-1] = np.mean(aps)
|
python
|
def _update(self):
""" update num_inst and sum_metric """
aps = []
for k, v in self.records.items():
recall, prec = self._recall_prec(v, self.counts[k])
ap = self._average_precision(recall, prec)
aps.append(ap)
if self.num is not None and k < (self.num - 1):
self.sum_metric[k] = ap
self.num_inst[k] = 1
if self.num is None:
self.num_inst = 1
self.sum_metric = np.mean(aps)
else:
self.num_inst[-1] = 1
self.sum_metric[-1] = np.mean(aps)
|
[
"def",
"_update",
"(",
"self",
")",
":",
"aps",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"self",
".",
"records",
".",
"items",
"(",
")",
":",
"recall",
",",
"prec",
"=",
"self",
".",
"_recall_prec",
"(",
"v",
",",
"self",
".",
"counts",
"[",
"k",
"]",
")",
"ap",
"=",
"self",
".",
"_average_precision",
"(",
"recall",
",",
"prec",
")",
"aps",
".",
"append",
"(",
"ap",
")",
"if",
"self",
".",
"num",
"is",
"not",
"None",
"and",
"k",
"<",
"(",
"self",
".",
"num",
"-",
"1",
")",
":",
"self",
".",
"sum_metric",
"[",
"k",
"]",
"=",
"ap",
"self",
".",
"num_inst",
"[",
"k",
"]",
"=",
"1",
"if",
"self",
".",
"num",
"is",
"None",
":",
"self",
".",
"num_inst",
"=",
"1",
"self",
".",
"sum_metric",
"=",
"np",
".",
"mean",
"(",
"aps",
")",
"else",
":",
"self",
".",
"num_inst",
"[",
"-",
"1",
"]",
"=",
"1",
"self",
".",
"sum_metric",
"[",
"-",
"1",
"]",
"=",
"np",
".",
"mean",
"(",
"aps",
")"
] |
update num_inst and sum_metric
|
[
"update",
"num_inst",
"and",
"sum_metric"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L197-L212
|
train
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_metric.py
|
MApMetric._recall_prec
|
def _recall_prec(self, record, count):
""" get recall and precision from internal records """
record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
sorted_records = record[record[:,0].argsort()[::-1]]
tp = np.cumsum(sorted_records[:, 1].astype(int) == 1)
fp = np.cumsum(sorted_records[:, 1].astype(int) == 2)
if count <= 0:
recall = tp * 0.0
else:
recall = tp / float(count)
prec = tp.astype(float) / (tp + fp)
return recall, prec
|
python
|
def _recall_prec(self, record, count):
""" get recall and precision from internal records """
record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
sorted_records = record[record[:,0].argsort()[::-1]]
tp = np.cumsum(sorted_records[:, 1].astype(int) == 1)
fp = np.cumsum(sorted_records[:, 1].astype(int) == 2)
if count <= 0:
recall = tp * 0.0
else:
recall = tp / float(count)
prec = tp.astype(float) / (tp + fp)
return recall, prec
|
[
"def",
"_recall_prec",
"(",
"self",
",",
"record",
",",
"count",
")",
":",
"record",
"=",
"np",
".",
"delete",
"(",
"record",
",",
"np",
".",
"where",
"(",
"record",
"[",
":",
",",
"1",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"0",
")",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
")",
"sorted_records",
"=",
"record",
"[",
"record",
"[",
":",
",",
"0",
"]",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"]",
"tp",
"=",
"np",
".",
"cumsum",
"(",
"sorted_records",
"[",
":",
",",
"1",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"1",
")",
"fp",
"=",
"np",
".",
"cumsum",
"(",
"sorted_records",
"[",
":",
",",
"1",
"]",
".",
"astype",
"(",
"int",
")",
"==",
"2",
")",
"if",
"count",
"<=",
"0",
":",
"recall",
"=",
"tp",
"*",
"0.0",
"else",
":",
"recall",
"=",
"tp",
"/",
"float",
"(",
"count",
")",
"prec",
"=",
"tp",
".",
"astype",
"(",
"float",
")",
"/",
"(",
"tp",
"+",
"fp",
")",
"return",
"recall",
",",
"prec"
] |
get recall and precision from internal records
|
[
"get",
"recall",
"and",
"precision",
"from",
"internal",
"records"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L214-L225
|
train
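A tiny worked example of the bookkeeping above: three detections for one class (column 0 = score, column 1 = 1 for true positive, 2 for false positive) scored against two ground-truth objects.
import numpy as np

record = np.array([[0.9, 1], [0.8, 2], [0.7, 1]])   # score, tp/fp flag
count = 2                                           # ground-truth objects

order = record[:, 0].argsort()[::-1]                # sort by score, descending
tp = np.cumsum(record[order, 1].astype(int) == 1)   # [1, 1, 2]
fp = np.cumsum(record[order, 1].astype(int) == 2)   # [0, 1, 1]
recall = tp / float(count)                          # [0.5, 0.5, 1.0]
precision = tp.astype(float) / (tp + fp)            # [1.0, 0.5, 0.667]
print(recall, precision)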
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_metric.py
|
MApMetric._average_precision
|
def _average_precision(self, rec, prec):
"""
calculate average precision
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
# append sentinel values at both ends
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
|
python
|
def _average_precision(self, rec, prec):
"""
calculate average precision
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
# append sentinel values at both ends
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
|
[
"def",
"_average_precision",
"(",
"self",
",",
"rec",
",",
"prec",
")",
":",
"# append sentinel values at both ends",
"mrec",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"0.",
"]",
",",
"rec",
",",
"[",
"1.",
"]",
")",
")",
"mpre",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"0.",
"]",
",",
"prec",
",",
"[",
"0.",
"]",
")",
")",
"# compute precision integration ladder",
"for",
"i",
"in",
"range",
"(",
"mpre",
".",
"size",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"mpre",
"[",
"i",
"-",
"1",
"]",
"=",
"np",
".",
"maximum",
"(",
"mpre",
"[",
"i",
"-",
"1",
"]",
",",
"mpre",
"[",
"i",
"]",
")",
"# look for recall value changes",
"i",
"=",
"np",
".",
"where",
"(",
"mrec",
"[",
"1",
":",
"]",
"!=",
"mrec",
"[",
":",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"# sum (\\delta recall) * prec",
"ap",
"=",
"np",
".",
"sum",
"(",
"(",
"mrec",
"[",
"i",
"+",
"1",
"]",
"-",
"mrec",
"[",
"i",
"]",
")",
"*",
"mpre",
"[",
"i",
"+",
"1",
"]",
")",
"return",
"ap"
] |
calculate average precision
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
|
[
"calculate",
"average",
"precision"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L227-L254
|
train
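Continuing the toy numbers from the previous note, this stand-alone sketch applies the same all-point interpolation: the precision envelope is made monotonically non-increasing from right to left and the area under the resulting recall/precision steps is summed.
import numpy as np

def average_precision(rec, prec):
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # make precision monotonically non-increasing from right to left
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # points where recall changes
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])

rec = np.array([0.5, 0.5, 1.0])
prec = np.array([1.0, 0.5, 2.0 / 3.0])
print(average_precision(rec, prec))   # 0.5*1.0 + 0.5*(2/3) ~= 0.8333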
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_metric.py
|
MApMetric._insert
|
def _insert(self, key, records, count):
""" Insert records according to key """
if key not in self.records:
assert key not in self.counts
self.records[key] = records
self.counts[key] = count
else:
self.records[key] = np.vstack((self.records[key], records))
assert key in self.counts
self.counts[key] += count
|
python
|
def _insert(self, key, records, count):
""" Insert records according to key """
if key not in self.records:
assert key not in self.counts
self.records[key] = records
self.counts[key] = count
else:
self.records[key] = np.vstack((self.records[key], records))
assert key in self.counts
self.counts[key] += count
|
[
"def",
"_insert",
"(",
"self",
",",
"key",
",",
"records",
",",
"count",
")",
":",
"if",
"key",
"not",
"in",
"self",
".",
"records",
":",
"assert",
"key",
"not",
"in",
"self",
".",
"counts",
"self",
".",
"records",
"[",
"key",
"]",
"=",
"records",
"self",
".",
"counts",
"[",
"key",
"]",
"=",
"count",
"else",
":",
"self",
".",
"records",
"[",
"key",
"]",
"=",
"np",
".",
"vstack",
"(",
"(",
"self",
".",
"records",
"[",
"key",
"]",
",",
"records",
")",
")",
"assert",
"key",
"in",
"self",
".",
"counts",
"self",
".",
"counts",
"[",
"key",
"]",
"+=",
"count"
] |
Insert records according to key
|
[
"Insert",
"records",
"according",
"to",
"key"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L256-L265
|
train
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_metric.py
|
VOC07MApMetric._average_precision
|
def _average_precision(self, rec, prec):
"""
calculate average precision, override the default one,
special 11-point metric
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap += p / 11.
return ap
|
python
|
def _average_precision(self, rec, prec):
"""
calculate average precision, override the default one,
special 11-point metric
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap += p / 11.
return ap
|
[
"def",
"_average_precision",
"(",
"self",
",",
"rec",
",",
"prec",
")",
":",
"ap",
"=",
"0.",
"for",
"t",
"in",
"np",
".",
"arange",
"(",
"0.",
",",
"1.1",
",",
"0.1",
")",
":",
"if",
"np",
".",
"sum",
"(",
"rec",
">=",
"t",
")",
"==",
"0",
":",
"p",
"=",
"0",
"else",
":",
"p",
"=",
"np",
".",
"max",
"(",
"prec",
"[",
"rec",
">=",
"t",
"]",
")",
"ap",
"+=",
"p",
"/",
"11.",
"return",
"ap"
] |
calculate average precision, override the default one,
special 11-point metric
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
|
[
"calculate",
"average",
"precision",
"override",
"the",
"default",
"one",
"special",
"11",
"-",
"point",
"metric"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L273-L295
|
train
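The same toy recall/precision run through the VOC07-style 11-point rule: precision is sampled at recall thresholds 0.0, 0.1, ..., 1.0 and averaged, which generally gives a slightly different number than the all-point interpolation above.
import numpy as np

def voc07_ap(rec, prec):
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):          # 11 recall thresholds
        p = 0. if np.sum(rec >= t) == 0 else np.max(prec[rec >= t])
        ap += p / 11.
    return ap

rec = np.array([0.5, 0.5, 1.0])
prec = np.array([1.0, 0.5, 2.0 / 3.0])
print(voc07_ap(rec, prec))   # (6*1.0 + 5*2/3) / 11 ~= 0.85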
|
apache/incubator-mxnet
|
example/image-classification/fine-tune.py
|
get_fine_tune_model
|
def get_fine_tune_model(symbol, arg_params, num_classes, layer_name, dtype='float32'):
"""
symbol: the pre-trained network symbol
arg_params: the argument parameters of the pre-trained model
num_classes: the number of classes for the fine-tune datasets
layer_name: the layer name before the last fully-connected layer
"""
all_layers = symbol.get_internals()
net = all_layers[layer_name+'_output']
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc')
if dtype == 'float16':
net = mx.sym.Cast(data=net, dtype=np.float32)
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
return (net, new_args)
|
python
|
def get_fine_tune_model(symbol, arg_params, num_classes, layer_name, dtype='float32'):
"""
symbol: the pre-trained network symbol
arg_params: the argument parameters of the pre-trained model
num_classes: the number of classes for the fine-tune datasets
layer_name: the layer name before the last fully-connected layer
"""
all_layers = symbol.get_internals()
net = all_layers[layer_name+'_output']
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc')
if dtype == 'float16':
net = mx.sym.Cast(data=net, dtype=np.float32)
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
return (net, new_args)
|
[
"def",
"get_fine_tune_model",
"(",
"symbol",
",",
"arg_params",
",",
"num_classes",
",",
"layer_name",
",",
"dtype",
"=",
"'float32'",
")",
":",
"all_layers",
"=",
"symbol",
".",
"get_internals",
"(",
")",
"net",
"=",
"all_layers",
"[",
"layer_name",
"+",
"'_output'",
"]",
"net",
"=",
"mx",
".",
"symbol",
".",
"FullyConnected",
"(",
"data",
"=",
"net",
",",
"num_hidden",
"=",
"num_classes",
",",
"name",
"=",
"'fc'",
")",
"if",
"dtype",
"==",
"'float16'",
":",
"net",
"=",
"mx",
".",
"sym",
".",
"Cast",
"(",
"data",
"=",
"net",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"SoftmaxOutput",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'softmax'",
")",
"new_args",
"=",
"dict",
"(",
"{",
"k",
":",
"arg_params",
"[",
"k",
"]",
"for",
"k",
"in",
"arg_params",
"if",
"'fc'",
"not",
"in",
"k",
"}",
")",
"return",
"(",
"net",
",",
"new_args",
")"
] |
symbol: the pre-trained network symbol
arg_params: the argument parameters of the pre-trained model
num_classes: the number of classes for the fine-tune datasets
layer_name: the layer name before the last fully-connected layer
|
[
"symbol",
":",
"the",
"pre",
"-",
"trained",
"network",
"symbol",
"arg_params",
":",
"the",
"argument",
"parameters",
"of",
"the",
"pre",
"-",
"trained",
"model",
"num_classes",
":",
"the",
"number",
"of",
"classes",
"for",
"the",
"fine",
"-",
"tune",
"datasets",
"layer_name",
":",
"the",
"layer",
"name",
"before",
"the",
"last",
"fully",
"-",
"connected",
"layer"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/image-classification/fine-tune.py#L28-L42
|
train
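A hedged usage sketch, with get_fine_tune_model defined as above. The checkpoint prefix 'resnet-50' and the cut point 'flatten0' are only illustrative and depend on which pre-trained model is actually downloaded.
import mxnet as mx

# Load a pre-trained checkpoint (resnet-50-symbol.json / resnet-50-0000.params
# are assumed to exist locally).
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-50', 0)

# Replace everything after 'flatten0' with a fresh 10-way classifier.
new_sym, new_args = get_fine_tune_model(sym, arg_params,
                                        num_classes=10, layer_name='flatten0')

# new_sym, new_args and aux_params can then be passed to Module.fit(...) with
# allow_missing=True so only the new 'fc' layer is initialized from scratch.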
|
apache/incubator-mxnet
|
example/gluon/lipnet/data_loader.py
|
LipsDataset._list_images
|
def _list_images(self, root):
"""
Description : generate list for lip images
"""
self.labels = []
self.items = []
valid_unseen_sub_idx = [1, 2, 20, 22]
skip_sub_idx = [21]
if self._mode == 'train':
sub_idx = ['s' + str(i) for i in range(1, 35) \
if i not in valid_unseen_sub_idx + skip_sub_idx]
elif self._mode == 'valid':
sub_idx = ['s' + str(i) for i in valid_unseen_sub_idx]
folder_path = []
for i in sub_idx:
folder_path.extend(glob.glob(os.path.join(root, i, "*")))
for folder in folder_path:
filename = glob.glob(os.path.join(folder, "*"))
if len(filename) != self._seq_len:
continue
filename.sort()
label = os.path.split(folder)[-1]
self.items.append((filename, label))
|
python
|
def _list_images(self, root):
"""
Description : generate list for lip images
"""
self.labels = []
self.items = []
valid_unseen_sub_idx = [1, 2, 20, 22]
skip_sub_idx = [21]
if self._mode == 'train':
sub_idx = ['s' + str(i) for i in range(1, 35) \
if i not in valid_unseen_sub_idx + skip_sub_idx]
elif self._mode == 'valid':
sub_idx = ['s' + str(i) for i in valid_unseen_sub_idx]
folder_path = []
for i in sub_idx:
folder_path.extend(glob.glob(os.path.join(root, i, "*")))
for folder in folder_path:
filename = glob.glob(os.path.join(folder, "*"))
if len(filename) != self._seq_len:
continue
filename.sort()
label = os.path.split(folder)[-1]
self.items.append((filename, label))
|
[
"def",
"_list_images",
"(",
"self",
",",
"root",
")",
":",
"self",
".",
"labels",
"=",
"[",
"]",
"self",
".",
"items",
"=",
"[",
"]",
"valid_unseen_sub_idx",
"=",
"[",
"1",
",",
"2",
",",
"20",
",",
"22",
"]",
"skip_sub_idx",
"=",
"[",
"21",
"]",
"if",
"self",
".",
"_mode",
"==",
"'train'",
":",
"sub_idx",
"=",
"[",
"'s'",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"35",
")",
"if",
"i",
"not",
"in",
"valid_unseen_sub_idx",
"+",
"skip_sub_idx",
"]",
"elif",
"self",
".",
"_mode",
"==",
"'valid'",
":",
"sub_idx",
"=",
"[",
"'s'",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"valid_unseen_sub_idx",
"]",
"folder_path",
"=",
"[",
"]",
"for",
"i",
"in",
"sub_idx",
":",
"folder_path",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"i",
",",
"\"*\"",
")",
")",
")",
"for",
"folder",
"in",
"folder_path",
":",
"filename",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"\"*\"",
")",
")",
"if",
"len",
"(",
"filename",
")",
"!=",
"self",
".",
"_seq_len",
":",
"continue",
"filename",
".",
"sort",
"(",
")",
"label",
"=",
"os",
".",
"path",
".",
"split",
"(",
"folder",
")",
"[",
"-",
"1",
"]",
"self",
".",
"items",
".",
"append",
"(",
"(",
"filename",
",",
"label",
")",
")"
] |
Description : generate list for lip images
|
[
"Description",
":",
"generate",
"list",
"for",
"lip",
"images"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/data_loader.py#L45-L71
|
train
|
apache/incubator-mxnet
|
example/gluon/lipnet/data_loader.py
|
LipsDataset.align_generation
|
def align_generation(self, file_nm, padding=75):
"""
Description : Align to lip position
"""
align = Align(self._align_root + '/' + file_nm + '.align')
return nd.array(align.sentence(padding))
|
python
|
def align_generation(self, file_nm, padding=75):
"""
Description : Align to lip position
"""
align = Align(self._align_root + '/' + file_nm + '.align')
return nd.array(align.sentence(padding))
|
[
"def",
"align_generation",
"(",
"self",
",",
"file_nm",
",",
"padding",
"=",
"75",
")",
":",
"align",
"=",
"Align",
"(",
"self",
".",
"_align_root",
"+",
"'/'",
"+",
"file_nm",
"+",
"'.align'",
")",
"return",
"nd",
".",
"array",
"(",
"align",
".",
"sentence",
"(",
"padding",
")",
")"
] |
Description : Align to lip position
|
[
"Description",
":",
"Align",
"to",
"lip",
"position"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/data_loader.py#L73-L78
|
train
|
apache/incubator-mxnet
|
python/mxnet/initializer.py
|
Initializer.set_verbosity
|
def set_verbosity(self, verbose=False, print_func=None):
"""Switch on/off verbose mode
Parameters
----------
verbose : bool
switch on/off verbose mode
print_func : function
A function that computes statistics of initialized arrays.
Takes an `NDArray` and returns an `str`. Defaults to mean
absolute value str((abs(x)/size(x)).asscalar()).
"""
self._verbose = verbose
if print_func is None:
def asum_stat(x):
"""returns |x|/size(x), async execution."""
return str((ndarray.norm(x)/sqrt(x.size)).asscalar())
print_func = asum_stat
self._print_func = print_func
return self
|
python
|
def set_verbosity(self, verbose=False, print_func=None):
"""Switch on/off verbose mode
Parameters
----------
verbose : bool
switch on/off verbose mode
print_func : function
A function that computes statistics of initialized arrays.
Takes an `NDArray` and returns an `str`. Defaults to mean
absolute value str((abs(x)/size(x)).asscalar()).
"""
self._verbose = verbose
if print_func is None:
def asum_stat(x):
"""returns |x|/size(x), async execution."""
return str((ndarray.norm(x)/sqrt(x.size)).asscalar())
print_func = asum_stat
self._print_func = print_func
return self
|
[
"def",
"set_verbosity",
"(",
"self",
",",
"verbose",
"=",
"False",
",",
"print_func",
"=",
"None",
")",
":",
"self",
".",
"_verbose",
"=",
"verbose",
"if",
"print_func",
"is",
"None",
":",
"def",
"asum_stat",
"(",
"x",
")",
":",
"\"\"\"returns |x|/size(x), async execution.\"\"\"",
"return",
"str",
"(",
"(",
"ndarray",
".",
"norm",
"(",
"x",
")",
"/",
"sqrt",
"(",
"x",
".",
"size",
")",
")",
".",
"asscalar",
"(",
")",
")",
"print_func",
"=",
"asum_stat",
"self",
".",
"_print_func",
"=",
"print_func",
"return",
"self"
] |
Switch on/off verbose mode
Parameters
----------
verbose : bool
switch on/off verbose mode
print_func : function
A function that computes statistics of initialized arrays.
Takes an `NDArray` and returns an `str`. Defaults to mean
absolute value str((abs(x)/size(x)).asscalar()).
|
[
"Switch",
"on",
"/",
"off",
"verbose",
"mode"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/initializer.py#L61-L80
|
train
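A short usage sketch for the verbose mode above; the parameter name 'fc1_weight' is arbitrary and only serves to trigger the weight-initialization path.
import logging
import mxnet as mx

logging.basicConfig(level=logging.INFO)

# Log the mean absolute value of every array this initializer touches.
init = mx.init.Xavier().set_verbosity(
    verbose=True,
    print_func=lambda arr: str(mx.nd.mean(mx.nd.abs(arr)).asscalar()))

weight = mx.nd.zeros((3, 4))
init(mx.init.InitDesc('fc1_weight'), weight)   # logs: Initialized fc1_weight as ...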
|
apache/incubator-mxnet
|
python/mxnet/initializer.py
|
Initializer._verbose_print
|
def _verbose_print(self, desc, init, arr):
"""Internal verbose print function
Parameters
----------
desc : InitDesc or str
name of the array
init : str
initializer pattern
arr : NDArray
initialized array
"""
if self._verbose and self._print_func:
logging.info('Initialized %s as %s: %s', desc, init, self._print_func(arr))
|
python
|
def _verbose_print(self, desc, init, arr):
"""Internal verbose print function
Parameters
----------
desc : InitDesc or str
name of the array
init : str
initializer pattern
arr : NDArray
initialized array
"""
if self._verbose and self._print_func:
logging.info('Initialized %s as %s: %s', desc, init, self._print_func(arr))
|
[
"def",
"_verbose_print",
"(",
"self",
",",
"desc",
",",
"init",
",",
"arr",
")",
":",
"if",
"self",
".",
"_verbose",
"and",
"self",
".",
"_print_func",
":",
"logging",
".",
"info",
"(",
"'Initialized %s as %s: %s'",
",",
"desc",
",",
"init",
",",
"self",
".",
"_print_func",
"(",
"arr",
")",
")"
] |
Internal verbose print function
Parameters
----------
desc : InitDesc or str
name of the array
init : str
initializer pattern
arr : NDArray
initialized array
|
[
"Internal",
"verbose",
"print",
"function"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/initializer.py#L82-L95
|
train
|
apache/incubator-mxnet
|
python/mxnet/initializer.py
|
Initializer._legacy_init
|
def _legacy_init(self, name, arr):
"""Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
"""
warnings.warn(
"\033[91mCalling initializer with init(str, NDArray) has been deprecated." \
"please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m",
DeprecationWarning, stacklevel=3)
if not isinstance(name, string_types):
raise TypeError('name must be string')
if not isinstance(arr, NDArray):
raise TypeError('arr must be NDArray')
if name.startswith('upsampling'):
self._init_bilinear(name, arr)
elif name.startswith('stn_loc') and name.endswith('weight'):
self._init_zero(name, arr)
elif name.startswith('stn_loc') and name.endswith('bias'):
self._init_loc_bias(name, arr)
elif name.endswith('bias'):
self._init_bias(name, arr)
elif name.endswith('gamma'):
self._init_gamma(name, arr)
elif name.endswith('beta'):
self._init_beta(name, arr)
elif name.endswith('weight'):
self._init_weight(name, arr)
elif name.endswith("moving_mean"):
self._init_zero(name, arr)
elif name.endswith("moving_var"):
self._init_one(name, arr)
elif name.endswith("moving_inv_var"):
self._init_zero(name, arr)
elif name.endswith("moving_avg"):
self._init_zero(name, arr)
elif name.endswith('min'):
self._init_zero(name, arr)
elif name.endswith('max'):
self._init_one(name, arr)
else:
self._init_default(name, arr)
|
python
|
def _legacy_init(self, name, arr):
"""Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
"""
warnings.warn(
"\033[91mCalling initializer with init(str, NDArray) has been deprecated." \
"please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m",
DeprecationWarning, stacklevel=3)
if not isinstance(name, string_types):
raise TypeError('name must be string')
if not isinstance(arr, NDArray):
raise TypeError('arr must be NDArray')
if name.startswith('upsampling'):
self._init_bilinear(name, arr)
elif name.startswith('stn_loc') and name.endswith('weight'):
self._init_zero(name, arr)
elif name.startswith('stn_loc') and name.endswith('bias'):
self._init_loc_bias(name, arr)
elif name.endswith('bias'):
self._init_bias(name, arr)
elif name.endswith('gamma'):
self._init_gamma(name, arr)
elif name.endswith('beta'):
self._init_beta(name, arr)
elif name.endswith('weight'):
self._init_weight(name, arr)
elif name.endswith("moving_mean"):
self._init_zero(name, arr)
elif name.endswith("moving_var"):
self._init_one(name, arr)
elif name.endswith("moving_inv_var"):
self._init_zero(name, arr)
elif name.endswith("moving_avg"):
self._init_zero(name, arr)
elif name.endswith('min'):
self._init_zero(name, arr)
elif name.endswith('max'):
self._init_one(name, arr)
else:
self._init_default(name, arr)
|
[
"def",
"_legacy_init",
"(",
"self",
",",
"name",
",",
"arr",
")",
":",
"warnings",
".",
"warn",
"(",
"\"\\033[91mCalling initializer with init(str, NDArray) has been deprecated.\"",
"\"please use init(mx.init.InitDesc(...), NDArray) instead.\\033[0m\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"3",
")",
"if",
"not",
"isinstance",
"(",
"name",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'name must be string'",
")",
"if",
"not",
"isinstance",
"(",
"arr",
",",
"NDArray",
")",
":",
"raise",
"TypeError",
"(",
"'arr must be NDArray'",
")",
"if",
"name",
".",
"startswith",
"(",
"'upsampling'",
")",
":",
"self",
".",
"_init_bilinear",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"startswith",
"(",
"'stn_loc'",
")",
"and",
"name",
".",
"endswith",
"(",
"'weight'",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"startswith",
"(",
"'stn_loc'",
")",
"and",
"name",
".",
"endswith",
"(",
"'bias'",
")",
":",
"self",
".",
"_init_loc_bias",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'bias'",
")",
":",
"self",
".",
"_init_bias",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'gamma'",
")",
":",
"self",
".",
"_init_gamma",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'beta'",
")",
":",
"self",
".",
"_init_beta",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'weight'",
")",
":",
"self",
".",
"_init_weight",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_mean\"",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_var\"",
")",
":",
"self",
".",
"_init_one",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_inv_var\"",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_avg\"",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'min'",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'max'",
")",
":",
"self",
".",
"_init_one",
"(",
"name",
",",
"arr",
")",
"else",
":",
"self",
".",
"_init_default",
"(",
"name",
",",
"arr",
")"
] |
Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
|
[
"Legacy",
"initialization",
"method",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/initializer.py#L171-L217
|
train
|
apache/incubator-mxnet
|
example/ssd/dataset/imdb.py
|
Imdb.save_imglist
|
def save_imglist(self, fname=None, root=None, shuffle=False):
"""
save imglist to disk
Parameters:
----------
fname : str
saved filename
"""
def progress_bar(count, total, suffix=''):
import sys
bar_len = 24
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
sys.stdout.flush()
str_list = []
for index in range(self.num_images):
progress_bar(index, self.num_images)
label = self.label_from_index(index)
if label.size < 1:
continue
path = self.image_path_from_index(index)
if root:
path = osp.relpath(path, root)
str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \
+ ["{0:.4f}".format(x) for x in label.ravel()] + [path,]) + '\n')
if str_list:
if shuffle:
import random
random.shuffle(str_list)
if not fname:
fname = self.name + '.lst'
with open(fname, 'w') as f:
for line in str_list:
f.write(line)
else:
raise RuntimeError("No image in imdb")
|
python
|
def save_imglist(self, fname=None, root=None, shuffle=False):
"""
save imglist to disk
Parameters:
----------
fname : str
saved filename
"""
def progress_bar(count, total, suffix=''):
import sys
bar_len = 24
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
sys.stdout.flush()
str_list = []
for index in range(self.num_images):
progress_bar(index, self.num_images)
label = self.label_from_index(index)
if label.size < 1:
continue
path = self.image_path_from_index(index)
if root:
path = osp.relpath(path, root)
str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \
+ ["{0:.4f}".format(x) for x in label.ravel()] + [path,]) + '\n')
if str_list:
if shuffle:
import random
random.shuffle(str_list)
if not fname:
fname = self.name + '.lst'
with open(fname, 'w') as f:
for line in str_list:
f.write(line)
else:
raise RuntimeError("No image in imdb")
|
[
"def",
"save_imglist",
"(",
"self",
",",
"fname",
"=",
"None",
",",
"root",
"=",
"None",
",",
"shuffle",
"=",
"False",
")",
":",
"def",
"progress_bar",
"(",
"count",
",",
"total",
",",
"suffix",
"=",
"''",
")",
":",
"import",
"sys",
"bar_len",
"=",
"24",
"filled_len",
"=",
"int",
"(",
"round",
"(",
"bar_len",
"*",
"count",
"/",
"float",
"(",
"total",
")",
")",
")",
"percents",
"=",
"round",
"(",
"100.0",
"*",
"count",
"/",
"float",
"(",
"total",
")",
",",
"1",
")",
"bar",
"=",
"'='",
"*",
"filled_len",
"+",
"'-'",
"*",
"(",
"bar_len",
"-",
"filled_len",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'[%s] %s%s ...%s\\r'",
"%",
"(",
"bar",
",",
"percents",
",",
"'%'",
",",
"suffix",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"str_list",
"=",
"[",
"]",
"for",
"index",
"in",
"range",
"(",
"self",
".",
"num_images",
")",
":",
"progress_bar",
"(",
"index",
",",
"self",
".",
"num_images",
")",
"label",
"=",
"self",
".",
"label_from_index",
"(",
"index",
")",
"if",
"label",
".",
"size",
"<",
"1",
":",
"continue",
"path",
"=",
"self",
".",
"image_path_from_index",
"(",
"index",
")",
"if",
"root",
":",
"path",
"=",
"osp",
".",
"relpath",
"(",
"path",
",",
"root",
")",
"str_list",
".",
"append",
"(",
"'\\t'",
".",
"join",
"(",
"[",
"str",
"(",
"index",
")",
",",
"str",
"(",
"2",
")",
",",
"str",
"(",
"label",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"+",
"[",
"\"{0:.4f}\"",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"label",
".",
"ravel",
"(",
")",
"]",
"+",
"[",
"path",
",",
"]",
")",
"+",
"'\\n'",
")",
"if",
"str_list",
":",
"if",
"shuffle",
":",
"import",
"random",
"random",
".",
"shuffle",
"(",
"str_list",
")",
"if",
"not",
"fname",
":",
"fname",
"=",
"self",
".",
"name",
"+",
"'.lst'",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"str_list",
":",
"f",
".",
"write",
"(",
"line",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No image in imdb\"",
")"
] |
save imglist to disk
Parameters:
----------
fname : str
saved filename
|
[
"save",
"imglist",
"to",
"disk"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/dataset/imdb.py#L70-L110
|
train
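The records written above are tab-separated: index, a fixed header width of 2, the label width, the flattened label values (4 decimal places), and finally the image path. A minimal sketch for reading such a .lst file back; the file name mydb.lst is hypothetical.
# Each line: index \t 2 \t label_width \t l_0 \t l_1 ... \t relative/path.jpg
with open('mydb.lst') as f:
    for line in f:
        parts = line.strip().split('\t')
        index = int(parts[0])
        label_width = int(float(parts[2]))
        path = parts[-1]
        values = [float(v) for v in parts[3:-1]]
        print(index, path, len(values) // label_width, 'objects')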
|
apache/incubator-mxnet
|
example/ssd/dataset/imdb.py
|
Imdb._load_class_names
|
def _load_class_names(self, filename, dirname):
"""
load class names from text file
Parameters:
----------
filename: str
file stores class names
dirname: str
file directory
"""
full_path = osp.join(dirname, filename)
classes = []
with open(full_path, 'r') as f:
classes = [l.strip() for l in f.readlines()]
return classes
|
python
|
def _load_class_names(self, filename, dirname):
"""
load class names from text file
Parameters:
----------
filename: str
file stores class names
dirname: str
file directory
"""
full_path = osp.join(dirname, filename)
classes = []
with open(full_path, 'r') as f:
classes = [l.strip() for l in f.readlines()]
return classes
|
[
"def",
"_load_class_names",
"(",
"self",
",",
"filename",
",",
"dirname",
")",
":",
"full_path",
"=",
"osp",
".",
"join",
"(",
"dirname",
",",
"filename",
")",
"classes",
"=",
"[",
"]",
"with",
"open",
"(",
"full_path",
",",
"'r'",
")",
"as",
"f",
":",
"classes",
"=",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"f",
".",
"readlines",
"(",
")",
"]",
"return",
"classes"
] |
load class names from text file
Parameters:
----------
filename: str
file stores class names
dirname: str
file directory
|
[
"load",
"class",
"names",
"from",
"text",
"file"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/dataset/imdb.py#L112-L127
|
train
|
apache/incubator-mxnet
|
example/image-classification/train_mnist.py
|
read_data
|
def read_data(label, image):
"""
download and read data into numpy
"""
base_url = 'http://yann.lecun.com/exdb/mnist/'
with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
label = np.fromstring(flbl.read(), dtype=np.int8)
with gzip.open(download_file(base_url+image, os.path.join('data',image)), 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
return (label, image)
|
python
|
def read_data(label, image):
"""
download and read data into numpy
"""
base_url = 'http://yann.lecun.com/exdb/mnist/'
with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
label = np.fromstring(flbl.read(), dtype=np.int8)
with gzip.open(download_file(base_url+image, os.path.join('data',image)), 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
return (label, image)
|
[
"def",
"read_data",
"(",
"label",
",",
"image",
")",
":",
"base_url",
"=",
"'http://yann.lecun.com/exdb/mnist/'",
"with",
"gzip",
".",
"open",
"(",
"download_file",
"(",
"base_url",
"+",
"label",
",",
"os",
".",
"path",
".",
"join",
"(",
"'data'",
",",
"label",
")",
")",
")",
"as",
"flbl",
":",
"magic",
",",
"num",
"=",
"struct",
".",
"unpack",
"(",
"\">II\"",
",",
"flbl",
".",
"read",
"(",
"8",
")",
")",
"label",
"=",
"np",
".",
"fromstring",
"(",
"flbl",
".",
"read",
"(",
")",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"with",
"gzip",
".",
"open",
"(",
"download_file",
"(",
"base_url",
"+",
"image",
",",
"os",
".",
"path",
".",
"join",
"(",
"'data'",
",",
"image",
")",
")",
",",
"'rb'",
")",
"as",
"fimg",
":",
"magic",
",",
"num",
",",
"rows",
",",
"cols",
"=",
"struct",
".",
"unpack",
"(",
"\">IIII\"",
",",
"fimg",
".",
"read",
"(",
"16",
")",
")",
"image",
"=",
"np",
".",
"fromstring",
"(",
"fimg",
".",
"read",
"(",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"reshape",
"(",
"len",
"(",
"label",
")",
",",
"rows",
",",
"cols",
")",
"return",
"(",
"label",
",",
"image",
")"
] |
download and read data into numpy
|
[
"download",
"and",
"read",
"data",
"into",
"numpy"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/image-classification/train_mnist.py#L31-L42
|
train
|
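Note that `np.fromstring`, used in `read_data` above, is deprecated in recent NumPy releases. A hedged sketch of the same IDX parsing with `np.frombuffer`; the paths are assumed to be already-downloaded local copies of the gzipped MNIST files rather than URLs:

import gzip
import struct
import numpy as np

def read_mnist(label_path, image_path):
    # IDX label file: 8-byte big-endian header, then one int8 label per image
    with gzip.open(label_path, 'rb') as flbl:
        _magic, _num = struct.unpack(">II", flbl.read(8))
        label = np.frombuffer(flbl.read(), dtype=np.int8)
    # IDX image file: 16-byte header, then rows*cols uint8 pixels per image
    with gzip.open(image_path, 'rb') as fimg:
        _magic, _num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return label, image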
apache/incubator-mxnet
|
example/image-classification/train_mnist.py
|
get_mnist_iter
|
def get_mnist_iter(args, kv):
"""
create data iterator with NDArrayIter
"""
(train_lbl, train_img) = read_data(
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
(val_lbl, val_img) = read_data(
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')
train = mx.io.NDArrayIter(
to4d(train_img), train_lbl, args.batch_size, shuffle=True)
val = mx.io.NDArrayIter(
to4d(val_img), val_lbl, args.batch_size)
return (train, val)
|
python
|
def get_mnist_iter(args, kv):
"""
create data iterator with NDArrayIter
"""
(train_lbl, train_img) = read_data(
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
(val_lbl, val_img) = read_data(
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')
train = mx.io.NDArrayIter(
to4d(train_img), train_lbl, args.batch_size, shuffle=True)
val = mx.io.NDArrayIter(
to4d(val_img), val_lbl, args.batch_size)
return (train, val)
|
[
"def",
"get_mnist_iter",
"(",
"args",
",",
"kv",
")",
":",
"(",
"train_lbl",
",",
"train_img",
")",
"=",
"read_data",
"(",
"'train-labels-idx1-ubyte.gz'",
",",
"'train-images-idx3-ubyte.gz'",
")",
"(",
"val_lbl",
",",
"val_img",
")",
"=",
"read_data",
"(",
"'t10k-labels-idx1-ubyte.gz'",
",",
"'t10k-images-idx3-ubyte.gz'",
")",
"train",
"=",
"mx",
".",
"io",
".",
"NDArrayIter",
"(",
"to4d",
"(",
"train_img",
")",
",",
"train_lbl",
",",
"args",
".",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
"val",
"=",
"mx",
".",
"io",
".",
"NDArrayIter",
"(",
"to4d",
"(",
"val_img",
")",
",",
"val_lbl",
",",
"args",
".",
"batch_size",
")",
"return",
"(",
"train",
",",
"val",
")"
] |
create data iterator with NDArrayIter
|
[
"create",
"data",
"iterator",
"with",
"NDArrayIter"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/image-classification/train_mnist.py#L51-L63
|
train
|
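A hedged usage sketch of `get_mnist_iter`: the function only reads `args.batch_size` and never touches `kv` in this snippet, so a bare argparse namespace and `kv=None` are enough. It assumes the surrounding `train_mnist.py` helpers (`read_data`, `to4d`, `download_file`) are in scope:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=64)
args = parser.parse_args([])                 # take the defaults for this demo

train_iter, val_iter = get_mnist_iter(args, kv=None)
batch = next(iter(train_iter))
print(batch.data[0].shape)                   # expected: (64, 1, 28, 28)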
apache/incubator-mxnet
|
example/fcn-xs/image_segmentaion.py
|
make_file_extension_assertion
|
def make_file_extension_assertion(extension):
"""Function factory for file extension argparse assertion
Args:
extension (string): the file extension to assert
Returns:
string: the supplied extension, if assertion is successful.
"""
def file_extension_assertion(file_path):
base, ext = os.path.splitext(file_path)
if ext.lower() != extension:
raise argparse.ArgumentTypeError('File must have ' + extension + ' extension')
return file_path
return file_extension_assertion
|
python
|
def make_file_extension_assertion(extension):
"""Function factory for file extension argparse assertion
Args:
extension (string): the file extension to assert
Returns:
string: the supplied extension, if assertion is successful.
"""
def file_extension_assertion(file_path):
base, ext = os.path.splitext(file_path)
if ext.lower() != extension:
raise argparse.ArgumentTypeError('File must have ' + extension + ' extension')
return file_path
return file_extension_assertion
|
[
"def",
"make_file_extension_assertion",
"(",
"extension",
")",
":",
"def",
"file_extension_assertion",
"(",
"file_path",
")",
":",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_path",
")",
"if",
"ext",
".",
"lower",
"(",
")",
"!=",
"extension",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"'File must have '",
"+",
"extension",
"+",
"' extension'",
")",
"return",
"file_path",
"return",
"file_extension_assertion"
] |
Function factory for file extension argparse assertion
Args:
extension (string): the file extension to assert
Returns:
string: the supplied extension, if assertion is successful.
|
[
"Function",
"factory",
"for",
"file",
"extension",
"argparse",
"assertion",
"Args",
":",
"extension",
"(",
"string",
")",
":",
"the",
"file",
"extension",
"to",
"assert"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/fcn-xs/image_segmentaion.py#L31-L45
|
train
|
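A hedged usage sketch of the factory as an argparse `type`: the returned callable rejects paths with the wrong suffix and passes matching paths through unchanged. The flag names mirror the `args.input` / `args.output` fields read by `main()` further below; the file names are hypothetical and need not exist for parsing to succeed:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input',  type=make_file_extension_assertion('.jpg'),
                    help='input image, must end in .jpg')
parser.add_argument('--output', type=make_file_extension_assertion('.png'),
                    help='output mask, must end in .png')

args = parser.parse_args(['--input', 'street.jpg', '--output', 'street_mask.png'])
print(args.input, args.output)               # street.jpg street_mask.png

# passing e.g. '--input street.bmp' would make argparse exit with
# "File must have .jpg extension"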
apache/incubator-mxnet
|
example/fcn-xs/image_segmentaion.py
|
get_palette
|
def get_palette(num_colors=256):
"""generates the colormap for visualizing the segmentation mask
Args:
num_colors (int): the number of colors to generate in the output palette
Returns:
string: the supplied extension, if assertion is successful.
"""
pallete = [0]*(num_colors*3)
for j in range(0, num_colors):
lab = j
pallete[j*3+0] = 0
pallete[j*3+1] = 0
pallete[j*3+2] = 0
i = 0
while (lab > 0):
pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))
pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i))
pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i))
i = i + 1
lab >>= 3
return pallete
|
python
|
def get_palette(num_colors=256):
"""generates the colormap for visualizing the segmentation mask
Args:
num_colors (int): the number of colors to generate in the output palette
Returns:
string: the supplied extension, if assertion is successful.
"""
pallete = [0]*(num_colors*3)
for j in range(0, num_colors):
lab = j
pallete[j*3+0] = 0
pallete[j*3+1] = 0
pallete[j*3+2] = 0
i = 0
while (lab > 0):
pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))
pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i))
pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i))
i = i + 1
lab >>= 3
return pallete
|
[
"def",
"get_palette",
"(",
"num_colors",
"=",
"256",
")",
":",
"pallete",
"=",
"[",
"0",
"]",
"*",
"(",
"num_colors",
"*",
"3",
")",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"num_colors",
")",
":",
"lab",
"=",
"j",
"pallete",
"[",
"j",
"*",
"3",
"+",
"0",
"]",
"=",
"0",
"pallete",
"[",
"j",
"*",
"3",
"+",
"1",
"]",
"=",
"0",
"pallete",
"[",
"j",
"*",
"3",
"+",
"2",
"]",
"=",
"0",
"i",
"=",
"0",
"while",
"(",
"lab",
">",
"0",
")",
":",
"pallete",
"[",
"j",
"*",
"3",
"+",
"0",
"]",
"|=",
"(",
"(",
"(",
"lab",
">>",
"0",
")",
"&",
"1",
")",
"<<",
"(",
"7",
"-",
"i",
")",
")",
"pallete",
"[",
"j",
"*",
"3",
"+",
"1",
"]",
"|=",
"(",
"(",
"(",
"lab",
">>",
"1",
")",
"&",
"1",
")",
"<<",
"(",
"7",
"-",
"i",
")",
")",
"pallete",
"[",
"j",
"*",
"3",
"+",
"2",
"]",
"|=",
"(",
"(",
"(",
"lab",
">>",
"2",
")",
"&",
"1",
")",
"<<",
"(",
"7",
"-",
"i",
")",
")",
"i",
"=",
"i",
"+",
"1",
"lab",
">>=",
"3",
"return",
"pallete"
] |
generates the colormap for visualizing the segmentation mask
Args:
num_colors (int): the number of colors to generate in the output palette
Returns:
string: the supplied extension, if assertion is successful.
|
[
"generates",
"the",
"colormap",
"for",
"visualizing",
"the",
"segmentation",
"mask",
"Args",
":",
"num_colors",
"(",
"int",
")",
":",
"the",
"number",
"of",
"colors",
"to",
"generate",
"in",
"the",
"output",
"palette"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/fcn-xs/image_segmentaion.py#L47-L69
|
train
|
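The `Returns:` line in the `get_palette` docstring looks like a copy-paste remnant from `make_file_extension_assertion`; the function actually returns a flat list of `num_colors * 3` RGB values. A hedged sketch of the intended use, mirroring what `main()` below does; the mask values and output file name are made up for the demo:

import numpy as np
from PIL import Image

mask = np.random.randint(0, 21, size=(8, 8), dtype=np.uint8)   # fake class-index map
img = Image.fromarray(mask)                 # single-channel 'L' image of indices
img.putpalette(get_palette())               # attaching the 256*3 palette makes it 'P'
img.save('palette_demo.png')                # hypothetical output file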
apache/incubator-mxnet
|
example/fcn-xs/image_segmentaion.py
|
get_data
|
def get_data(img_path):
"""get the (1, 3, h, w) np.array data for the supplied image
Args:
img_path (string): the input image path
Returns:
np.array: image data in a (1, 3, h, w) shape
"""
mean = np.array([123.68, 116.779, 103.939]) # (R,G,B)
img = Image.open(img_path)
img = np.array(img, dtype=np.float32)
reshaped_mean = mean.reshape(1, 1, 3)
img = img - reshaped_mean
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
img = np.expand_dims(img, axis=0)
return img
|
python
|
def get_data(img_path):
"""get the (1, 3, h, w) np.array data for the supplied image
Args:
img_path (string): the input image path
Returns:
np.array: image data in a (1, 3, h, w) shape
"""
mean = np.array([123.68, 116.779, 103.939]) # (R,G,B)
img = Image.open(img_path)
img = np.array(img, dtype=np.float32)
reshaped_mean = mean.reshape(1, 1, 3)
img = img - reshaped_mean
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
img = np.expand_dims(img, axis=0)
return img
|
[
"def",
"get_data",
"(",
"img_path",
")",
":",
"mean",
"=",
"np",
".",
"array",
"(",
"[",
"123.68",
",",
"116.779",
",",
"103.939",
"]",
")",
"# (R,G,B)",
"img",
"=",
"Image",
".",
"open",
"(",
"img_path",
")",
"img",
"=",
"np",
".",
"array",
"(",
"img",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"reshaped_mean",
"=",
"mean",
".",
"reshape",
"(",
"1",
",",
"1",
",",
"3",
")",
"img",
"=",
"img",
"-",
"reshaped_mean",
"img",
"=",
"np",
".",
"swapaxes",
"(",
"img",
",",
"0",
",",
"2",
")",
"img",
"=",
"np",
".",
"swapaxes",
"(",
"img",
",",
"1",
",",
"2",
")",
"img",
"=",
"np",
".",
"expand_dims",
"(",
"img",
",",
"axis",
"=",
"0",
")",
"return",
"img"
] |
get the (1, 3, h, w) np.array data for the supplied image
Args:
img_path (string): the input image path
Returns:
np.array: image data in a (1, 3, h, w) shape
|
[
"get",
"the",
"(",
"1",
"3",
"h",
"w",
")",
"np",
".",
"array",
"data",
"for",
"the",
"supplied",
"image",
"Args",
":",
"img_path",
"(",
"string",
")",
":",
"the",
"input",
"image",
"path"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/fcn-xs/image_segmentaion.py#L71-L88
|
train
|
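A hedged shape-check sketch for `get_data`: it assumes a 3-channel RGB input and returns a mean-subtracted array in NCHW layout. The dummy image is synthetic and the file name hypothetical:

import numpy as np
from PIL import Image

Image.fromarray(np.zeros((240, 320, 3), dtype=np.uint8)).save('dummy.jpg')

arr = get_data('dummy.jpg')
print(arr.shape)                            # (1, 3, 240, 320): batch, channel, h, w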
apache/incubator-mxnet
|
example/fcn-xs/image_segmentaion.py
|
main
|
def main():
"""Module main execution"""
# Initialization variables - update to change your model and execution context
model_prefix = "FCN8s_VGG16"
epoch = 19
# By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.
ctx = mx.cpu()
fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch)
fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx)
data_shape = fcnxs_args["data"].shape
label_shape = (1, data_shape[2]*data_shape[3])
fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx)
exector = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null", aux_states=fcnxs_args)
exector.forward(is_train=False)
output = exector.outputs[0]
out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1)))
out_img = Image.fromarray(out_img)
out_img.putpalette(get_palette())
out_img.save(args.output)
|
python
|
def main():
"""Module main execution"""
# Initialization variables - update to change your model and execution context
model_prefix = "FCN8s_VGG16"
epoch = 19
# By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.
ctx = mx.cpu()
fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch)
fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx)
data_shape = fcnxs_args["data"].shape
label_shape = (1, data_shape[2]*data_shape[3])
fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx)
exector = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null", aux_states=fcnxs_args)
exector.forward(is_train=False)
output = exector.outputs[0]
out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1)))
out_img = Image.fromarray(out_img)
out_img.putpalette(get_palette())
out_img.save(args.output)
|
[
"def",
"main",
"(",
")",
":",
"# Initialization variables - update to change your model and execution context",
"model_prefix",
"=",
"\"FCN8s_VGG16\"",
"epoch",
"=",
"19",
"# By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.",
"ctx",
"=",
"mx",
".",
"cpu",
"(",
")",
"fcnxs",
",",
"fcnxs_args",
",",
"fcnxs_auxs",
"=",
"mx",
".",
"model",
".",
"load_checkpoint",
"(",
"model_prefix",
",",
"epoch",
")",
"fcnxs_args",
"[",
"\"data\"",
"]",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"get_data",
"(",
"args",
".",
"input",
")",
",",
"ctx",
")",
"data_shape",
"=",
"fcnxs_args",
"[",
"\"data\"",
"]",
".",
"shape",
"label_shape",
"=",
"(",
"1",
",",
"data_shape",
"[",
"2",
"]",
"*",
"data_shape",
"[",
"3",
"]",
")",
"fcnxs_args",
"[",
"\"softmax_label\"",
"]",
"=",
"mx",
".",
"nd",
".",
"empty",
"(",
"label_shape",
",",
"ctx",
")",
"exector",
"=",
"fcnxs",
".",
"bind",
"(",
"ctx",
",",
"fcnxs_args",
",",
"args_grad",
"=",
"None",
",",
"grad_req",
"=",
"\"null\"",
",",
"aux_states",
"=",
"fcnxs_args",
")",
"exector",
".",
"forward",
"(",
"is_train",
"=",
"False",
")",
"output",
"=",
"exector",
".",
"outputs",
"[",
"0",
"]",
"out_img",
"=",
"np",
".",
"uint8",
"(",
"np",
".",
"squeeze",
"(",
"output",
".",
"asnumpy",
"(",
")",
".",
"argmax",
"(",
"axis",
"=",
"1",
")",
")",
")",
"out_img",
"=",
"Image",
".",
"fromarray",
"(",
"out_img",
")",
"out_img",
".",
"putpalette",
"(",
"get_palette",
"(",
")",
")",
"out_img",
".",
"save",
"(",
"args",
".",
"output",
")"
] |
Module main execution
|
[
"Module",
"main",
"execution"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/fcn-xs/image_segmentaion.py#L90-L110
|
train
|
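`mx.model.load_checkpoint(model_prefix, epoch)` in `main()` expects a symbol file and an epoch-numbered parameter file next to the script. A hedged pre-flight check, assuming the standard MXNet checkpoint naming for prefix "FCN8s_VGG16" and epoch 19:

import os

expected = ['FCN8s_VGG16-symbol.json', 'FCN8s_VGG16-0019.params']
missing = [f for f in expected if not os.path.exists(f)]
if missing:
    raise FileNotFoundError('download the FCN8s_VGG16 checkpoint first, missing: %s' % missing)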