repo
stringlengths 7
55
| path
stringlengths 4
223
| func_name
stringlengths 1
134
| original_string
stringlengths 75
104k
| language
stringclasses 1
value | code
stringlengths 75
104k
| code_tokens
listlengths 19
28.4k
| docstring
stringlengths 1
46.9k
| docstring_tokens
listlengths 1
1.97k
| sha
stringlengths 40
40
| url
stringlengths 87
315
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter.hard_reset
|
def hard_reset(self):
"""Ignore roll over data and set to start."""
if self.shuffle:
self._shuffle_data()
self.cursor = -self.batch_size
self._cache_data = None
self._cache_label = None
|
python
|
def hard_reset(self):
"""Ignore roll over data and set to start."""
if self.shuffle:
self._shuffle_data()
self.cursor = -self.batch_size
self._cache_data = None
self._cache_label = None
|
[
"def",
"hard_reset",
"(",
"self",
")",
":",
"if",
"self",
".",
"shuffle",
":",
"self",
".",
"_shuffle_data",
"(",
")",
"self",
".",
"cursor",
"=",
"-",
"self",
".",
"batch_size",
"self",
".",
"_cache_data",
"=",
"None",
"self",
".",
"_cache_label",
"=",
"None"
] |
Ignore roll over data and set to start.
|
[
"Ignore",
"roll",
"over",
"data",
"and",
"set",
"to",
"start",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L650-L656
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter.reset
|
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.shuffle:
self._shuffle_data()
# the range below indicate the last batch
if self.last_batch_handle == 'roll_over' and \
self.num_data - self.batch_size < self.cursor < self.num_data:
# (self.cursor - self.num_data) represents the data we have for the last batch
self.cursor = self.cursor - self.num_data - self.batch_size
else:
self.cursor = -self.batch_size
|
python
|
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.shuffle:
self._shuffle_data()
# the range below indicate the last batch
if self.last_batch_handle == 'roll_over' and \
self.num_data - self.batch_size < self.cursor < self.num_data:
# (self.cursor - self.num_data) represents the data we have for the last batch
self.cursor = self.cursor - self.num_data - self.batch_size
else:
self.cursor = -self.batch_size
|
[
"def",
"reset",
"(",
"self",
")",
":",
"if",
"self",
".",
"shuffle",
":",
"self",
".",
"_shuffle_data",
"(",
")",
"# the range below indicate the last batch",
"if",
"self",
".",
"last_batch_handle",
"==",
"'roll_over'",
"and",
"self",
".",
"num_data",
"-",
"self",
".",
"batch_size",
"<",
"self",
".",
"cursor",
"<",
"self",
".",
"num_data",
":",
"# (self.cursor - self.num_data) represents the data we have for the last batch",
"self",
".",
"cursor",
"=",
"self",
".",
"cursor",
"-",
"self",
".",
"num_data",
"-",
"self",
".",
"batch_size",
"else",
":",
"self",
".",
"cursor",
"=",
"-",
"self",
".",
"batch_size"
] |
Resets the iterator to the beginning of the data.
|
[
"Resets",
"the",
"iterator",
"to",
"the",
"beginning",
"of",
"the",
"data",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L658-L668
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter.iter_next
|
def iter_next(self):
"""Increments the coursor by batch_size for next batch
and check current cursor if it exceed the number of data points."""
self.cursor += self.batch_size
return self.cursor < self.num_data
|
python
|
def iter_next(self):
"""Increments the coursor by batch_size for next batch
and check current cursor if it exceed the number of data points."""
self.cursor += self.batch_size
return self.cursor < self.num_data
|
[
"def",
"iter_next",
"(",
"self",
")",
":",
"self",
".",
"cursor",
"+=",
"self",
".",
"batch_size",
"return",
"self",
".",
"cursor",
"<",
"self",
".",
"num_data"
] |
Increments the coursor by batch_size for next batch
and check current cursor if it exceed the number of data points.
|
[
"Increments",
"the",
"coursor",
"by",
"batch_size",
"for",
"next",
"batch",
"and",
"check",
"current",
"cursor",
"if",
"it",
"exceed",
"the",
"number",
"of",
"data",
"points",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L670-L674
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter.next
|
def next(self):
"""Returns the next batch of data."""
if not self.iter_next():
raise StopIteration
data = self.getdata()
label = self.getlabel()
# iter should stop when last batch is not complete
if data[0].shape[0] != self.batch_size:
# in this case, cache it for next epoch
self._cache_data = data
self._cache_label = label
raise StopIteration
return DataBatch(data=data, label=label, \
pad=self.getpad(), index=None)
|
python
|
def next(self):
"""Returns the next batch of data."""
if not self.iter_next():
raise StopIteration
data = self.getdata()
label = self.getlabel()
# iter should stop when last batch is not complete
if data[0].shape[0] != self.batch_size:
# in this case, cache it for next epoch
self._cache_data = data
self._cache_label = label
raise StopIteration
return DataBatch(data=data, label=label, \
pad=self.getpad(), index=None)
|
[
"def",
"next",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"iter_next",
"(",
")",
":",
"raise",
"StopIteration",
"data",
"=",
"self",
".",
"getdata",
"(",
")",
"label",
"=",
"self",
".",
"getlabel",
"(",
")",
"# iter should stop when last batch is not complete",
"if",
"data",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"batch_size",
":",
"# in this case, cache it for next epoch",
"self",
".",
"_cache_data",
"=",
"data",
"self",
".",
"_cache_label",
"=",
"label",
"raise",
"StopIteration",
"return",
"DataBatch",
"(",
"data",
"=",
"data",
",",
"label",
"=",
"label",
",",
"pad",
"=",
"self",
".",
"getpad",
"(",
")",
",",
"index",
"=",
"None",
")"
] |
Returns the next batch of data.
|
[
"Returns",
"the",
"next",
"batch",
"of",
"data",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L676-L689
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter._getdata
|
def _getdata(self, data_source, start=None, end=None):
"""Load data from underlying arrays."""
assert start is not None or end is not None, 'should at least specify start or end'
start = start if start is not None else 0
if end is None:
end = data_source[0][1].shape[0] if data_source else 0
s = slice(start, end)
return [
x[1][s]
if isinstance(x[1], (np.ndarray, NDArray)) else
# h5py (only supports indices in increasing order)
array(x[1][sorted(self.idx[s])][[
list(self.idx[s]).index(i)
for i in sorted(self.idx[s])
]]) for x in data_source
]
|
python
|
def _getdata(self, data_source, start=None, end=None):
"""Load data from underlying arrays."""
assert start is not None or end is not None, 'should at least specify start or end'
start = start if start is not None else 0
if end is None:
end = data_source[0][1].shape[0] if data_source else 0
s = slice(start, end)
return [
x[1][s]
if isinstance(x[1], (np.ndarray, NDArray)) else
# h5py (only supports indices in increasing order)
array(x[1][sorted(self.idx[s])][[
list(self.idx[s]).index(i)
for i in sorted(self.idx[s])
]]) for x in data_source
]
|
[
"def",
"_getdata",
"(",
"self",
",",
"data_source",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"assert",
"start",
"is",
"not",
"None",
"or",
"end",
"is",
"not",
"None",
",",
"'should at least specify start or end'",
"start",
"=",
"start",
"if",
"start",
"is",
"not",
"None",
"else",
"0",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"data_source",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"shape",
"[",
"0",
"]",
"if",
"data_source",
"else",
"0",
"s",
"=",
"slice",
"(",
"start",
",",
"end",
")",
"return",
"[",
"x",
"[",
"1",
"]",
"[",
"s",
"]",
"if",
"isinstance",
"(",
"x",
"[",
"1",
"]",
",",
"(",
"np",
".",
"ndarray",
",",
"NDArray",
")",
")",
"else",
"# h5py (only supports indices in increasing order)",
"array",
"(",
"x",
"[",
"1",
"]",
"[",
"sorted",
"(",
"self",
".",
"idx",
"[",
"s",
"]",
")",
"]",
"[",
"[",
"list",
"(",
"self",
".",
"idx",
"[",
"s",
"]",
")",
".",
"index",
"(",
"i",
")",
"for",
"i",
"in",
"sorted",
"(",
"self",
".",
"idx",
"[",
"s",
"]",
")",
"]",
"]",
")",
"for",
"x",
"in",
"data_source",
"]"
] |
Load data from underlying arrays.
|
[
"Load",
"data",
"from",
"underlying",
"arrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L691-L706
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter._concat
|
def _concat(self, first_data, second_data):
"""Helper function to concat two NDArrays."""
assert len(first_data) == len(
second_data), 'data source should contain the same size'
if first_data and second_data:
return [
concat(
first_data[x],
second_data[x],
dim=0
) for x in range(len(first_data))
]
elif (not first_data) and (not second_data):
return []
else:
return [
first_data[0] if first_data else second_data[0]
for x in range(len(first_data))
]
|
python
|
def _concat(self, first_data, second_data):
"""Helper function to concat two NDArrays."""
assert len(first_data) == len(
second_data), 'data source should contain the same size'
if first_data and second_data:
return [
concat(
first_data[x],
second_data[x],
dim=0
) for x in range(len(first_data))
]
elif (not first_data) and (not second_data):
return []
else:
return [
first_data[0] if first_data else second_data[0]
for x in range(len(first_data))
]
|
[
"def",
"_concat",
"(",
"self",
",",
"first_data",
",",
"second_data",
")",
":",
"assert",
"len",
"(",
"first_data",
")",
"==",
"len",
"(",
"second_data",
")",
",",
"'data source should contain the same size'",
"if",
"first_data",
"and",
"second_data",
":",
"return",
"[",
"concat",
"(",
"first_data",
"[",
"x",
"]",
",",
"second_data",
"[",
"x",
"]",
",",
"dim",
"=",
"0",
")",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"first_data",
")",
")",
"]",
"elif",
"(",
"not",
"first_data",
")",
"and",
"(",
"not",
"second_data",
")",
":",
"return",
"[",
"]",
"else",
":",
"return",
"[",
"first_data",
"[",
"0",
"]",
"if",
"first_data",
"else",
"second_data",
"[",
"0",
"]",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"first_data",
")",
")",
"]"
] |
Helper function to concat two NDArrays.
|
[
"Helper",
"function",
"to",
"concat",
"two",
"NDArrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L708-L726
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter._batchify
|
def _batchify(self, data_source):
"""Load data from underlying arrays, internal use only."""
assert self.cursor < self.num_data, 'DataIter needs reset.'
# first batch of next epoch with 'roll_over'
if self.last_batch_handle == 'roll_over' and \
-self.batch_size < self.cursor < 0:
assert self._cache_data is not None or self._cache_label is not None, \
'next epoch should have cached data'
cache_data = self._cache_data if self._cache_data is not None else self._cache_label
second_data = self._getdata(
data_source, end=self.cursor + self.batch_size)
if self._cache_data is not None:
self._cache_data = None
else:
self._cache_label = None
return self._concat(cache_data, second_data)
# last batch with 'pad'
elif self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
pad = self.batch_size - self.num_data + self.cursor
first_data = self._getdata(data_source, start=self.cursor)
second_data = self._getdata(data_source, end=pad)
return self._concat(first_data, second_data)
# normal case
else:
if self.cursor + self.batch_size < self.num_data:
end_idx = self.cursor + self.batch_size
# get incomplete last batch
else:
end_idx = self.num_data
return self._getdata(data_source, self.cursor, end_idx)
|
python
|
def _batchify(self, data_source):
"""Load data from underlying arrays, internal use only."""
assert self.cursor < self.num_data, 'DataIter needs reset.'
# first batch of next epoch with 'roll_over'
if self.last_batch_handle == 'roll_over' and \
-self.batch_size < self.cursor < 0:
assert self._cache_data is not None or self._cache_label is not None, \
'next epoch should have cached data'
cache_data = self._cache_data if self._cache_data is not None else self._cache_label
second_data = self._getdata(
data_source, end=self.cursor + self.batch_size)
if self._cache_data is not None:
self._cache_data = None
else:
self._cache_label = None
return self._concat(cache_data, second_data)
# last batch with 'pad'
elif self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
pad = self.batch_size - self.num_data + self.cursor
first_data = self._getdata(data_source, start=self.cursor)
second_data = self._getdata(data_source, end=pad)
return self._concat(first_data, second_data)
# normal case
else:
if self.cursor + self.batch_size < self.num_data:
end_idx = self.cursor + self.batch_size
# get incomplete last batch
else:
end_idx = self.num_data
return self._getdata(data_source, self.cursor, end_idx)
|
[
"def",
"_batchify",
"(",
"self",
",",
"data_source",
")",
":",
"assert",
"self",
".",
"cursor",
"<",
"self",
".",
"num_data",
",",
"'DataIter needs reset.'",
"# first batch of next epoch with 'roll_over'",
"if",
"self",
".",
"last_batch_handle",
"==",
"'roll_over'",
"and",
"-",
"self",
".",
"batch_size",
"<",
"self",
".",
"cursor",
"<",
"0",
":",
"assert",
"self",
".",
"_cache_data",
"is",
"not",
"None",
"or",
"self",
".",
"_cache_label",
"is",
"not",
"None",
",",
"'next epoch should have cached data'",
"cache_data",
"=",
"self",
".",
"_cache_data",
"if",
"self",
".",
"_cache_data",
"is",
"not",
"None",
"else",
"self",
".",
"_cache_label",
"second_data",
"=",
"self",
".",
"_getdata",
"(",
"data_source",
",",
"end",
"=",
"self",
".",
"cursor",
"+",
"self",
".",
"batch_size",
")",
"if",
"self",
".",
"_cache_data",
"is",
"not",
"None",
":",
"self",
".",
"_cache_data",
"=",
"None",
"else",
":",
"self",
".",
"_cache_label",
"=",
"None",
"return",
"self",
".",
"_concat",
"(",
"cache_data",
",",
"second_data",
")",
"# last batch with 'pad'",
"elif",
"self",
".",
"last_batch_handle",
"==",
"'pad'",
"and",
"self",
".",
"cursor",
"+",
"self",
".",
"batch_size",
">",
"self",
".",
"num_data",
":",
"pad",
"=",
"self",
".",
"batch_size",
"-",
"self",
".",
"num_data",
"+",
"self",
".",
"cursor",
"first_data",
"=",
"self",
".",
"_getdata",
"(",
"data_source",
",",
"start",
"=",
"self",
".",
"cursor",
")",
"second_data",
"=",
"self",
".",
"_getdata",
"(",
"data_source",
",",
"end",
"=",
"pad",
")",
"return",
"self",
".",
"_concat",
"(",
"first_data",
",",
"second_data",
")",
"# normal case",
"else",
":",
"if",
"self",
".",
"cursor",
"+",
"self",
".",
"batch_size",
"<",
"self",
".",
"num_data",
":",
"end_idx",
"=",
"self",
".",
"cursor",
"+",
"self",
".",
"batch_size",
"# get incomplete last batch",
"else",
":",
"end_idx",
"=",
"self",
".",
"num_data",
"return",
"self",
".",
"_getdata",
"(",
"data_source",
",",
"self",
".",
"cursor",
",",
"end_idx",
")"
] |
Load data from underlying arrays, internal use only.
|
[
"Load",
"data",
"from",
"underlying",
"arrays",
"internal",
"use",
"only",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L728-L758
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter.getpad
|
def getpad(self):
"""Get pad value of DataBatch."""
if self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data
# check the first batch
elif self.last_batch_handle == 'roll_over' and \
-self.batch_size < self.cursor < 0:
return -self.cursor
else:
return 0
|
python
|
def getpad(self):
"""Get pad value of DataBatch."""
if self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data
# check the first batch
elif self.last_batch_handle == 'roll_over' and \
-self.batch_size < self.cursor < 0:
return -self.cursor
else:
return 0
|
[
"def",
"getpad",
"(",
"self",
")",
":",
"if",
"self",
".",
"last_batch_handle",
"==",
"'pad'",
"and",
"self",
".",
"cursor",
"+",
"self",
".",
"batch_size",
">",
"self",
".",
"num_data",
":",
"return",
"self",
".",
"cursor",
"+",
"self",
".",
"batch_size",
"-",
"self",
".",
"num_data",
"# check the first batch",
"elif",
"self",
".",
"last_batch_handle",
"==",
"'roll_over'",
"and",
"-",
"self",
".",
"batch_size",
"<",
"self",
".",
"cursor",
"<",
"0",
":",
"return",
"-",
"self",
".",
"cursor",
"else",
":",
"return",
"0"
] |
Get pad value of DataBatch.
|
[
"Get",
"pad",
"value",
"of",
"DataBatch",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L768-L778
|
train
|
apache/incubator-mxnet
|
python/mxnet/io/io.py
|
NDArrayIter._shuffle_data
|
def _shuffle_data(self):
"""Shuffle the data."""
# shuffle index
np.random.shuffle(self.idx)
# get the data by corresponding index
self.data = _getdata_by_idx(self.data, self.idx)
self.label = _getdata_by_idx(self.label, self.idx)
|
python
|
def _shuffle_data(self):
"""Shuffle the data."""
# shuffle index
np.random.shuffle(self.idx)
# get the data by corresponding index
self.data = _getdata_by_idx(self.data, self.idx)
self.label = _getdata_by_idx(self.label, self.idx)
|
[
"def",
"_shuffle_data",
"(",
"self",
")",
":",
"# shuffle index",
"np",
".",
"random",
".",
"shuffle",
"(",
"self",
".",
"idx",
")",
"# get the data by corresponding index",
"self",
".",
"data",
"=",
"_getdata_by_idx",
"(",
"self",
".",
"data",
",",
"self",
".",
"idx",
")",
"self",
".",
"label",
"=",
"_getdata_by_idx",
"(",
"self",
".",
"label",
",",
"self",
".",
"idx",
")"
] |
Shuffle the data.
|
[
"Shuffle",
"the",
"data",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L780-L786
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_quantize_params
|
def _quantize_params(qsym, params, th_dict):
"""Given a quantized symbol and a dict of params that have not been quantized,
generate quantized params. Currently only supports quantizing the arg_params
with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols
that are excluded from being quantized, their corresponding params will
not be quantized, but saved together with quantized params of the symbols that
have been quantized.
Parameters
----------
qsym : Symbol
Quantized symbol from FP32 symbol.
params : dict of str->NDArray
th_dict: dict of min/max pairs of layers' output
"""
inputs_name = qsym.list_arguments()
quantized_params = {}
for name in inputs_name:
if name.endswith(('weight_quantize', 'bias_quantize')):
original_name = name[:-len('_quantize')]
param = params[original_name]
val, vmin, vmax = ndarray.contrib.quantize(data=param,
min_range=ndarray.min(param),
max_range=ndarray.max(param),
out_type='int8')
quantized_params[name] = val
quantized_params[name+'_min'] = vmin
quantized_params[name+'_max'] = vmax
elif name in params:
quantized_params[name] = params[name]
elif name.endswith(('_min')):
output = name[: - len('_min')]
if output in th_dict:
quantized_params[name] = ndarray.array([th_dict[output][0]])
elif name.endswith(('_max')):
output = name[: - len('_min')]
if output in th_dict:
quantized_params[name] = ndarray.array([th_dict[output][1]])
return quantized_params
|
python
|
def _quantize_params(qsym, params, th_dict):
"""Given a quantized symbol and a dict of params that have not been quantized,
generate quantized params. Currently only supports quantizing the arg_params
with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols
that are excluded from being quantized, their corresponding params will
not be quantized, but saved together with quantized params of the symbols that
have been quantized.
Parameters
----------
qsym : Symbol
Quantized symbol from FP32 symbol.
params : dict of str->NDArray
th_dict: dict of min/max pairs of layers' output
"""
inputs_name = qsym.list_arguments()
quantized_params = {}
for name in inputs_name:
if name.endswith(('weight_quantize', 'bias_quantize')):
original_name = name[:-len('_quantize')]
param = params[original_name]
val, vmin, vmax = ndarray.contrib.quantize(data=param,
min_range=ndarray.min(param),
max_range=ndarray.max(param),
out_type='int8')
quantized_params[name] = val
quantized_params[name+'_min'] = vmin
quantized_params[name+'_max'] = vmax
elif name in params:
quantized_params[name] = params[name]
elif name.endswith(('_min')):
output = name[: - len('_min')]
if output in th_dict:
quantized_params[name] = ndarray.array([th_dict[output][0]])
elif name.endswith(('_max')):
output = name[: - len('_min')]
if output in th_dict:
quantized_params[name] = ndarray.array([th_dict[output][1]])
return quantized_params
|
[
"def",
"_quantize_params",
"(",
"qsym",
",",
"params",
",",
"th_dict",
")",
":",
"inputs_name",
"=",
"qsym",
".",
"list_arguments",
"(",
")",
"quantized_params",
"=",
"{",
"}",
"for",
"name",
"in",
"inputs_name",
":",
"if",
"name",
".",
"endswith",
"(",
"(",
"'weight_quantize'",
",",
"'bias_quantize'",
")",
")",
":",
"original_name",
"=",
"name",
"[",
":",
"-",
"len",
"(",
"'_quantize'",
")",
"]",
"param",
"=",
"params",
"[",
"original_name",
"]",
"val",
",",
"vmin",
",",
"vmax",
"=",
"ndarray",
".",
"contrib",
".",
"quantize",
"(",
"data",
"=",
"param",
",",
"min_range",
"=",
"ndarray",
".",
"min",
"(",
"param",
")",
",",
"max_range",
"=",
"ndarray",
".",
"max",
"(",
"param",
")",
",",
"out_type",
"=",
"'int8'",
")",
"quantized_params",
"[",
"name",
"]",
"=",
"val",
"quantized_params",
"[",
"name",
"+",
"'_min'",
"]",
"=",
"vmin",
"quantized_params",
"[",
"name",
"+",
"'_max'",
"]",
"=",
"vmax",
"elif",
"name",
"in",
"params",
":",
"quantized_params",
"[",
"name",
"]",
"=",
"params",
"[",
"name",
"]",
"elif",
"name",
".",
"endswith",
"(",
"(",
"'_min'",
")",
")",
":",
"output",
"=",
"name",
"[",
":",
"-",
"len",
"(",
"'_min'",
")",
"]",
"if",
"output",
"in",
"th_dict",
":",
"quantized_params",
"[",
"name",
"]",
"=",
"ndarray",
".",
"array",
"(",
"[",
"th_dict",
"[",
"output",
"]",
"[",
"0",
"]",
"]",
")",
"elif",
"name",
".",
"endswith",
"(",
"(",
"'_max'",
")",
")",
":",
"output",
"=",
"name",
"[",
":",
"-",
"len",
"(",
"'_min'",
")",
"]",
"if",
"output",
"in",
"th_dict",
":",
"quantized_params",
"[",
"name",
"]",
"=",
"ndarray",
".",
"array",
"(",
"[",
"th_dict",
"[",
"output",
"]",
"[",
"1",
"]",
"]",
")",
"return",
"quantized_params"
] |
Given a quantized symbol and a dict of params that have not been quantized,
generate quantized params. Currently only supports quantizing the arg_params
with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols
that are excluded from being quantized, their corresponding params will
not be quantized, but saved together with quantized params of the symbols that
have been quantized.
Parameters
----------
qsym : Symbol
Quantized symbol from FP32 symbol.
params : dict of str->NDArray
th_dict: dict of min/max pairs of layers' output
|
[
"Given",
"a",
"quantized",
"symbol",
"and",
"a",
"dict",
"of",
"params",
"that",
"have",
"not",
"been",
"quantized",
"generate",
"quantized",
"params",
".",
"Currently",
"only",
"supports",
"quantizing",
"the",
"arg_params",
"with",
"names",
"of",
"weight",
"or",
"bias",
"not",
"aux_params",
".",
"If",
"qsym",
"contains",
"symbols",
"that",
"are",
"excluded",
"from",
"being",
"quantized",
"their",
"corresponding",
"params",
"will",
"not",
"be",
"quantized",
"but",
"saved",
"together",
"with",
"quantized",
"params",
"of",
"the",
"symbols",
"that",
"have",
"been",
"quantized",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L43-L81
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_quantize_symbol
|
def _quantize_symbol(sym, excluded_symbols=None, offline_params=None, quantized_dtype='int8'):
"""Given a symbol object representing a neural network of data type FP32,
quantize it into a INT8 network.
Parameters
----------
sym : Symbol
FP32 neural network symbol.
excluded_sym_names : list of strings
A list of strings representing the names of the symbols that users want to excluding
from being quantized.
offline_params : list of strs
Names of the parameters that users want to quantize offline. It's always recommended to
quantize parameters offline so that quantizing parameters during the inference can be
avoided.
quantized_dtype: str
The quantized destination type for input data.
"""
num_excluded_symbols = 0
if excluded_symbols is not None:
assert isinstance(excluded_symbols, list)
num_excluded_symbols = len(excluded_symbols)
else:
excluded_symbols = []
num_offline = 0
offline = []
if offline_params is not None:
num_offline = len(offline_params)
for k in offline_params:
offline.append(c_str(k))
out = SymbolHandle()
check_call(_LIB.MXQuantizeSymbol(sym.handle,
ctypes.byref(out),
mx_uint(num_excluded_symbols),
c_str_array(excluded_symbols),
mx_uint(num_offline),
c_array(ctypes.c_char_p, offline),
c_str(quantized_dtype),
ctypes.c_bool(True)))
return Symbol(out)
|
python
|
def _quantize_symbol(sym, excluded_symbols=None, offline_params=None, quantized_dtype='int8'):
"""Given a symbol object representing a neural network of data type FP32,
quantize it into a INT8 network.
Parameters
----------
sym : Symbol
FP32 neural network symbol.
excluded_sym_names : list of strings
A list of strings representing the names of the symbols that users want to excluding
from being quantized.
offline_params : list of strs
Names of the parameters that users want to quantize offline. It's always recommended to
quantize parameters offline so that quantizing parameters during the inference can be
avoided.
quantized_dtype: str
The quantized destination type for input data.
"""
num_excluded_symbols = 0
if excluded_symbols is not None:
assert isinstance(excluded_symbols, list)
num_excluded_symbols = len(excluded_symbols)
else:
excluded_symbols = []
num_offline = 0
offline = []
if offline_params is not None:
num_offline = len(offline_params)
for k in offline_params:
offline.append(c_str(k))
out = SymbolHandle()
check_call(_LIB.MXQuantizeSymbol(sym.handle,
ctypes.byref(out),
mx_uint(num_excluded_symbols),
c_str_array(excluded_symbols),
mx_uint(num_offline),
c_array(ctypes.c_char_p, offline),
c_str(quantized_dtype),
ctypes.c_bool(True)))
return Symbol(out)
|
[
"def",
"_quantize_symbol",
"(",
"sym",
",",
"excluded_symbols",
"=",
"None",
",",
"offline_params",
"=",
"None",
",",
"quantized_dtype",
"=",
"'int8'",
")",
":",
"num_excluded_symbols",
"=",
"0",
"if",
"excluded_symbols",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"excluded_symbols",
",",
"list",
")",
"num_excluded_symbols",
"=",
"len",
"(",
"excluded_symbols",
")",
"else",
":",
"excluded_symbols",
"=",
"[",
"]",
"num_offline",
"=",
"0",
"offline",
"=",
"[",
"]",
"if",
"offline_params",
"is",
"not",
"None",
":",
"num_offline",
"=",
"len",
"(",
"offline_params",
")",
"for",
"k",
"in",
"offline_params",
":",
"offline",
".",
"append",
"(",
"c_str",
"(",
"k",
")",
")",
"out",
"=",
"SymbolHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXQuantizeSymbol",
"(",
"sym",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"out",
")",
",",
"mx_uint",
"(",
"num_excluded_symbols",
")",
",",
"c_str_array",
"(",
"excluded_symbols",
")",
",",
"mx_uint",
"(",
"num_offline",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_char_p",
",",
"offline",
")",
",",
"c_str",
"(",
"quantized_dtype",
")",
",",
"ctypes",
".",
"c_bool",
"(",
"True",
")",
")",
")",
"return",
"Symbol",
"(",
"out",
")"
] |
Given a symbol object representing a neural network of data type FP32,
quantize it into a INT8 network.
Parameters
----------
sym : Symbol
FP32 neural network symbol.
excluded_sym_names : list of strings
A list of strings representing the names of the symbols that users want to excluding
from being quantized.
offline_params : list of strs
Names of the parameters that users want to quantize offline. It's always recommended to
quantize parameters offline so that quantizing parameters during the inference can be
avoided.
quantized_dtype: str
The quantized destination type for input data.
|
[
"Given",
"a",
"symbol",
"object",
"representing",
"a",
"neural",
"network",
"of",
"data",
"type",
"FP32",
"quantize",
"it",
"into",
"a",
"INT8",
"network",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L83-L124
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_calibrate_quantized_sym
|
def _calibrate_quantized_sym(qsym, th_dict):
"""Given a dictionary containing the thresholds for quantizing the layers,
set the thresholds into the quantized symbol as the params of requantize operators.
"""
if th_dict is None or len(th_dict) == 0:
return qsym
num_layer_outputs = len(th_dict)
layer_output_names = []
min_vals = []
max_vals = []
for k, v in th_dict.items():
layer_output_names.append(k)
min_vals.append(v[0])
max_vals.append(v[1])
calibrated_sym = SymbolHandle()
check_call(_LIB.MXSetCalibTableToQuantizedSymbol(qsym.handle,
mx_uint(num_layer_outputs),
c_str_array(layer_output_names),
c_array(ctypes.c_float, min_vals),
c_array(ctypes.c_float, max_vals),
ctypes.byref(calibrated_sym)))
return Symbol(calibrated_sym)
|
python
|
def _calibrate_quantized_sym(qsym, th_dict):
"""Given a dictionary containing the thresholds for quantizing the layers,
set the thresholds into the quantized symbol as the params of requantize operators.
"""
if th_dict is None or len(th_dict) == 0:
return qsym
num_layer_outputs = len(th_dict)
layer_output_names = []
min_vals = []
max_vals = []
for k, v in th_dict.items():
layer_output_names.append(k)
min_vals.append(v[0])
max_vals.append(v[1])
calibrated_sym = SymbolHandle()
check_call(_LIB.MXSetCalibTableToQuantizedSymbol(qsym.handle,
mx_uint(num_layer_outputs),
c_str_array(layer_output_names),
c_array(ctypes.c_float, min_vals),
c_array(ctypes.c_float, max_vals),
ctypes.byref(calibrated_sym)))
return Symbol(calibrated_sym)
|
[
"def",
"_calibrate_quantized_sym",
"(",
"qsym",
",",
"th_dict",
")",
":",
"if",
"th_dict",
"is",
"None",
"or",
"len",
"(",
"th_dict",
")",
"==",
"0",
":",
"return",
"qsym",
"num_layer_outputs",
"=",
"len",
"(",
"th_dict",
")",
"layer_output_names",
"=",
"[",
"]",
"min_vals",
"=",
"[",
"]",
"max_vals",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"th_dict",
".",
"items",
"(",
")",
":",
"layer_output_names",
".",
"append",
"(",
"k",
")",
"min_vals",
".",
"append",
"(",
"v",
"[",
"0",
"]",
")",
"max_vals",
".",
"append",
"(",
"v",
"[",
"1",
"]",
")",
"calibrated_sym",
"=",
"SymbolHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXSetCalibTableToQuantizedSymbol",
"(",
"qsym",
".",
"handle",
",",
"mx_uint",
"(",
"num_layer_outputs",
")",
",",
"c_str_array",
"(",
"layer_output_names",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_float",
",",
"min_vals",
")",
",",
"c_array",
"(",
"ctypes",
".",
"c_float",
",",
"max_vals",
")",
",",
"ctypes",
".",
"byref",
"(",
"calibrated_sym",
")",
")",
")",
"return",
"Symbol",
"(",
"calibrated_sym",
")"
] |
Given a dictionary containing the thresholds for quantizing the layers,
set the thresholds into the quantized symbol as the params of requantize operators.
|
[
"Given",
"a",
"dictionary",
"containing",
"the",
"thresholds",
"for",
"quantizing",
"the",
"layers",
"set",
"the",
"thresholds",
"into",
"the",
"quantized",
"symbol",
"as",
"the",
"params",
"of",
"requantize",
"operators",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L179-L201
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_collect_layer_output_min_max
|
def _collect_layer_output_min_max(mod, data, include_layer=None,
max_num_examples=None, logger=None):
"""Collect min and max values from layer outputs and save them in
a dictionary mapped by layer names.
"""
collector = _LayerOutputMinMaxCollector(include_layer=include_layer, logger=logger)
num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
return collector.min_max_dict, num_examples
|
python
|
def _collect_layer_output_min_max(mod, data, include_layer=None,
max_num_examples=None, logger=None):
"""Collect min and max values from layer outputs and save them in
a dictionary mapped by layer names.
"""
collector = _LayerOutputMinMaxCollector(include_layer=include_layer, logger=logger)
num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
return collector.min_max_dict, num_examples
|
[
"def",
"_collect_layer_output_min_max",
"(",
"mod",
",",
"data",
",",
"include_layer",
"=",
"None",
",",
"max_num_examples",
"=",
"None",
",",
"logger",
"=",
"None",
")",
":",
"collector",
"=",
"_LayerOutputMinMaxCollector",
"(",
"include_layer",
"=",
"include_layer",
",",
"logger",
"=",
"logger",
")",
"num_examples",
"=",
"_collect_layer_statistics",
"(",
"mod",
",",
"data",
",",
"collector",
",",
"max_num_examples",
",",
"logger",
")",
"return",
"collector",
".",
"min_max_dict",
",",
"num_examples"
] |
Collect min and max values from layer outputs and save them in
a dictionary mapped by layer names.
|
[
"Collect",
"min",
"and",
"max",
"values",
"from",
"layer",
"outputs",
"and",
"save",
"them",
"in",
"a",
"dictionary",
"mapped",
"by",
"layer",
"names",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L223-L230
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_collect_layer_outputs
|
def _collect_layer_outputs(mod, data, include_layer=None, max_num_examples=None, logger=None):
"""Collect layer outputs and save them in a dictionary mapped by layer names."""
collector = _LayerOutputCollector(include_layer=include_layer, logger=logger)
num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
return collector.nd_dict, num_examples
|
python
|
def _collect_layer_outputs(mod, data, include_layer=None, max_num_examples=None, logger=None):
"""Collect layer outputs and save them in a dictionary mapped by layer names."""
collector = _LayerOutputCollector(include_layer=include_layer, logger=logger)
num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
return collector.nd_dict, num_examples
|
[
"def",
"_collect_layer_outputs",
"(",
"mod",
",",
"data",
",",
"include_layer",
"=",
"None",
",",
"max_num_examples",
"=",
"None",
",",
"logger",
"=",
"None",
")",
":",
"collector",
"=",
"_LayerOutputCollector",
"(",
"include_layer",
"=",
"include_layer",
",",
"logger",
"=",
"logger",
")",
"num_examples",
"=",
"_collect_layer_statistics",
"(",
"mod",
",",
"data",
",",
"collector",
",",
"max_num_examples",
",",
"logger",
")",
"return",
"collector",
".",
"nd_dict",
",",
"num_examples"
] |
Collect layer outputs and save them in a dictionary mapped by layer names.
|
[
"Collect",
"layer",
"outputs",
"and",
"save",
"them",
"in",
"a",
"dictionary",
"mapped",
"by",
"layer",
"names",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L233-L237
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_smooth_distribution
|
def _smooth_distribution(p, eps=0.0001):
"""Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
"""
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
|
python
|
def _smooth_distribution(p, eps=0.0001):
"""Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
"""
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
|
[
"def",
"_smooth_distribution",
"(",
"p",
",",
"eps",
"=",
"0.0001",
")",
":",
"is_zeros",
"=",
"(",
"p",
"==",
"0",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"is_nonzeros",
"=",
"(",
"p",
"!=",
"0",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"n_zeros",
"=",
"is_zeros",
".",
"sum",
"(",
")",
"n_nonzeros",
"=",
"p",
".",
"size",
"-",
"n_zeros",
"if",
"not",
"n_nonzeros",
":",
"raise",
"ValueError",
"(",
"'The discrete probability distribution is malformed. All entries are 0.'",
")",
"eps1",
"=",
"eps",
"*",
"float",
"(",
"n_zeros",
")",
"/",
"float",
"(",
"n_nonzeros",
")",
"assert",
"eps1",
"<",
"1.0",
",",
"'n_zeros=%d, n_nonzeros=%d, eps1=%f'",
"%",
"(",
"n_zeros",
",",
"n_nonzeros",
",",
"eps1",
")",
"hist",
"=",
"p",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"hist",
"+=",
"eps",
"*",
"is_zeros",
"+",
"(",
"-",
"eps1",
")",
"*",
"is_nonzeros",
"assert",
"(",
"hist",
"<=",
"0",
")",
".",
"sum",
"(",
")",
"==",
"0",
"return",
"hist"
] |
Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
|
[
"Given",
"a",
"discrete",
"distribution",
"(",
"may",
"have",
"not",
"been",
"normalized",
"to",
"1",
")",
"smooth",
"it",
"by",
"replacing",
"zeros",
"with",
"eps",
"multiplied",
"by",
"a",
"scaling",
"factor",
"and",
"taking",
"the",
"corresponding",
"amount",
"off",
"the",
"non",
"-",
"zero",
"values",
".",
"Ref",
":",
"http",
":",
"//",
"web",
".",
"engr",
".",
"illinois",
".",
"edu",
"/",
"~hanj",
"/",
"cs412",
"/",
"bk3",
"/",
"KL",
"-",
"divergence",
".",
"pdf"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L240-L257
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_get_optimal_threshold
|
def _get_optimal_threshold(arr, quantized_dtype, num_bins=8001, num_quantized_bins=255):
"""Given a dataset, find the optimal threshold for quantizing it.
The reference distribution is `q`, and the candidate distribution is `p`.
`q` is a truncated version of the original distribution.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
"""
if isinstance(arr, NDArray):
arr = arr.asnumpy()
elif isinstance(arr, list):
assert len(arr) != 0
for i, nd in enumerate(arr):
if isinstance(nd, NDArray):
arr[i] = nd.asnumpy()
elif not isinstance(nd, np.ndarray):
raise TypeError('get_optimal_threshold only supports input type of NDArray,'
' list of np.ndarrays or NDArrays, and np.ndarray,'
' while received type=%s' % (str(type(nd))))
arr = np.concatenate(arr)
elif not isinstance(arr, np.ndarray):
raise TypeError('get_optimal_threshold only supports input type of NDArray,'
' list of NDArrays and np.ndarray,'
' while received type=%s' % (str(type(arr))))
min_val = np.min(arr)
max_val = np.max(arr)
th = max(abs(min_val), abs(max_val))
if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
# We need to move negative bins to positive bins to fit uint8 range.
num_quantized_bins = num_quantized_bins * 2 + 1
hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))
zero_bin_idx = num_bins // 2
num_half_quantized_bins = num_quantized_bins // 2
thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)
divergence = np.zeros_like(thresholds)
quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)
# i means the number of bins on half axis excluding the zero bin.
for i in range(num_quantized_bins // 2,
num_bins // 2 + 1):
p_bin_idx_start = zero_bin_idx - i
p_bin_idx_stop = zero_bin_idx + i + 1
thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]
sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]
# generate reference distribution p
p = sliced_nd_hist.copy()
assert p.size % 2 == 1
assert p.size >= num_quantized_bins
# put left outlier count in p[0]
left_outlier_count = np.sum(hist[0:p_bin_idx_start])
p[0] += left_outlier_count
# put right outlier count in p[-1]
right_outlier_count = np.sum(hist[p_bin_idx_stop:])
p[-1] += right_outlier_count
# is_nonzeros[k] indicates whether hist[k] is nonzero
is_nonzeros = (p != 0).astype(np.int32)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = sliced_nd_hist.size // num_quantized_bins
# merge hist into num_quantized_bins bins
for j in range(num_quantized_bins):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float32)
for j in range(num_quantized_bins):
start = j * num_merged_bins
if j == num_quantized_bins - 1:
stop = len(is_nonzeros)
else:
stop = start + num_merged_bins
norm = is_nonzeros[start:stop].sum()
if norm != 0:
q[start:stop] = float(quantized_bins[j]) / float(norm)
q[p == 0] = 0
p = _smooth_distribution(p)
# There is a chance that q is an invalid probability distribution.
try:
q = _smooth_distribution(q)
except ValueError:
divergence[i - num_half_quantized_bins] = float("inf")
divergence[i - num_half_quantized_bins] = stats.entropy(p, q)
min_divergence_idx = np.argmin(divergence)
min_divergence = divergence[min_divergence_idx]
opt_th = thresholds[min_divergence_idx]
return min_val, max_val, min_divergence, opt_th
|
python
|
def _get_optimal_threshold(arr, quantized_dtype, num_bins=8001, num_quantized_bins=255):
"""Given a dataset, find the optimal threshold for quantizing it.
The reference distribution is `q`, and the candidate distribution is `p`.
`q` is a truncated version of the original distribution.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
"""
if isinstance(arr, NDArray):
arr = arr.asnumpy()
elif isinstance(arr, list):
assert len(arr) != 0
for i, nd in enumerate(arr):
if isinstance(nd, NDArray):
arr[i] = nd.asnumpy()
elif not isinstance(nd, np.ndarray):
raise TypeError('get_optimal_threshold only supports input type of NDArray,'
' list of np.ndarrays or NDArrays, and np.ndarray,'
' while received type=%s' % (str(type(nd))))
arr = np.concatenate(arr)
elif not isinstance(arr, np.ndarray):
raise TypeError('get_optimal_threshold only supports input type of NDArray,'
' list of NDArrays and np.ndarray,'
' while received type=%s' % (str(type(arr))))
min_val = np.min(arr)
max_val = np.max(arr)
th = max(abs(min_val), abs(max_val))
if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
# We need to move negative bins to positive bins to fit uint8 range.
num_quantized_bins = num_quantized_bins * 2 + 1
hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))
zero_bin_idx = num_bins // 2
num_half_quantized_bins = num_quantized_bins // 2
thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)
divergence = np.zeros_like(thresholds)
quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)
# i means the number of bins on half axis excluding the zero bin.
for i in range(num_quantized_bins // 2,
num_bins // 2 + 1):
p_bin_idx_start = zero_bin_idx - i
p_bin_idx_stop = zero_bin_idx + i + 1
thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]
sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]
# generate reference distribution p
p = sliced_nd_hist.copy()
assert p.size % 2 == 1
assert p.size >= num_quantized_bins
# put left outlier count in p[0]
left_outlier_count = np.sum(hist[0:p_bin_idx_start])
p[0] += left_outlier_count
# put right outlier count in p[-1]
right_outlier_count = np.sum(hist[p_bin_idx_stop:])
p[-1] += right_outlier_count
# is_nonzeros[k] indicates whether hist[k] is nonzero
is_nonzeros = (p != 0).astype(np.int32)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = sliced_nd_hist.size // num_quantized_bins
# merge hist into num_quantized_bins bins
for j in range(num_quantized_bins):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float32)
for j in range(num_quantized_bins):
start = j * num_merged_bins
if j == num_quantized_bins - 1:
stop = len(is_nonzeros)
else:
stop = start + num_merged_bins
norm = is_nonzeros[start:stop].sum()
if norm != 0:
q[start:stop] = float(quantized_bins[j]) / float(norm)
q[p == 0] = 0
p = _smooth_distribution(p)
# There is a chance that q is an invalid probability distribution.
try:
q = _smooth_distribution(q)
except ValueError:
divergence[i - num_half_quantized_bins] = float("inf")
divergence[i - num_half_quantized_bins] = stats.entropy(p, q)
min_divergence_idx = np.argmin(divergence)
min_divergence = divergence[min_divergence_idx]
opt_th = thresholds[min_divergence_idx]
return min_val, max_val, min_divergence, opt_th
|
[
"def",
"_get_optimal_threshold",
"(",
"arr",
",",
"quantized_dtype",
",",
"num_bins",
"=",
"8001",
",",
"num_quantized_bins",
"=",
"255",
")",
":",
"if",
"isinstance",
"(",
"arr",
",",
"NDArray",
")",
":",
"arr",
"=",
"arr",
".",
"asnumpy",
"(",
")",
"elif",
"isinstance",
"(",
"arr",
",",
"list",
")",
":",
"assert",
"len",
"(",
"arr",
")",
"!=",
"0",
"for",
"i",
",",
"nd",
"in",
"enumerate",
"(",
"arr",
")",
":",
"if",
"isinstance",
"(",
"nd",
",",
"NDArray",
")",
":",
"arr",
"[",
"i",
"]",
"=",
"nd",
".",
"asnumpy",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"nd",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"TypeError",
"(",
"'get_optimal_threshold only supports input type of NDArray,'",
"' list of np.ndarrays or NDArrays, and np.ndarray,'",
"' while received type=%s'",
"%",
"(",
"str",
"(",
"type",
"(",
"nd",
")",
")",
")",
")",
"arr",
"=",
"np",
".",
"concatenate",
"(",
"arr",
")",
"elif",
"not",
"isinstance",
"(",
"arr",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"TypeError",
"(",
"'get_optimal_threshold only supports input type of NDArray,'",
"' list of NDArrays and np.ndarray,'",
"' while received type=%s'",
"%",
"(",
"str",
"(",
"type",
"(",
"arr",
")",
")",
")",
")",
"min_val",
"=",
"np",
".",
"min",
"(",
"arr",
")",
"max_val",
"=",
"np",
".",
"max",
"(",
"arr",
")",
"th",
"=",
"max",
"(",
"abs",
"(",
"min_val",
")",
",",
"abs",
"(",
"max_val",
")",
")",
"if",
"min_val",
">=",
"0",
"and",
"quantized_dtype",
"in",
"[",
"'auto'",
",",
"'uint8'",
"]",
":",
"# We need to move negative bins to positive bins to fit uint8 range.",
"num_quantized_bins",
"=",
"num_quantized_bins",
"*",
"2",
"+",
"1",
"hist",
",",
"hist_edges",
"=",
"np",
".",
"histogram",
"(",
"arr",
",",
"bins",
"=",
"num_bins",
",",
"range",
"=",
"(",
"-",
"th",
",",
"th",
")",
")",
"zero_bin_idx",
"=",
"num_bins",
"//",
"2",
"num_half_quantized_bins",
"=",
"num_quantized_bins",
"//",
"2",
"thresholds",
"=",
"np",
".",
"zeros",
"(",
"num_bins",
"//",
"2",
"+",
"1",
"-",
"num_quantized_bins",
"//",
"2",
")",
"divergence",
"=",
"np",
".",
"zeros_like",
"(",
"thresholds",
")",
"quantized_bins",
"=",
"np",
".",
"zeros",
"(",
"num_quantized_bins",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"# i means the number of bins on half axis excluding the zero bin.",
"for",
"i",
"in",
"range",
"(",
"num_quantized_bins",
"//",
"2",
",",
"num_bins",
"//",
"2",
"+",
"1",
")",
":",
"p_bin_idx_start",
"=",
"zero_bin_idx",
"-",
"i",
"p_bin_idx_stop",
"=",
"zero_bin_idx",
"+",
"i",
"+",
"1",
"thresholds",
"[",
"i",
"-",
"num_half_quantized_bins",
"]",
"=",
"hist_edges",
"[",
"p_bin_idx_stop",
"]",
"sliced_nd_hist",
"=",
"hist",
"[",
"p_bin_idx_start",
":",
"p_bin_idx_stop",
"]",
"# generate reference distribution p",
"p",
"=",
"sliced_nd_hist",
".",
"copy",
"(",
")",
"assert",
"p",
".",
"size",
"%",
"2",
"==",
"1",
"assert",
"p",
".",
"size",
">=",
"num_quantized_bins",
"# put left outlier count in p[0]",
"left_outlier_count",
"=",
"np",
".",
"sum",
"(",
"hist",
"[",
"0",
":",
"p_bin_idx_start",
"]",
")",
"p",
"[",
"0",
"]",
"+=",
"left_outlier_count",
"# put right outlier count in p[-1]",
"right_outlier_count",
"=",
"np",
".",
"sum",
"(",
"hist",
"[",
"p_bin_idx_stop",
":",
"]",
")",
"p",
"[",
"-",
"1",
"]",
"+=",
"right_outlier_count",
"# is_nonzeros[k] indicates whether hist[k] is nonzero",
"is_nonzeros",
"=",
"(",
"p",
"!=",
"0",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# calculate how many bins should be merged to generate quantized distribution q",
"num_merged_bins",
"=",
"sliced_nd_hist",
".",
"size",
"//",
"num_quantized_bins",
"# merge hist into num_quantized_bins bins",
"for",
"j",
"in",
"range",
"(",
"num_quantized_bins",
")",
":",
"start",
"=",
"j",
"*",
"num_merged_bins",
"stop",
"=",
"start",
"+",
"num_merged_bins",
"quantized_bins",
"[",
"j",
"]",
"=",
"sliced_nd_hist",
"[",
"start",
":",
"stop",
"]",
".",
"sum",
"(",
")",
"quantized_bins",
"[",
"-",
"1",
"]",
"+=",
"sliced_nd_hist",
"[",
"num_quantized_bins",
"*",
"num_merged_bins",
":",
"]",
".",
"sum",
"(",
")",
"# expand quantized_bins into p.size bins",
"q",
"=",
"np",
".",
"zeros",
"(",
"sliced_nd_hist",
".",
"size",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"j",
"in",
"range",
"(",
"num_quantized_bins",
")",
":",
"start",
"=",
"j",
"*",
"num_merged_bins",
"if",
"j",
"==",
"num_quantized_bins",
"-",
"1",
":",
"stop",
"=",
"len",
"(",
"is_nonzeros",
")",
"else",
":",
"stop",
"=",
"start",
"+",
"num_merged_bins",
"norm",
"=",
"is_nonzeros",
"[",
"start",
":",
"stop",
"]",
".",
"sum",
"(",
")",
"if",
"norm",
"!=",
"0",
":",
"q",
"[",
"start",
":",
"stop",
"]",
"=",
"float",
"(",
"quantized_bins",
"[",
"j",
"]",
")",
"/",
"float",
"(",
"norm",
")",
"q",
"[",
"p",
"==",
"0",
"]",
"=",
"0",
"p",
"=",
"_smooth_distribution",
"(",
"p",
")",
"# There is a chance that q is an invalid probability distribution.",
"try",
":",
"q",
"=",
"_smooth_distribution",
"(",
"q",
")",
"except",
"ValueError",
":",
"divergence",
"[",
"i",
"-",
"num_half_quantized_bins",
"]",
"=",
"float",
"(",
"\"inf\"",
")",
"divergence",
"[",
"i",
"-",
"num_half_quantized_bins",
"]",
"=",
"stats",
".",
"entropy",
"(",
"p",
",",
"q",
")",
"min_divergence_idx",
"=",
"np",
".",
"argmin",
"(",
"divergence",
")",
"min_divergence",
"=",
"divergence",
"[",
"min_divergence_idx",
"]",
"opt_th",
"=",
"thresholds",
"[",
"min_divergence_idx",
"]",
"return",
"min_val",
",",
"max_val",
",",
"min_divergence",
",",
"opt_th"
] |
Given a dataset, find the optimal threshold for quantizing it.
The reference distribution is `q`, and the candidate distribution is `p`.
`q` is a truncated version of the original distribution.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
|
[
"Given",
"a",
"dataset",
"find",
"the",
"optimal",
"threshold",
"for",
"quantizing",
"it",
".",
"The",
"reference",
"distribution",
"is",
"q",
"and",
"the",
"candidate",
"distribution",
"is",
"p",
".",
"q",
"is",
"a",
"truncated",
"version",
"of",
"the",
"original",
"distribution",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L261-L351
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_get_optimal_thresholds
|
def _get_optimal_thresholds(nd_dict, quantized_dtype, num_bins=8001, num_quantized_bins=255, logger=None):
"""Given a ndarray dict, find the optimal threshold for quantizing each value of the key."""
if stats is None:
raise ImportError('scipy.stats is required for running entropy mode of calculating'
' the optimal thresholds for quantizing FP32 ndarrays into int8.'
' Please check if the scipy python bindings are installed.')
assert isinstance(nd_dict, dict)
if logger is not None:
logger.info('Calculating optimal thresholds for quantization using KL divergence'
' with num_bins=%d and num_quantized_bins=%d' % (num_bins, num_quantized_bins))
th_dict = {}
# copy nd_dict keys since the keys() only returns a view in python3
layer_names = list(nd_dict.keys())
for name in layer_names:
assert name in nd_dict
min_val, max_val, min_divergence, opt_th = \
_get_optimal_threshold(nd_dict[name], quantized_dtype, num_bins=num_bins,
num_quantized_bins=num_quantized_bins)
del nd_dict[name] # release the memory of ndarray
if min_val < 0:
th_dict[name] = (-opt_th, opt_th)
else:
th_dict[name] = (0, opt_th)
if logger is not None:
logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'
% (name, min_val, max_val, min_divergence, opt_th))
return th_dict
|
python
|
def _get_optimal_thresholds(nd_dict, quantized_dtype, num_bins=8001, num_quantized_bins=255, logger=None):
"""Given a ndarray dict, find the optimal threshold for quantizing each value of the key."""
if stats is None:
raise ImportError('scipy.stats is required for running entropy mode of calculating'
' the optimal thresholds for quantizing FP32 ndarrays into int8.'
' Please check if the scipy python bindings are installed.')
assert isinstance(nd_dict, dict)
if logger is not None:
logger.info('Calculating optimal thresholds for quantization using KL divergence'
' with num_bins=%d and num_quantized_bins=%d' % (num_bins, num_quantized_bins))
th_dict = {}
# copy nd_dict keys since the keys() only returns a view in python3
layer_names = list(nd_dict.keys())
for name in layer_names:
assert name in nd_dict
min_val, max_val, min_divergence, opt_th = \
_get_optimal_threshold(nd_dict[name], quantized_dtype, num_bins=num_bins,
num_quantized_bins=num_quantized_bins)
del nd_dict[name] # release the memory of ndarray
if min_val < 0:
th_dict[name] = (-opt_th, opt_th)
else:
th_dict[name] = (0, opt_th)
if logger is not None:
logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'
% (name, min_val, max_val, min_divergence, opt_th))
return th_dict
|
[
"def",
"_get_optimal_thresholds",
"(",
"nd_dict",
",",
"quantized_dtype",
",",
"num_bins",
"=",
"8001",
",",
"num_quantized_bins",
"=",
"255",
",",
"logger",
"=",
"None",
")",
":",
"if",
"stats",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"'scipy.stats is required for running entropy mode of calculating'",
"' the optimal thresholds for quantizing FP32 ndarrays into int8.'",
"' Please check if the scipy python bindings are installed.'",
")",
"assert",
"isinstance",
"(",
"nd_dict",
",",
"dict",
")",
"if",
"logger",
"is",
"not",
"None",
":",
"logger",
".",
"info",
"(",
"'Calculating optimal thresholds for quantization using KL divergence'",
"' with num_bins=%d and num_quantized_bins=%d'",
"%",
"(",
"num_bins",
",",
"num_quantized_bins",
")",
")",
"th_dict",
"=",
"{",
"}",
"# copy nd_dict keys since the keys() only returns a view in python3",
"layer_names",
"=",
"list",
"(",
"nd_dict",
".",
"keys",
"(",
")",
")",
"for",
"name",
"in",
"layer_names",
":",
"assert",
"name",
"in",
"nd_dict",
"min_val",
",",
"max_val",
",",
"min_divergence",
",",
"opt_th",
"=",
"_get_optimal_threshold",
"(",
"nd_dict",
"[",
"name",
"]",
",",
"quantized_dtype",
",",
"num_bins",
"=",
"num_bins",
",",
"num_quantized_bins",
"=",
"num_quantized_bins",
")",
"del",
"nd_dict",
"[",
"name",
"]",
"# release the memory of ndarray",
"if",
"min_val",
"<",
"0",
":",
"th_dict",
"[",
"name",
"]",
"=",
"(",
"-",
"opt_th",
",",
"opt_th",
")",
"else",
":",
"th_dict",
"[",
"name",
"]",
"=",
"(",
"0",
",",
"opt_th",
")",
"if",
"logger",
"is",
"not",
"None",
":",
"logger",
".",
"info",
"(",
"'layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'",
"%",
"(",
"name",
",",
"min_val",
",",
"max_val",
",",
"min_divergence",
",",
"opt_th",
")",
")",
"return",
"th_dict"
] |
Given a ndarray dict, find the optimal threshold for quantizing each value of the key.
|
[
"Given",
"a",
"ndarray",
"dict",
"find",
"the",
"optimal",
"threshold",
"for",
"quantizing",
"each",
"value",
"of",
"the",
"key",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L355-L381
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_load_sym
|
def _load_sym(sym, logger=logging):
"""Given a str as a path the symbol .json file or a symbol, returns a Symbol object."""
if isinstance(sym, str): # sym is a symbol file path
cur_path = os.path.dirname(os.path.realpath(__file__))
symbol_file_path = os.path.join(cur_path, sym)
logger.info('Loading symbol from file %s' % symbol_file_path)
return sym_load(symbol_file_path)
elif isinstance(sym, Symbol):
return sym
else:
raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
' while received type %s' % str(type(sym)))
|
python
|
def _load_sym(sym, logger=logging):
"""Given a str as a path the symbol .json file or a symbol, returns a Symbol object."""
if isinstance(sym, str): # sym is a symbol file path
cur_path = os.path.dirname(os.path.realpath(__file__))
symbol_file_path = os.path.join(cur_path, sym)
logger.info('Loading symbol from file %s' % symbol_file_path)
return sym_load(symbol_file_path)
elif isinstance(sym, Symbol):
return sym
else:
raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
' while received type %s' % str(type(sym)))
|
[
"def",
"_load_sym",
"(",
"sym",
",",
"logger",
"=",
"logging",
")",
":",
"if",
"isinstance",
"(",
"sym",
",",
"str",
")",
":",
"# sym is a symbol file path",
"cur_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"symbol_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cur_path",
",",
"sym",
")",
"logger",
".",
"info",
"(",
"'Loading symbol from file %s'",
"%",
"symbol_file_path",
")",
"return",
"sym_load",
"(",
"symbol_file_path",
")",
"elif",
"isinstance",
"(",
"sym",
",",
"Symbol",
")",
":",
"return",
"sym",
"else",
":",
"raise",
"ValueError",
"(",
"'_load_sym only accepts Symbol or path to the symbol file,'",
"' while received type %s'",
"%",
"str",
"(",
"type",
"(",
"sym",
")",
")",
")"
] |
Given a str as a path the symbol .json file or a symbol, returns a Symbol object.
|
[
"Given",
"a",
"str",
"as",
"a",
"path",
"the",
"symbol",
".",
"json",
"file",
"or",
"a",
"symbol",
"returns",
"a",
"Symbol",
"object",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L384-L395
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_load_params
|
def _load_params(params, logger=logging):
"""Given a str as a path to the .params file or a pair of params,
returns two dictionaries representing arg_params and aux_params.
"""
if isinstance(params, str):
cur_path = os.path.dirname(os.path.realpath(__file__))
param_file_path = os.path.join(cur_path, params)
logger.info('Loading params from file %s' % param_file_path)
save_dict = nd_load(param_file_path)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params
elif isinstance(params, (tuple, list)) and len(params) == 2:
return params[0], params[1]
else:
raise ValueError('Unsupported params provided. Must be either a path to the param file or'
' a pair of dictionaries representing arg_params and aux_params')
|
python
|
def _load_params(params, logger=logging):
"""Given a str as a path to the .params file or a pair of params,
returns two dictionaries representing arg_params and aux_params.
"""
if isinstance(params, str):
cur_path = os.path.dirname(os.path.realpath(__file__))
param_file_path = os.path.join(cur_path, params)
logger.info('Loading params from file %s' % param_file_path)
save_dict = nd_load(param_file_path)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params
elif isinstance(params, (tuple, list)) and len(params) == 2:
return params[0], params[1]
else:
raise ValueError('Unsupported params provided. Must be either a path to the param file or'
' a pair of dictionaries representing arg_params and aux_params')
|
[
"def",
"_load_params",
"(",
"params",
",",
"logger",
"=",
"logging",
")",
":",
"if",
"isinstance",
"(",
"params",
",",
"str",
")",
":",
"cur_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"param_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cur_path",
",",
"params",
")",
"logger",
".",
"info",
"(",
"'Loading params from file %s'",
"%",
"param_file_path",
")",
"save_dict",
"=",
"nd_load",
"(",
"param_file_path",
")",
"arg_params",
"=",
"{",
"}",
"aux_params",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"save_dict",
".",
"items",
"(",
")",
":",
"tp",
",",
"name",
"=",
"k",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"tp",
"==",
"'arg'",
":",
"arg_params",
"[",
"name",
"]",
"=",
"v",
"if",
"tp",
"==",
"'aux'",
":",
"aux_params",
"[",
"name",
"]",
"=",
"v",
"return",
"arg_params",
",",
"aux_params",
"elif",
"isinstance",
"(",
"params",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"params",
")",
"==",
"2",
":",
"return",
"params",
"[",
"0",
"]",
",",
"params",
"[",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported params provided. Must be either a path to the param file or'",
"' a pair of dictionaries representing arg_params and aux_params'",
")"
] |
Given a str as a path to the .params file or a pair of params,
returns two dictionaries representing arg_params and aux_params.
|
[
"Given",
"a",
"str",
"as",
"a",
"path",
"to",
"the",
".",
"params",
"file",
"or",
"a",
"pair",
"of",
"params",
"returns",
"two",
"dictionaries",
"representing",
"arg_params",
"and",
"aux_params",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L398-L420
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
quantize_model
|
def quantize_model(sym, arg_params, aux_params,
data_names=('data',), label_names=('softmax_label',),
ctx=cpu(), excluded_sym_names=None, calib_mode='entropy',
calib_data=None, num_calib_examples=None, calib_layer=None,
quantized_dtype='int8', logger=logging):
"""User-level API for generating a quantized model from a FP32 model w/ or w/o calibration.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
The quantization implementation adopts the TensorFlow's approach:
https://www.tensorflow.org/performance/quantization.
The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT:
http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
and adapts the method to MXNet.
Parameters
----------
sym : str or Symbol
Defines the structure of a neural network for FP32 data types.
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
data_names : a list of strs
Data names required for creating a Module object to run forward propagation on the
calibration dataset.
label_names : a list of strs
Label names required for creating a Module object to run forward propagation on the
calibration dataset.
ctx : Context
Defines the device that users want to run forward propagation on the calibration
dataset for collecting layer output statistics. Currently, only supports single context.
excluded_sym_names : list of strings
A list of strings representing the names of the symbols that users want to excluding
from being quantized.
calib_mode : str
If calib_mode='none', no calibration will be used and the thresholds for
requantization after the corresponding layers will be calculated at runtime by
calling min and max operators. The quantized models generated in this
mode are normally 10-20% slower than those with calibrations during inference.
If calib_mode='naive', the min and max values of the layer outputs from a calibration
dataset will be directly taken as the thresholds for quantization.
If calib_mode='entropy' (default mode), the thresholds for quantization will be
derived such that the KL divergence between the distributions of FP32 layer outputs and
quantized layer outputs is minimized based upon the calibration dataset.
calib_data : DataIter
A data iterator initialized by the calibration dataset.
num_calib_examples : int or None
The maximum number of examples that user would like to use for calibration. If not provided,
the whole calibration dataset will be used.
calib_layer : function
Given a layer's output name in string, return True or False for deciding whether to
calibrate this layer. If yes, the statistics of the layer's output will be collected;
otherwise, no information of the layer's output will be collected. If not provided,
all the layers' outputs that need requantization will be collected.
quantized_dtype : str
The quantized destination type for input data. Currently support 'int8'
, 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result.
Default value is 'int8'.
logger : Object
A logging object for printing information during the process of quantization.
Returns
-------
tuple
A tuple of quantized symbol, quantized arg_params, and aux_params.
-------
"""
if excluded_sym_names is None:
excluded_sym_names = []
if not isinstance(excluded_sym_names, list):
raise ValueError('excluded_sym_names must be a list of strings representing'
' the names of the symbols that will not be quantized,'
' while received type %s' % str(type(excluded_sym_names)))
logger.info('Quantizing symbol')
if quantized_dtype not in ('int8', 'uint8', 'auto'):
raise ValueError('unknown quantized_dtype %s received,'
' expected `int8`, `uint8` or `auto`' % quantized_dtype)
qsym = _quantize_symbol(sym, excluded_symbols=excluded_sym_names,
offline_params=list(arg_params.keys()),
quantized_dtype=quantized_dtype)
th_dict = {}
if calib_mode is not None and calib_mode != 'none':
if not isinstance(ctx, Context):
raise ValueError('currently only supports single ctx, while received %s' % str(ctx))
if calib_data is None:
raise ValueError('calib_data must be provided when calib_mode=%s' % calib_mode)
if not isinstance(calib_data, DataIter):
raise ValueError('calib_data must be of DataIter type when calib_mode=%s,'
' while received type %s' % (calib_mode, str(type(calib_data))))
mod = Module(symbol=sym, data_names=data_names, label_names=label_names, context=ctx)
if len(calib_data.provide_label) > 0:
mod.bind(for_training=False, data_shapes=calib_data.provide_data,
label_shapes=calib_data.provide_label)
else:
mod.bind(for_training=False, data_shapes=calib_data.provide_data)
mod.set_params(arg_params, aux_params)
if calib_mode == 'entropy':
nd_dict, num_examples = _collect_layer_outputs(mod, calib_data,
include_layer=calib_layer,
max_num_examples=num_calib_examples,
logger=logger)
logger.info('Collected layer outputs from FP32 model using %d examples' % num_examples)
logger.info('Calculating optimal thresholds for quantization')
th_dict = _get_optimal_thresholds(nd_dict, quantized_dtype, logger=logger)
elif calib_mode == 'naive':
th_dict, num_examples = _collect_layer_output_min_max(
mod, calib_data, include_layer=calib_layer, max_num_examples=num_calib_examples,
logger=logger)
logger.info('Collected layer output min/max values from FP32 model using %d examples'
% num_examples)
else:
raise ValueError('unknown calibration mode %s received,'
' expected `none`, `naive`, or `entropy`' % calib_mode)
logger.info('Calibrating quantized symbol')
qsym = _calibrate_quantized_sym(qsym, th_dict)
logger.info('Quantizing parameters')
qarg_params = _quantize_params(qsym, arg_params, th_dict)
return qsym, qarg_params, aux_params
|
python
|
def quantize_model(sym, arg_params, aux_params,
data_names=('data',), label_names=('softmax_label',),
ctx=cpu(), excluded_sym_names=None, calib_mode='entropy',
calib_data=None, num_calib_examples=None, calib_layer=None,
quantized_dtype='int8', logger=logging):
"""User-level API for generating a quantized model from a FP32 model w/ or w/o calibration.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
The quantization implementation adopts the TensorFlow's approach:
https://www.tensorflow.org/performance/quantization.
The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT:
http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
and adapts the method to MXNet.
Parameters
----------
sym : str or Symbol
Defines the structure of a neural network for FP32 data types.
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
data_names : a list of strs
Data names required for creating a Module object to run forward propagation on the
calibration dataset.
label_names : a list of strs
Label names required for creating a Module object to run forward propagation on the
calibration dataset.
ctx : Context
Defines the device that users want to run forward propagation on the calibration
dataset for collecting layer output statistics. Currently, only supports single context.
excluded_sym_names : list of strings
A list of strings representing the names of the symbols that users want to excluding
from being quantized.
calib_mode : str
If calib_mode='none', no calibration will be used and the thresholds for
requantization after the corresponding layers will be calculated at runtime by
calling min and max operators. The quantized models generated in this
mode are normally 10-20% slower than those with calibrations during inference.
If calib_mode='naive', the min and max values of the layer outputs from a calibration
dataset will be directly taken as the thresholds for quantization.
If calib_mode='entropy' (default mode), the thresholds for quantization will be
derived such that the KL divergence between the distributions of FP32 layer outputs and
quantized layer outputs is minimized based upon the calibration dataset.
calib_data : DataIter
A data iterator initialized by the calibration dataset.
num_calib_examples : int or None
The maximum number of examples that user would like to use for calibration. If not provided,
the whole calibration dataset will be used.
calib_layer : function
Given a layer's output name in string, return True or False for deciding whether to
calibrate this layer. If yes, the statistics of the layer's output will be collected;
otherwise, no information of the layer's output will be collected. If not provided,
all the layers' outputs that need requantization will be collected.
quantized_dtype : str
The quantized destination type for input data. Currently support 'int8'
, 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result.
Default value is 'int8'.
logger : Object
A logging object for printing information during the process of quantization.
Returns
-------
tuple
A tuple of quantized symbol, quantized arg_params, and aux_params.
-------
"""
if excluded_sym_names is None:
excluded_sym_names = []
if not isinstance(excluded_sym_names, list):
raise ValueError('excluded_sym_names must be a list of strings representing'
' the names of the symbols that will not be quantized,'
' while received type %s' % str(type(excluded_sym_names)))
logger.info('Quantizing symbol')
if quantized_dtype not in ('int8', 'uint8', 'auto'):
raise ValueError('unknown quantized_dtype %s received,'
' expected `int8`, `uint8` or `auto`' % quantized_dtype)
qsym = _quantize_symbol(sym, excluded_symbols=excluded_sym_names,
offline_params=list(arg_params.keys()),
quantized_dtype=quantized_dtype)
th_dict = {}
if calib_mode is not None and calib_mode != 'none':
if not isinstance(ctx, Context):
raise ValueError('currently only supports single ctx, while received %s' % str(ctx))
if calib_data is None:
raise ValueError('calib_data must be provided when calib_mode=%s' % calib_mode)
if not isinstance(calib_data, DataIter):
raise ValueError('calib_data must be of DataIter type when calib_mode=%s,'
' while received type %s' % (calib_mode, str(type(calib_data))))
mod = Module(symbol=sym, data_names=data_names, label_names=label_names, context=ctx)
if len(calib_data.provide_label) > 0:
mod.bind(for_training=False, data_shapes=calib_data.provide_data,
label_shapes=calib_data.provide_label)
else:
mod.bind(for_training=False, data_shapes=calib_data.provide_data)
mod.set_params(arg_params, aux_params)
if calib_mode == 'entropy':
nd_dict, num_examples = _collect_layer_outputs(mod, calib_data,
include_layer=calib_layer,
max_num_examples=num_calib_examples,
logger=logger)
logger.info('Collected layer outputs from FP32 model using %d examples' % num_examples)
logger.info('Calculating optimal thresholds for quantization')
th_dict = _get_optimal_thresholds(nd_dict, quantized_dtype, logger=logger)
elif calib_mode == 'naive':
th_dict, num_examples = _collect_layer_output_min_max(
mod, calib_data, include_layer=calib_layer, max_num_examples=num_calib_examples,
logger=logger)
logger.info('Collected layer output min/max values from FP32 model using %d examples'
% num_examples)
else:
raise ValueError('unknown calibration mode %s received,'
' expected `none`, `naive`, or `entropy`' % calib_mode)
logger.info('Calibrating quantized symbol')
qsym = _calibrate_quantized_sym(qsym, th_dict)
logger.info('Quantizing parameters')
qarg_params = _quantize_params(qsym, arg_params, th_dict)
return qsym, qarg_params, aux_params
|
[
"def",
"quantize_model",
"(",
"sym",
",",
"arg_params",
",",
"aux_params",
",",
"data_names",
"=",
"(",
"'data'",
",",
")",
",",
"label_names",
"=",
"(",
"'softmax_label'",
",",
")",
",",
"ctx",
"=",
"cpu",
"(",
")",
",",
"excluded_sym_names",
"=",
"None",
",",
"calib_mode",
"=",
"'entropy'",
",",
"calib_data",
"=",
"None",
",",
"num_calib_examples",
"=",
"None",
",",
"calib_layer",
"=",
"None",
",",
"quantized_dtype",
"=",
"'int8'",
",",
"logger",
"=",
"logging",
")",
":",
"if",
"excluded_sym_names",
"is",
"None",
":",
"excluded_sym_names",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"excluded_sym_names",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"'excluded_sym_names must be a list of strings representing'",
"' the names of the symbols that will not be quantized,'",
"' while received type %s'",
"%",
"str",
"(",
"type",
"(",
"excluded_sym_names",
")",
")",
")",
"logger",
".",
"info",
"(",
"'Quantizing symbol'",
")",
"if",
"quantized_dtype",
"not",
"in",
"(",
"'int8'",
",",
"'uint8'",
",",
"'auto'",
")",
":",
"raise",
"ValueError",
"(",
"'unknown quantized_dtype %s received,'",
"' expected `int8`, `uint8` or `auto`'",
"%",
"quantized_dtype",
")",
"qsym",
"=",
"_quantize_symbol",
"(",
"sym",
",",
"excluded_symbols",
"=",
"excluded_sym_names",
",",
"offline_params",
"=",
"list",
"(",
"arg_params",
".",
"keys",
"(",
")",
")",
",",
"quantized_dtype",
"=",
"quantized_dtype",
")",
"th_dict",
"=",
"{",
"}",
"if",
"calib_mode",
"is",
"not",
"None",
"and",
"calib_mode",
"!=",
"'none'",
":",
"if",
"not",
"isinstance",
"(",
"ctx",
",",
"Context",
")",
":",
"raise",
"ValueError",
"(",
"'currently only supports single ctx, while received %s'",
"%",
"str",
"(",
"ctx",
")",
")",
"if",
"calib_data",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'calib_data must be provided when calib_mode=%s'",
"%",
"calib_mode",
")",
"if",
"not",
"isinstance",
"(",
"calib_data",
",",
"DataIter",
")",
":",
"raise",
"ValueError",
"(",
"'calib_data must be of DataIter type when calib_mode=%s,'",
"' while received type %s'",
"%",
"(",
"calib_mode",
",",
"str",
"(",
"type",
"(",
"calib_data",
")",
")",
")",
")",
"mod",
"=",
"Module",
"(",
"symbol",
"=",
"sym",
",",
"data_names",
"=",
"data_names",
",",
"label_names",
"=",
"label_names",
",",
"context",
"=",
"ctx",
")",
"if",
"len",
"(",
"calib_data",
".",
"provide_label",
")",
">",
"0",
":",
"mod",
".",
"bind",
"(",
"for_training",
"=",
"False",
",",
"data_shapes",
"=",
"calib_data",
".",
"provide_data",
",",
"label_shapes",
"=",
"calib_data",
".",
"provide_label",
")",
"else",
":",
"mod",
".",
"bind",
"(",
"for_training",
"=",
"False",
",",
"data_shapes",
"=",
"calib_data",
".",
"provide_data",
")",
"mod",
".",
"set_params",
"(",
"arg_params",
",",
"aux_params",
")",
"if",
"calib_mode",
"==",
"'entropy'",
":",
"nd_dict",
",",
"num_examples",
"=",
"_collect_layer_outputs",
"(",
"mod",
",",
"calib_data",
",",
"include_layer",
"=",
"calib_layer",
",",
"max_num_examples",
"=",
"num_calib_examples",
",",
"logger",
"=",
"logger",
")",
"logger",
".",
"info",
"(",
"'Collected layer outputs from FP32 model using %d examples'",
"%",
"num_examples",
")",
"logger",
".",
"info",
"(",
"'Calculating optimal thresholds for quantization'",
")",
"th_dict",
"=",
"_get_optimal_thresholds",
"(",
"nd_dict",
",",
"quantized_dtype",
",",
"logger",
"=",
"logger",
")",
"elif",
"calib_mode",
"==",
"'naive'",
":",
"th_dict",
",",
"num_examples",
"=",
"_collect_layer_output_min_max",
"(",
"mod",
",",
"calib_data",
",",
"include_layer",
"=",
"calib_layer",
",",
"max_num_examples",
"=",
"num_calib_examples",
",",
"logger",
"=",
"logger",
")",
"logger",
".",
"info",
"(",
"'Collected layer output min/max values from FP32 model using %d examples'",
"%",
"num_examples",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'unknown calibration mode %s received,'",
"' expected `none`, `naive`, or `entropy`'",
"%",
"calib_mode",
")",
"logger",
".",
"info",
"(",
"'Calibrating quantized symbol'",
")",
"qsym",
"=",
"_calibrate_quantized_sym",
"(",
"qsym",
",",
"th_dict",
")",
"logger",
".",
"info",
"(",
"'Quantizing parameters'",
")",
"qarg_params",
"=",
"_quantize_params",
"(",
"qsym",
",",
"arg_params",
",",
"th_dict",
")",
"return",
"qsym",
",",
"qarg_params",
",",
"aux_params"
] |
User-level API for generating a quantized model from a FP32 model w/ or w/o calibration.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
The quantization implementation adopts the TensorFlow's approach:
https://www.tensorflow.org/performance/quantization.
The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT:
http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
and adapts the method to MXNet.
Parameters
----------
sym : str or Symbol
Defines the structure of a neural network for FP32 data types.
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
data_names : a list of strs
Data names required for creating a Module object to run forward propagation on the
calibration dataset.
label_names : a list of strs
Label names required for creating a Module object to run forward propagation on the
calibration dataset.
ctx : Context
Defines the device that users want to run forward propagation on the calibration
dataset for collecting layer output statistics. Currently, only supports single context.
excluded_sym_names : list of strings
A list of strings representing the names of the symbols that users want to excluding
from being quantized.
calib_mode : str
If calib_mode='none', no calibration will be used and the thresholds for
requantization after the corresponding layers will be calculated at runtime by
calling min and max operators. The quantized models generated in this
mode are normally 10-20% slower than those with calibrations during inference.
If calib_mode='naive', the min and max values of the layer outputs from a calibration
dataset will be directly taken as the thresholds for quantization.
If calib_mode='entropy' (default mode), the thresholds for quantization will be
derived such that the KL divergence between the distributions of FP32 layer outputs and
quantized layer outputs is minimized based upon the calibration dataset.
calib_data : DataIter
A data iterator initialized by the calibration dataset.
num_calib_examples : int or None
The maximum number of examples that user would like to use for calibration. If not provided,
the whole calibration dataset will be used.
calib_layer : function
Given a layer's output name in string, return True or False for deciding whether to
calibrate this layer. If yes, the statistics of the layer's output will be collected;
otherwise, no information of the layer's output will be collected. If not provided,
all the layers' outputs that need requantization will be collected.
quantized_dtype : str
The quantized destination type for input data. Currently support 'int8'
, 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result.
Default value is 'int8'.
logger : Object
A logging object for printing information during the process of quantization.
Returns
-------
tuple
A tuple of quantized symbol, quantized arg_params, and aux_params.
-------
|
[
"User",
"-",
"level",
"API",
"for",
"generating",
"a",
"quantized",
"model",
"from",
"a",
"FP32",
"model",
"w",
"/",
"or",
"w",
"/",
"o",
"calibration",
".",
"The",
"backend",
"quantized",
"operators",
"are",
"only",
"enabled",
"for",
"Linux",
"systems",
".",
"Please",
"do",
"not",
"run",
"inference",
"using",
"the",
"quantized",
"models",
"on",
"Windows",
"for",
"now",
".",
"The",
"quantization",
"implementation",
"adopts",
"the",
"TensorFlow",
"s",
"approach",
":",
"https",
":",
"//",
"www",
".",
"tensorflow",
".",
"org",
"/",
"performance",
"/",
"quantization",
".",
"The",
"calibration",
"implementation",
"borrows",
"the",
"idea",
"of",
"Nvidia",
"s",
"8",
"-",
"bit",
"Inference",
"with",
"TensorRT",
":",
"http",
":",
"//",
"on",
"-",
"demand",
".",
"gputechconf",
".",
"com",
"/",
"gtc",
"/",
"2017",
"/",
"presentation",
"/",
"s7310",
"-",
"8",
"-",
"bit",
"-",
"inference",
"-",
"with",
"-",
"tensorrt",
".",
"pdf",
"and",
"adapts",
"the",
"method",
"to",
"MXNet",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L422-L544
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_LayerOutputCollector.collect
|
def collect(self, name, arr):
"""Callback function for collecting layer output NDArrays."""
name = py_str(name)
if self.include_layer is not None and not self.include_layer(name):
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False).copyto(cpu())
if self.logger is not None:
self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape))
if name in self.nd_dict:
self.nd_dict[name].append(arr)
else:
self.nd_dict[name] = [arr]
|
python
|
def collect(self, name, arr):
"""Callback function for collecting layer output NDArrays."""
name = py_str(name)
if self.include_layer is not None and not self.include_layer(name):
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False).copyto(cpu())
if self.logger is not None:
self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape))
if name in self.nd_dict:
self.nd_dict[name].append(arr)
else:
self.nd_dict[name] = [arr]
|
[
"def",
"collect",
"(",
"self",
",",
"name",
",",
"arr",
")",
":",
"name",
"=",
"py_str",
"(",
"name",
")",
"if",
"self",
".",
"include_layer",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"include_layer",
"(",
"name",
")",
":",
"return",
"handle",
"=",
"ctypes",
".",
"cast",
"(",
"arr",
",",
"NDArrayHandle",
")",
"arr",
"=",
"NDArray",
"(",
"handle",
",",
"writable",
"=",
"False",
")",
".",
"copyto",
"(",
"cpu",
"(",
")",
")",
"if",
"self",
".",
"logger",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Collecting layer %s output of shape %s\"",
"%",
"(",
"name",
",",
"arr",
".",
"shape",
")",
")",
"if",
"name",
"in",
"self",
".",
"nd_dict",
":",
"self",
".",
"nd_dict",
"[",
"name",
"]",
".",
"append",
"(",
"arr",
")",
"else",
":",
"self",
".",
"nd_dict",
"[",
"name",
"]",
"=",
"[",
"arr",
"]"
] |
Callback function for collecting layer output NDArrays.
|
[
"Callback",
"function",
"for",
"collecting",
"layer",
"output",
"NDArrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L137-L149
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/quantization.py
|
_LayerOutputMinMaxCollector.collect
|
def collect(self, name, arr):
"""Callback function for collecting min and max values from an NDArray."""
name = py_str(name)
if self.include_layer is not None and not self.include_layer(name):
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False)
min_range = ndarray.min(arr).asscalar()
max_range = ndarray.max(arr).asscalar()
if name in self.min_max_dict:
cur_min_max = self.min_max_dict[name]
self.min_max_dict[name] = (min(cur_min_max[0], min_range),
max(cur_min_max[1], max_range))
else:
self.min_max_dict[name] = (min_range, max_range)
if self.logger is not None:
self.logger.info("Collecting layer %s min_range=%f, max_range=%f"
% (name, min_range, max_range))
|
python
|
def collect(self, name, arr):
"""Callback function for collecting min and max values from an NDArray."""
name = py_str(name)
if self.include_layer is not None and not self.include_layer(name):
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False)
min_range = ndarray.min(arr).asscalar()
max_range = ndarray.max(arr).asscalar()
if name in self.min_max_dict:
cur_min_max = self.min_max_dict[name]
self.min_max_dict[name] = (min(cur_min_max[0], min_range),
max(cur_min_max[1], max_range))
else:
self.min_max_dict[name] = (min_range, max_range)
if self.logger is not None:
self.logger.info("Collecting layer %s min_range=%f, max_range=%f"
% (name, min_range, max_range))
|
[
"def",
"collect",
"(",
"self",
",",
"name",
",",
"arr",
")",
":",
"name",
"=",
"py_str",
"(",
"name",
")",
"if",
"self",
".",
"include_layer",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"include_layer",
"(",
"name",
")",
":",
"return",
"handle",
"=",
"ctypes",
".",
"cast",
"(",
"arr",
",",
"NDArrayHandle",
")",
"arr",
"=",
"NDArray",
"(",
"handle",
",",
"writable",
"=",
"False",
")",
"min_range",
"=",
"ndarray",
".",
"min",
"(",
"arr",
")",
".",
"asscalar",
"(",
")",
"max_range",
"=",
"ndarray",
".",
"max",
"(",
"arr",
")",
".",
"asscalar",
"(",
")",
"if",
"name",
"in",
"self",
".",
"min_max_dict",
":",
"cur_min_max",
"=",
"self",
".",
"min_max_dict",
"[",
"name",
"]",
"self",
".",
"min_max_dict",
"[",
"name",
"]",
"=",
"(",
"min",
"(",
"cur_min_max",
"[",
"0",
"]",
",",
"min_range",
")",
",",
"max",
"(",
"cur_min_max",
"[",
"1",
"]",
",",
"max_range",
")",
")",
"else",
":",
"self",
".",
"min_max_dict",
"[",
"name",
"]",
"=",
"(",
"min_range",
",",
"max_range",
")",
"if",
"self",
".",
"logger",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Collecting layer %s min_range=%f, max_range=%f\"",
"%",
"(",
"name",
",",
"min_range",
",",
"max_range",
")",
")"
] |
Callback function for collecting min and max values from an NDArray.
|
[
"Callback",
"function",
"for",
"collecting",
"min",
"and",
"max",
"values",
"from",
"an",
"NDArray",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L160-L177
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
encoder
|
def encoder(nef, z_dim, batch_size, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
'''The encoder is a CNN which takes 32x32 image as input
generates the 100 dimensional shape embedding as a sample from normal distribution
using predicted meand and variance
'''
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
e1 = mx.sym.Convolution(data, name='enc1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef, no_bias=no_bias)
ebn1 = BatchNorm(e1, name='encbn1', fix_gamma=fix_gamma, eps=eps)
eact1 = mx.sym.LeakyReLU(ebn1, name='encact1', act_type='leaky', slope=0.2)
e2 = mx.sym.Convolution(eact1, name='enc2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*2, no_bias=no_bias)
ebn2 = BatchNorm(e2, name='encbn2', fix_gamma=fix_gamma, eps=eps)
eact2 = mx.sym.LeakyReLU(ebn2, name='encact2', act_type='leaky', slope=0.2)
e3 = mx.sym.Convolution(eact2, name='enc3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*4, no_bias=no_bias)
ebn3 = BatchNorm(e3, name='encbn3', fix_gamma=fix_gamma, eps=eps)
eact3 = mx.sym.LeakyReLU(ebn3, name='encact3', act_type='leaky', slope=0.2)
e4 = mx.sym.Convolution(eact3, name='enc4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*8, no_bias=no_bias)
ebn4 = BatchNorm(e4, name='encbn4', fix_gamma=fix_gamma, eps=eps)
eact4 = mx.sym.LeakyReLU(ebn4, name='encact4', act_type='leaky', slope=0.2)
eact4 = mx.sym.Flatten(eact4)
z_mu = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_mu")
z_lv = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_lv")
z = z_mu + mx.symbol.broadcast_mul(mx.symbol.exp(0.5*z_lv),mx.symbol.random_normal(loc=0, scale=1,shape=(batch_size,z_dim)))
return z_mu, z_lv, z
|
python
|
def encoder(nef, z_dim, batch_size, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
'''The encoder is a CNN which takes 32x32 image as input
generates the 100 dimensional shape embedding as a sample from normal distribution
using predicted meand and variance
'''
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
e1 = mx.sym.Convolution(data, name='enc1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef, no_bias=no_bias)
ebn1 = BatchNorm(e1, name='encbn1', fix_gamma=fix_gamma, eps=eps)
eact1 = mx.sym.LeakyReLU(ebn1, name='encact1', act_type='leaky', slope=0.2)
e2 = mx.sym.Convolution(eact1, name='enc2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*2, no_bias=no_bias)
ebn2 = BatchNorm(e2, name='encbn2', fix_gamma=fix_gamma, eps=eps)
eact2 = mx.sym.LeakyReLU(ebn2, name='encact2', act_type='leaky', slope=0.2)
e3 = mx.sym.Convolution(eact2, name='enc3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*4, no_bias=no_bias)
ebn3 = BatchNorm(e3, name='encbn3', fix_gamma=fix_gamma, eps=eps)
eact3 = mx.sym.LeakyReLU(ebn3, name='encact3', act_type='leaky', slope=0.2)
e4 = mx.sym.Convolution(eact3, name='enc4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*8, no_bias=no_bias)
ebn4 = BatchNorm(e4, name='encbn4', fix_gamma=fix_gamma, eps=eps)
eact4 = mx.sym.LeakyReLU(ebn4, name='encact4', act_type='leaky', slope=0.2)
eact4 = mx.sym.Flatten(eact4)
z_mu = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_mu")
z_lv = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_lv")
z = z_mu + mx.symbol.broadcast_mul(mx.symbol.exp(0.5*z_lv),mx.symbol.random_normal(loc=0, scale=1,shape=(batch_size,z_dim)))
return z_mu, z_lv, z
|
[
"def",
"encoder",
"(",
"nef",
",",
"z_dim",
",",
"batch_size",
",",
"no_bias",
"=",
"True",
",",
"fix_gamma",
"=",
"True",
",",
"eps",
"=",
"1e-5",
"+",
"1e-12",
")",
":",
"BatchNorm",
"=",
"mx",
".",
"sym",
".",
"BatchNorm",
"data",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'data'",
")",
"e1",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"data",
",",
"name",
"=",
"'enc1'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"nef",
",",
"no_bias",
"=",
"no_bias",
")",
"ebn1",
"=",
"BatchNorm",
"(",
"e1",
",",
"name",
"=",
"'encbn1'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"eact1",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"ebn1",
",",
"name",
"=",
"'encact1'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"e2",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"eact1",
",",
"name",
"=",
"'enc2'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"nef",
"*",
"2",
",",
"no_bias",
"=",
"no_bias",
")",
"ebn2",
"=",
"BatchNorm",
"(",
"e2",
",",
"name",
"=",
"'encbn2'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"eact2",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"ebn2",
",",
"name",
"=",
"'encact2'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"e3",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"eact2",
",",
"name",
"=",
"'enc3'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"nef",
"*",
"4",
",",
"no_bias",
"=",
"no_bias",
")",
"ebn3",
"=",
"BatchNorm",
"(",
"e3",
",",
"name",
"=",
"'encbn3'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"eact3",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"ebn3",
",",
"name",
"=",
"'encact3'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"e4",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"eact3",
",",
"name",
"=",
"'enc4'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"nef",
"*",
"8",
",",
"no_bias",
"=",
"no_bias",
")",
"ebn4",
"=",
"BatchNorm",
"(",
"e4",
",",
"name",
"=",
"'encbn4'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"eact4",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"ebn4",
",",
"name",
"=",
"'encact4'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"eact4",
"=",
"mx",
".",
"sym",
".",
"Flatten",
"(",
"eact4",
")",
"z_mu",
"=",
"mx",
".",
"sym",
".",
"FullyConnected",
"(",
"eact4",
",",
"num_hidden",
"=",
"z_dim",
",",
"name",
"=",
"\"enc_mu\"",
")",
"z_lv",
"=",
"mx",
".",
"sym",
".",
"FullyConnected",
"(",
"eact4",
",",
"num_hidden",
"=",
"z_dim",
",",
"name",
"=",
"\"enc_lv\"",
")",
"z",
"=",
"z_mu",
"+",
"mx",
".",
"symbol",
".",
"broadcast_mul",
"(",
"mx",
".",
"symbol",
".",
"exp",
"(",
"0.5",
"*",
"z_lv",
")",
",",
"mx",
".",
"symbol",
".",
"random_normal",
"(",
"loc",
"=",
"0",
",",
"scale",
"=",
"1",
",",
"shape",
"=",
"(",
"batch_size",
",",
"z_dim",
")",
")",
")",
"return",
"z_mu",
",",
"z_lv",
",",
"z"
] |
The encoder is a CNN which takes 32x32 image as input
generates the 100 dimensional shape embedding as a sample from normal distribution
using predicted meand and variance
|
[
"The",
"encoder",
"is",
"a",
"CNN",
"which",
"takes",
"32x32",
"image",
"as",
"input",
"generates",
"the",
"100",
"dimensional",
"shape",
"embedding",
"as",
"a",
"sample",
"from",
"normal",
"distribution",
"using",
"predicted",
"meand",
"and",
"variance"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L54-L86
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
generator
|
def generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=100, activation='sigmoid'):
'''The genrator is a CNN which takes 100 dimensional embedding as input
and reconstructs the input image given to the encoder
'''
BatchNorm = mx.sym.BatchNorm
rand = mx.sym.Variable('rand')
rand = mx.sym.Reshape(rand, shape=(-1, z_dim, 1, 1))
g1 = mx.sym.Deconvolution(rand, name='gen1', kernel=(5,5), stride=(2,2),target_shape=(2,2), num_filter=ngf*8, no_bias=no_bias)
gbn1 = BatchNorm(g1, name='genbn1', fix_gamma=fix_gamma, eps=eps)
gact1 = mx.sym.Activation(gbn1, name="genact1", act_type="relu")
g2 = mx.sym.Deconvolution(gact1, name='gen2', kernel=(5,5), stride=(2,2),target_shape=(4,4), num_filter=ngf*4, no_bias=no_bias)
gbn2 = BatchNorm(g2, name='genbn2', fix_gamma=fix_gamma, eps=eps)
gact2 = mx.sym.Activation(gbn2, name='genact2', act_type='relu')
g3 = mx.sym.Deconvolution(gact2, name='gen3', kernel=(5,5), stride=(2,2), target_shape=(8,8), num_filter=ngf*2, no_bias=no_bias)
gbn3 = BatchNorm(g3, name='genbn3', fix_gamma=fix_gamma, eps=eps)
gact3 = mx.sym.Activation(gbn3, name='genact3', act_type='relu')
g4 = mx.sym.Deconvolution(gact3, name='gen4', kernel=(5,5), stride=(2,2), target_shape=(16,16), num_filter=ngf, no_bias=no_bias)
gbn4 = BatchNorm(g4, name='genbn4', fix_gamma=fix_gamma, eps=eps)
gact4 = mx.sym.Activation(gbn4, name='genact4', act_type='relu')
g5 = mx.sym.Deconvolution(gact4, name='gen5', kernel=(5,5), stride=(2,2), target_shape=(32,32), num_filter=nc, no_bias=no_bias)
gout = mx.sym.Activation(g5, name='genact5', act_type=activation)
return gout
|
python
|
def generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=100, activation='sigmoid'):
'''The genrator is a CNN which takes 100 dimensional embedding as input
and reconstructs the input image given to the encoder
'''
BatchNorm = mx.sym.BatchNorm
rand = mx.sym.Variable('rand')
rand = mx.sym.Reshape(rand, shape=(-1, z_dim, 1, 1))
g1 = mx.sym.Deconvolution(rand, name='gen1', kernel=(5,5), stride=(2,2),target_shape=(2,2), num_filter=ngf*8, no_bias=no_bias)
gbn1 = BatchNorm(g1, name='genbn1', fix_gamma=fix_gamma, eps=eps)
gact1 = mx.sym.Activation(gbn1, name="genact1", act_type="relu")
g2 = mx.sym.Deconvolution(gact1, name='gen2', kernel=(5,5), stride=(2,2),target_shape=(4,4), num_filter=ngf*4, no_bias=no_bias)
gbn2 = BatchNorm(g2, name='genbn2', fix_gamma=fix_gamma, eps=eps)
gact2 = mx.sym.Activation(gbn2, name='genact2', act_type='relu')
g3 = mx.sym.Deconvolution(gact2, name='gen3', kernel=(5,5), stride=(2,2), target_shape=(8,8), num_filter=ngf*2, no_bias=no_bias)
gbn3 = BatchNorm(g3, name='genbn3', fix_gamma=fix_gamma, eps=eps)
gact3 = mx.sym.Activation(gbn3, name='genact3', act_type='relu')
g4 = mx.sym.Deconvolution(gact3, name='gen4', kernel=(5,5), stride=(2,2), target_shape=(16,16), num_filter=ngf, no_bias=no_bias)
gbn4 = BatchNorm(g4, name='genbn4', fix_gamma=fix_gamma, eps=eps)
gact4 = mx.sym.Activation(gbn4, name='genact4', act_type='relu')
g5 = mx.sym.Deconvolution(gact4, name='gen5', kernel=(5,5), stride=(2,2), target_shape=(32,32), num_filter=nc, no_bias=no_bias)
gout = mx.sym.Activation(g5, name='genact5', act_type=activation)
return gout
|
[
"def",
"generator",
"(",
"ngf",
",",
"nc",
",",
"no_bias",
"=",
"True",
",",
"fix_gamma",
"=",
"True",
",",
"eps",
"=",
"1e-5",
"+",
"1e-12",
",",
"z_dim",
"=",
"100",
",",
"activation",
"=",
"'sigmoid'",
")",
":",
"BatchNorm",
"=",
"mx",
".",
"sym",
".",
"BatchNorm",
"rand",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'rand'",
")",
"rand",
"=",
"mx",
".",
"sym",
".",
"Reshape",
"(",
"rand",
",",
"shape",
"=",
"(",
"-",
"1",
",",
"z_dim",
",",
"1",
",",
"1",
")",
")",
"g1",
"=",
"mx",
".",
"sym",
".",
"Deconvolution",
"(",
"rand",
",",
"name",
"=",
"'gen1'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"target_shape",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"ngf",
"*",
"8",
",",
"no_bias",
"=",
"no_bias",
")",
"gbn1",
"=",
"BatchNorm",
"(",
"g1",
",",
"name",
"=",
"'genbn1'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"gact1",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"gbn1",
",",
"name",
"=",
"\"genact1\"",
",",
"act_type",
"=",
"\"relu\"",
")",
"g2",
"=",
"mx",
".",
"sym",
".",
"Deconvolution",
"(",
"gact1",
",",
"name",
"=",
"'gen2'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"target_shape",
"=",
"(",
"4",
",",
"4",
")",
",",
"num_filter",
"=",
"ngf",
"*",
"4",
",",
"no_bias",
"=",
"no_bias",
")",
"gbn2",
"=",
"BatchNorm",
"(",
"g2",
",",
"name",
"=",
"'genbn2'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"gact2",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"gbn2",
",",
"name",
"=",
"'genact2'",
",",
"act_type",
"=",
"'relu'",
")",
"g3",
"=",
"mx",
".",
"sym",
".",
"Deconvolution",
"(",
"gact2",
",",
"name",
"=",
"'gen3'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"target_shape",
"=",
"(",
"8",
",",
"8",
")",
",",
"num_filter",
"=",
"ngf",
"*",
"2",
",",
"no_bias",
"=",
"no_bias",
")",
"gbn3",
"=",
"BatchNorm",
"(",
"g3",
",",
"name",
"=",
"'genbn3'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"gact3",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"gbn3",
",",
"name",
"=",
"'genact3'",
",",
"act_type",
"=",
"'relu'",
")",
"g4",
"=",
"mx",
".",
"sym",
".",
"Deconvolution",
"(",
"gact3",
",",
"name",
"=",
"'gen4'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"target_shape",
"=",
"(",
"16",
",",
"16",
")",
",",
"num_filter",
"=",
"ngf",
",",
"no_bias",
"=",
"no_bias",
")",
"gbn4",
"=",
"BatchNorm",
"(",
"g4",
",",
"name",
"=",
"'genbn4'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"gact4",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"gbn4",
",",
"name",
"=",
"'genact4'",
",",
"act_type",
"=",
"'relu'",
")",
"g5",
"=",
"mx",
".",
"sym",
".",
"Deconvolution",
"(",
"gact4",
",",
"name",
"=",
"'gen5'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"target_shape",
"=",
"(",
"32",
",",
"32",
")",
",",
"num_filter",
"=",
"nc",
",",
"no_bias",
"=",
"no_bias",
")",
"gout",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"g5",
",",
"name",
"=",
"'genact5'",
",",
"act_type",
"=",
"activation",
")",
"return",
"gout"
] |
The genrator is a CNN which takes 100 dimensional embedding as input
and reconstructs the input image given to the encoder
|
[
"The",
"genrator",
"is",
"a",
"CNN",
"which",
"takes",
"100",
"dimensional",
"embedding",
"as",
"input",
"and",
"reconstructs",
"the",
"input",
"image",
"given",
"to",
"the",
"encoder"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L88-L116
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
discriminator1
|
def discriminator1(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
'''First part of the discriminator which takes a 32x32 image as input
and output a convolutional feature map, this is required to calculate
the layer loss'''
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
d1 = mx.sym.Convolution(data, name='d1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf, no_bias=no_bias)
dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)
d2 = mx.sym.Convolution(dact1, name='d2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*2, no_bias=no_bias)
dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)
d3 = mx.sym.Convolution(dact2, name='d3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*4, no_bias=no_bias)
dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)
return dact3
|
python
|
def discriminator1(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
'''First part of the discriminator which takes a 32x32 image as input
and output a convolutional feature map, this is required to calculate
the layer loss'''
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
d1 = mx.sym.Convolution(data, name='d1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf, no_bias=no_bias)
dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)
d2 = mx.sym.Convolution(dact1, name='d2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*2, no_bias=no_bias)
dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)
d3 = mx.sym.Convolution(dact2, name='d3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*4, no_bias=no_bias)
dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)
return dact3
|
[
"def",
"discriminator1",
"(",
"ndf",
",",
"no_bias",
"=",
"True",
",",
"fix_gamma",
"=",
"True",
",",
"eps",
"=",
"1e-5",
"+",
"1e-12",
")",
":",
"BatchNorm",
"=",
"mx",
".",
"sym",
".",
"BatchNorm",
"data",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'data'",
")",
"d1",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"data",
",",
"name",
"=",
"'d1'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"ndf",
",",
"no_bias",
"=",
"no_bias",
")",
"dact1",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"d1",
",",
"name",
"=",
"'dact1'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"d2",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"dact1",
",",
"name",
"=",
"'d2'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"ndf",
"*",
"2",
",",
"no_bias",
"=",
"no_bias",
")",
"dbn2",
"=",
"BatchNorm",
"(",
"d2",
",",
"name",
"=",
"'dbn2'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"dact2",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"dbn2",
",",
"name",
"=",
"'dact2'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"d3",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"dact2",
",",
"name",
"=",
"'d3'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"ndf",
"*",
"4",
",",
"no_bias",
"=",
"no_bias",
")",
"dbn3",
"=",
"BatchNorm",
"(",
"d3",
",",
"name",
"=",
"'dbn3'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"dact3",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"dbn3",
",",
"name",
"=",
"'dact3'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"return",
"dact3"
] |
First part of the discriminator which takes a 32x32 image as input
and output a convolutional feature map, this is required to calculate
the layer loss
|
[
"First",
"part",
"of",
"the",
"discriminator",
"which",
"takes",
"a",
"32x32",
"image",
"as",
"input",
"and",
"output",
"a",
"convolutional",
"feature",
"map",
"this",
"is",
"required",
"to",
"calculate",
"the",
"layer",
"loss"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L118-L137
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
discriminator2
|
def discriminator2(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
'''Second part of the discriminator which takes a 256x8x8 feature map as input
and generates the loss based on whether the input image was a real one or fake one'''
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
d4 = mx.sym.Convolution(data, name='d4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*8, no_bias=no_bias)
dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
h = mx.sym.Flatten(dact4)
d5 = mx.sym.FullyConnected(h, num_hidden=1, name="d5")
dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
return dloss
|
python
|
def discriminator2(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
'''Second part of the discriminator which takes a 256x8x8 feature map as input
and generates the loss based on whether the input image was a real one or fake one'''
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
d4 = mx.sym.Convolution(data, name='d4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*8, no_bias=no_bias)
dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
h = mx.sym.Flatten(dact4)
d5 = mx.sym.FullyConnected(h, num_hidden=1, name="d5")
dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
return dloss
|
[
"def",
"discriminator2",
"(",
"ndf",
",",
"no_bias",
"=",
"True",
",",
"fix_gamma",
"=",
"True",
",",
"eps",
"=",
"1e-5",
"+",
"1e-12",
")",
":",
"BatchNorm",
"=",
"mx",
".",
"sym",
".",
"BatchNorm",
"data",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'data'",
")",
"label",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'label'",
")",
"d4",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"data",
",",
"name",
"=",
"'d4'",
",",
"kernel",
"=",
"(",
"5",
",",
"5",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"pad",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"ndf",
"*",
"8",
",",
"no_bias",
"=",
"no_bias",
")",
"dbn4",
"=",
"BatchNorm",
"(",
"d4",
",",
"name",
"=",
"'dbn4'",
",",
"fix_gamma",
"=",
"fix_gamma",
",",
"eps",
"=",
"eps",
")",
"dact4",
"=",
"mx",
".",
"sym",
".",
"LeakyReLU",
"(",
"dbn4",
",",
"name",
"=",
"'dact4'",
",",
"act_type",
"=",
"'leaky'",
",",
"slope",
"=",
"0.2",
")",
"h",
"=",
"mx",
".",
"sym",
".",
"Flatten",
"(",
"dact4",
")",
"d5",
"=",
"mx",
".",
"sym",
".",
"FullyConnected",
"(",
"h",
",",
"num_hidden",
"=",
"1",
",",
"name",
"=",
"\"d5\"",
")",
"dloss",
"=",
"mx",
".",
"sym",
".",
"LogisticRegressionOutput",
"(",
"data",
"=",
"d5",
",",
"label",
"=",
"label",
",",
"name",
"=",
"'dloss'",
")",
"return",
"dloss"
] |
Second part of the discriminator which takes a 256x8x8 feature map as input
and generates the loss based on whether the input image was a real one or fake one
|
[
"Second",
"part",
"of",
"the",
"discriminator",
"which",
"takes",
"a",
"256x8x8",
"feature",
"map",
"as",
"input",
"and",
"generates",
"the",
"loss",
"based",
"on",
"whether",
"the",
"input",
"image",
"was",
"a",
"real",
"one",
"or",
"fake",
"one"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L139-L159
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
GaussianLogDensity
|
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON = 1e-6):
'''GaussianLogDensity loss calculation for layer wise loss
'''
c = mx.sym.ones_like(log_var)*2.0 * 3.1416
c = mx.symbol.log(c)
var = mx.sym.exp(log_var)
x_mu2 = mx.symbol.square(x - mu) # [Issue] not sure the dim works or not?
x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
log_prob = -0.5 * (c + log_var + x_mu2_over_var)
log_prob = mx.symbol.sum(log_prob, axis=1, name=name) # keep_dims=True,
return log_prob
|
python
|
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON = 1e-6):
'''GaussianLogDensity loss calculation for layer wise loss
'''
c = mx.sym.ones_like(log_var)*2.0 * 3.1416
c = mx.symbol.log(c)
var = mx.sym.exp(log_var)
x_mu2 = mx.symbol.square(x - mu) # [Issue] not sure the dim works or not?
x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
log_prob = -0.5 * (c + log_var + x_mu2_over_var)
log_prob = mx.symbol.sum(log_prob, axis=1, name=name) # keep_dims=True,
return log_prob
|
[
"def",
"GaussianLogDensity",
"(",
"x",
",",
"mu",
",",
"log_var",
",",
"name",
"=",
"'GaussianLogDensity'",
",",
"EPSILON",
"=",
"1e-6",
")",
":",
"c",
"=",
"mx",
".",
"sym",
".",
"ones_like",
"(",
"log_var",
")",
"*",
"2.0",
"*",
"3.1416",
"c",
"=",
"mx",
".",
"symbol",
".",
"log",
"(",
"c",
")",
"var",
"=",
"mx",
".",
"sym",
".",
"exp",
"(",
"log_var",
")",
"x_mu2",
"=",
"mx",
".",
"symbol",
".",
"square",
"(",
"x",
"-",
"mu",
")",
"# [Issue] not sure the dim works or not?",
"x_mu2_over_var",
"=",
"mx",
".",
"symbol",
".",
"broadcast_div",
"(",
"x_mu2",
",",
"var",
"+",
"EPSILON",
")",
"log_prob",
"=",
"-",
"0.5",
"*",
"(",
"c",
"+",
"log_var",
"+",
"x_mu2_over_var",
")",
"log_prob",
"=",
"mx",
".",
"symbol",
".",
"sum",
"(",
"log_prob",
",",
"axis",
"=",
"1",
",",
"name",
"=",
"name",
")",
"# keep_dims=True,",
"return",
"log_prob"
] |
GaussianLogDensity loss calculation for layer wise loss
|
[
"GaussianLogDensity",
"loss",
"calculation",
"for",
"layer",
"wise",
"loss"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L161-L171
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
DiscriminatorLayerLoss
|
def DiscriminatorLayerLoss():
'''Calculate the discriminator layer loss
'''
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
data = mx.sym.Flatten(data)
label = mx.sym.Flatten(label)
label = mx.sym.BlockGrad(label)
zeros = mx.sym.zeros_like(data)
output = -GaussianLogDensity(label, data, zeros)
dloss = mx.symbol.MakeLoss(mx.symbol.mean(output),name='lloss')
return dloss
|
python
|
def DiscriminatorLayerLoss():
'''Calculate the discriminator layer loss
'''
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
data = mx.sym.Flatten(data)
label = mx.sym.Flatten(label)
label = mx.sym.BlockGrad(label)
zeros = mx.sym.zeros_like(data)
output = -GaussianLogDensity(label, data, zeros)
dloss = mx.symbol.MakeLoss(mx.symbol.mean(output),name='lloss')
return dloss
|
[
"def",
"DiscriminatorLayerLoss",
"(",
")",
":",
"data",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'data'",
")",
"label",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'label'",
")",
"data",
"=",
"mx",
".",
"sym",
".",
"Flatten",
"(",
"data",
")",
"label",
"=",
"mx",
".",
"sym",
".",
"Flatten",
"(",
"label",
")",
"label",
"=",
"mx",
".",
"sym",
".",
"BlockGrad",
"(",
"label",
")",
"zeros",
"=",
"mx",
".",
"sym",
".",
"zeros_like",
"(",
"data",
")",
"output",
"=",
"-",
"GaussianLogDensity",
"(",
"label",
",",
"data",
",",
"zeros",
")",
"dloss",
"=",
"mx",
".",
"symbol",
".",
"MakeLoss",
"(",
"mx",
".",
"symbol",
".",
"mean",
"(",
"output",
")",
",",
"name",
"=",
"'lloss'",
")",
"return",
"dloss"
] |
Calculate the discriminator layer loss
|
[
"Calculate",
"the",
"discriminator",
"layer",
"loss"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L173-L192
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
KLDivergenceLoss
|
def KLDivergenceLoss():
'''KLDivergenceLoss loss
'''
data = mx.sym.Variable('data')
mu1, lv1 = mx.sym.split(data, num_outputs=2, axis=0)
mu2 = mx.sym.zeros_like(mu1)
lv2 = mx.sym.zeros_like(lv1)
v1 = mx.sym.exp(lv1)
v2 = mx.sym.exp(lv2)
mu_diff_sq = mx.sym.square(mu1 - mu2)
dimwise_kld = .5 * (
(lv2 - lv1) + mx.symbol.broadcast_div(v1, v2) + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.)
KL = mx.symbol.sum(dimwise_kld, axis=1)
KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL),name='KLloss')
return KLloss
|
python
|
def KLDivergenceLoss():
'''KLDivergenceLoss loss
'''
data = mx.sym.Variable('data')
mu1, lv1 = mx.sym.split(data, num_outputs=2, axis=0)
mu2 = mx.sym.zeros_like(mu1)
lv2 = mx.sym.zeros_like(lv1)
v1 = mx.sym.exp(lv1)
v2 = mx.sym.exp(lv2)
mu_diff_sq = mx.sym.square(mu1 - mu2)
dimwise_kld = .5 * (
(lv2 - lv1) + mx.symbol.broadcast_div(v1, v2) + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.)
KL = mx.symbol.sum(dimwise_kld, axis=1)
KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL),name='KLloss')
return KLloss
|
[
"def",
"KLDivergenceLoss",
"(",
")",
":",
"data",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'data'",
")",
"mu1",
",",
"lv1",
"=",
"mx",
".",
"sym",
".",
"split",
"(",
"data",
",",
"num_outputs",
"=",
"2",
",",
"axis",
"=",
"0",
")",
"mu2",
"=",
"mx",
".",
"sym",
".",
"zeros_like",
"(",
"mu1",
")",
"lv2",
"=",
"mx",
".",
"sym",
".",
"zeros_like",
"(",
"lv1",
")",
"v1",
"=",
"mx",
".",
"sym",
".",
"exp",
"(",
"lv1",
")",
"v2",
"=",
"mx",
".",
"sym",
".",
"exp",
"(",
"lv2",
")",
"mu_diff_sq",
"=",
"mx",
".",
"sym",
".",
"square",
"(",
"mu1",
"-",
"mu2",
")",
"dimwise_kld",
"=",
".5",
"*",
"(",
"(",
"lv2",
"-",
"lv1",
")",
"+",
"mx",
".",
"symbol",
".",
"broadcast_div",
"(",
"v1",
",",
"v2",
")",
"+",
"mx",
".",
"symbol",
".",
"broadcast_div",
"(",
"mu_diff_sq",
",",
"v2",
")",
"-",
"1.",
")",
"KL",
"=",
"mx",
".",
"symbol",
".",
"sum",
"(",
"dimwise_kld",
",",
"axis",
"=",
"1",
")",
"KLloss",
"=",
"mx",
".",
"symbol",
".",
"MakeLoss",
"(",
"mx",
".",
"symbol",
".",
"mean",
"(",
"KL",
")",
",",
"name",
"=",
"'KLloss'",
")",
"return",
"KLloss"
] |
KLDivergenceLoss loss
|
[
"KLDivergenceLoss",
"loss"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L194-L211
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
get_data
|
def get_data(path, activation):
'''Get the dataset
'''
data = []
image_names = []
for filename in os.listdir(path):
img = cv2.imread(os.path.join(path,filename), cv2.IMREAD_GRAYSCALE)
image_names.append(filename)
if img is not None:
data.append(img)
data = np.asarray(data)
if activation == 'sigmoid':
data = data.astype(np.float32)/(255.0)
elif activation == 'tanh':
data = data.astype(np.float32)/(255.0/2) - 1.0
data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2]))
np.random.seed(1234)
p = np.random.permutation(data.shape[0])
X = data[p]
return X, image_names
|
python
|
def get_data(path, activation):
'''Get the dataset
'''
data = []
image_names = []
for filename in os.listdir(path):
img = cv2.imread(os.path.join(path,filename), cv2.IMREAD_GRAYSCALE)
image_names.append(filename)
if img is not None:
data.append(img)
data = np.asarray(data)
if activation == 'sigmoid':
data = data.astype(np.float32)/(255.0)
elif activation == 'tanh':
data = data.astype(np.float32)/(255.0/2) - 1.0
data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2]))
np.random.seed(1234)
p = np.random.permutation(data.shape[0])
X = data[p]
return X, image_names
|
[
"def",
"get_data",
"(",
"path",
",",
"activation",
")",
":",
"data",
"=",
"[",
"]",
"image_names",
"=",
"[",
"]",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"img",
"=",
"cv2",
".",
"imread",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
",",
"cv2",
".",
"IMREAD_GRAYSCALE",
")",
"image_names",
".",
"append",
"(",
"filename",
")",
"if",
"img",
"is",
"not",
"None",
":",
"data",
".",
"append",
"(",
"img",
")",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"if",
"activation",
"==",
"'sigmoid'",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"(",
"255.0",
")",
"elif",
"activation",
"==",
"'tanh'",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"(",
"255.0",
"/",
"2",
")",
"-",
"1.0",
"data",
"=",
"data",
".",
"reshape",
"(",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"1",
",",
"data",
".",
"shape",
"[",
"1",
"]",
",",
"data",
".",
"shape",
"[",
"2",
"]",
")",
")",
"np",
".",
"random",
".",
"seed",
"(",
"1234",
")",
"p",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"X",
"=",
"data",
"[",
"p",
"]",
"return",
"X",
",",
"image_names"
] |
Get the dataset
|
[
"Get",
"the",
"dataset"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L213-L237
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
fill_buf
|
def fill_buf(buf, i, img, shape):
'''fill the ith grid of the buffer matrix with the values from the img
buf : buffer matrix
i : serial of the image in the 2D grid
img : image data
shape : ( height width depth ) of image'''
# grid height is a multiple of individual image height
m = buf.shape[0]/shape[0]
sx = (i%m)*shape[1]
sy = (i//m)*shape[0]
sx = int(sx)
sy = int(sy)
buf[sy:sy+shape[0], sx:sx+shape[1], :] = img
|
python
|
def fill_buf(buf, i, img, shape):
'''fill the ith grid of the buffer matrix with the values from the img
buf : buffer matrix
i : serial of the image in the 2D grid
img : image data
shape : ( height width depth ) of image'''
# grid height is a multiple of individual image height
m = buf.shape[0]/shape[0]
sx = (i%m)*shape[1]
sy = (i//m)*shape[0]
sx = int(sx)
sy = int(sy)
buf[sy:sy+shape[0], sx:sx+shape[1], :] = img
|
[
"def",
"fill_buf",
"(",
"buf",
",",
"i",
",",
"img",
",",
"shape",
")",
":",
"# grid height is a multiple of individual image height",
"m",
"=",
"buf",
".",
"shape",
"[",
"0",
"]",
"/",
"shape",
"[",
"0",
"]",
"sx",
"=",
"(",
"i",
"%",
"m",
")",
"*",
"shape",
"[",
"1",
"]",
"sy",
"=",
"(",
"i",
"//",
"m",
")",
"*",
"shape",
"[",
"0",
"]",
"sx",
"=",
"int",
"(",
"sx",
")",
"sy",
"=",
"int",
"(",
"sy",
")",
"buf",
"[",
"sy",
":",
"sy",
"+",
"shape",
"[",
"0",
"]",
",",
"sx",
":",
"sx",
"+",
"shape",
"[",
"1",
"]",
",",
":",
"]",
"=",
"img"
] |
fill the ith grid of the buffer matrix with the values from the img
buf : buffer matrix
i : serial of the image in the 2D grid
img : image data
shape : ( height width depth ) of image
|
[
"fill",
"the",
"ith",
"grid",
"of",
"the",
"buffer",
"matrix",
"with",
"the",
"values",
"from",
"the",
"img",
"buf",
":",
"buffer",
"matrix",
"i",
":",
"serial",
"of",
"the",
"image",
"in",
"the",
"2D",
"grid",
"img",
":",
"image",
"data",
"shape",
":",
"(",
"height",
"width",
"depth",
")",
"of",
"image"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L254-L268
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
visual
|
def visual(title, X, activation):
'''create a grid of images and save it as a final image
title : grid image name
X : array of images
'''
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
if activation == 'sigmoid':
X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
elif activation == 'tanh':
X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
cv2.imwrite('%s.jpg' % (title), buff)
|
python
|
def visual(title, X, activation):
'''create a grid of images and save it as a final image
title : grid image name
X : array of images
'''
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
if activation == 'sigmoid':
X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
elif activation == 'tanh':
X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
cv2.imwrite('%s.jpg' % (title), buff)
|
[
"def",
"visual",
"(",
"title",
",",
"X",
",",
"activation",
")",
":",
"assert",
"len",
"(",
"X",
".",
"shape",
")",
"==",
"4",
"X",
"=",
"X",
".",
"transpose",
"(",
"(",
"0",
",",
"2",
",",
"3",
",",
"1",
")",
")",
"if",
"activation",
"==",
"'sigmoid'",
":",
"X",
"=",
"np",
".",
"clip",
"(",
"(",
"X",
")",
"*",
"(",
"255.0",
")",
",",
"0",
",",
"255",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"elif",
"activation",
"==",
"'tanh'",
":",
"X",
"=",
"np",
".",
"clip",
"(",
"(",
"X",
"+",
"1.0",
")",
"*",
"(",
"255.0",
"/",
"2.0",
")",
",",
"0",
",",
"255",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"n",
"=",
"np",
".",
"ceil",
"(",
"np",
".",
"sqrt",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
")",
"buff",
"=",
"np",
".",
"zeros",
"(",
"(",
"int",
"(",
"n",
"*",
"X",
".",
"shape",
"[",
"1",
"]",
")",
",",
"int",
"(",
"n",
"*",
"X",
".",
"shape",
"[",
"2",
"]",
")",
",",
"int",
"(",
"X",
".",
"shape",
"[",
"3",
"]",
")",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"for",
"i",
",",
"img",
"in",
"enumerate",
"(",
"X",
")",
":",
"fill_buf",
"(",
"buff",
",",
"i",
",",
"img",
",",
"X",
".",
"shape",
"[",
"1",
":",
"3",
"]",
")",
"cv2",
".",
"imwrite",
"(",
"'%s.jpg'",
"%",
"(",
"title",
")",
",",
"buff",
")"
] |
create a grid of images and save it as a final image
title : grid image name
X : array of images
|
[
"create",
"a",
"grid",
"of",
"images",
"and",
"save",
"it",
"as",
"a",
"final",
"image",
"title",
":",
"grid",
"image",
"name",
"X",
":",
"array",
"of",
"images"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L270-L286
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
train
|
def train(dataset, nef, ndf, ngf, nc, batch_size, Z, lr, beta1, epsilon, ctx, check_point, g_dl_weight, output_path, checkpoint_path, data_path, activation,num_epoch, save_after_every, visualize_after_every, show_after_every):
'''adversarial training of the VAE
'''
#encoder
z_mu, z_lv, z = encoder(nef, Z, batch_size)
symE = mx.sym.Group([z_mu, z_lv, z])
#generator
symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim = Z, activation=activation )
#discriminator
h = discriminator1(ndf)
dloss = discriminator2(ndf)
symD1 = h
symD2 = dloss
# ==============data==============
X_train, _ = get_data(data_path, activation)
train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size, shuffle=True)
rand_iter = RandIter(batch_size, Z)
label = mx.nd.zeros((batch_size,), ctx=ctx)
# =============module E=============
modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
modE.bind(data_shapes=train_iter.provide_data)
modE.init_params(initializer=mx.init.Normal(0.02))
modE.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-6,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods = [modE]
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=rand_iter.provide_data, inputs_need_grad=True)
modG.init_params(initializer=mx.init.Normal(0.02))
modG.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-6,
'beta1': beta1,
'epsilon': epsilon,
})
mods.append(modG)
# =============module D=============
modD1 = mx.mod.Module(symD1, label_names=[], context=ctx)
modD2 = mx.mod.Module(symD2, label_names=('label',), context=ctx)
modD = mx.mod.SequentialModule()
modD.add(modD1).add(modD2, take_labels=True, auto_wiring=True)
modD.bind(data_shapes=train_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
modD.init_params(initializer=mx.init.Normal(0.02))
modD.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-3,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods.append(modD)
# =============module DL=============
symDL = DiscriminatorLayerLoss()
modDL = mx.mod.Module(symbol=symDL, data_names=('data',), label_names=('label',), context=ctx)
modDL.bind(data_shapes=[('data', (batch_size,nef * 4,4,4))], ################################################################################################################################ fix 512 here
label_shapes=[('label', (batch_size,nef * 4,4,4))],
inputs_need_grad=True)
modDL.init_params(initializer=mx.init.Normal(0.02))
modDL.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
# =============module KL=============
symKL = KLDivergenceLoss()
modKL = mx.mod.Module(symbol=symKL, data_names=('data',), label_names=None, context=ctx)
modKL.bind(data_shapes=[('data', (batch_size*2,Z))],
inputs_need_grad=True)
modKL.init_params(initializer=mx.init.Normal(0.02))
modKL.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods.append(modKL)
def norm_stat(d):
return mx.nd.norm(d)/np.sqrt(d.size)
mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
mon = None
if mon is not None:
for mod in mods:
pass
def facc(label, pred):
'''calculating prediction accuracy
'''
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
def fentropy(label, pred):
'''calculating binary cross-entropy loss
'''
pred = pred.ravel()
label = label.ravel()
return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
def kldivergence(label, pred):
'''calculating KL divergence loss
'''
mean, log_var = np.split(pred, 2, axis=0)
var = np.exp(log_var)
KLLoss = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
KLLoss = KLLoss / nElements
return KLLoss
mG = mx.metric.CustomMetric(fentropy)
mD = mx.metric.CustomMetric(fentropy)
mE = mx.metric.CustomMetric(kldivergence)
mACC = mx.metric.CustomMetric(facc)
print('Training...')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
# =============train===============
for epoch in range(num_epoch):
train_iter.reset()
for t, batch in enumerate(train_iter):
rbatch = rand_iter.next()
if mon is not None:
mon.tic()
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
# update discriminator on fake
label[:] = 0
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
gradD11 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
gradD12 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
#update discriminator on decoded
modE.forward(batch, is_train=True)
mu, lv, z = modE.get_outputs()
z = z.reshape((batch_size, Z, 1, 1))
sample = mx.io.DataBatch([z], label=None, provide_data = [('rand', (batch_size, Z, 1, 1))])
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 0
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
#modD.update()
gradD21 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
gradD22 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update discriminator on real
label[:] = 1
batch.label = [label]
modD.forward(batch, is_train=True)
lx = [out.copyto(out.context) for out in modD1.get_outputs()]
modD.backward()
for gradsr, gradsf, gradsd in zip(modD1._exec_group.grad_arrays, gradD11, gradD21):
for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
gradr += 0.5 * (gradf + gradd)
for gradsr, gradsf, gradsd in zip(modD2._exec_group.grad_arrays, gradD12, gradD22):
for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
gradr += 0.5 * (gradf + gradd)
modD.update()
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
modG.update()
mG.update([label], modD.get_outputs())
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
modG.update()
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
#update generator
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
DLloss = modDL.get_outputs()
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
#update encoder
nElements = batch_size
modKL.forward(mx.io.DataBatch([mx.ndarray.concat(mu,lv, dim=0)]), is_train=True)
KLloss = modKL.get_outputs()
modKL.backward()
gradKLLoss = modKL.get_input_grads()
diffG = modG.get_input_grads()
diffG = diffG[0].reshape((batch_size, Z))
modE.backward(mx.ndarray.split(gradKLLoss[0], num_outputs=2, axis=0) + [diffG])
modE.update()
pred = mx.ndarray.concat(mu,lv, dim=0)
mE.update([pred], [pred])
if mon is not None:
mon.toc_print()
t += 1
if t % show_after_every == 0:
print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get(), mE.get(), KLloss[0].asnumpy(), DLloss[0].asnumpy())
mACC.reset()
mG.reset()
mD.reset()
mE.reset()
if epoch % visualize_after_every == 0:
visual(output_path +'gout'+str(epoch), outG[0].asnumpy(), activation)
visual(output_path + 'data'+str(epoch), batch.data[0].asnumpy(), activation)
if check_point and epoch % save_after_every == 0:
print('Saving...')
modG.save_params(checkpoint_path + '/%s_G-%04d.params'%(dataset, epoch))
modD.save_params(checkpoint_path + '/%s_D-%04d.params'%(dataset, epoch))
modE.save_params(checkpoint_path + '/%s_E-%04d.params'%(dataset, epoch))
|
python
|
def train(dataset, nef, ndf, ngf, nc, batch_size, Z, lr, beta1, epsilon, ctx, check_point, g_dl_weight, output_path, checkpoint_path, data_path, activation,num_epoch, save_after_every, visualize_after_every, show_after_every):
    '''Adversarial training of the VAE (VAE-GAN).

    Builds and jointly trains several cooperating MXNet modules:

      * modE  -- encoder producing (mu, log-variance, sampled z)
      * modG  -- generator/decoder mapping a latent z to an image
      * modD  -- discriminator, split into a feature extractor (modD1) and a
                 classifier head (modD2) chained in a SequentialModule so the
                 intermediate features are accessible
      * modDL -- layer-wise feature-matching loss computed on modD1 features
      * modKL -- KL-divergence loss on the encoder's (mu, log-var) output

    Per mini-batch the discriminator is shown three kinds of input (fakes from
    random z, reconstructions of real data, and real data); its gradients from
    the three passes are averaged before one update.  The generator is then
    updated from the GAN loss plus the feature-matching loss, and the encoder
    from the KL term plus the gradient flowing back through the generator.

    Parameters (selection):
        dataset            : name used in checkpoint file names
        nef, ndf, ngf, nc  : encoder/discriminator/generator filter counts and
                             number of image channels
        batch_size, Z      : mini-batch size and latent dimensionality
        lr, beta1, epsilon : Adam hyper-parameters shared by all optimizers
        g_dl_weight        : weight of the feature-matching (DL) gradient when
                             mixed into the generator gradient
        check_point        : if truthy, parameters are saved every
                             `save_after_every` epochs under `checkpoint_path`

    Side effects: prints progress, writes visualization images under
    `output_path`, and optionally writes parameter checkpoints.
    '''
    # ---- build the computation graphs (symbols) ----
    #encoder
    z_mu, z_lv, z = encoder(nef, Z, batch_size)
    symE = mx.sym.Group([z_mu, z_lv, z])
    #generator
    symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim = Z, activation=activation )
    #discriminator
    # kept as two separate symbols so the intermediate features (symD1) can be
    # reused by the feature-matching loss below
    h = discriminator1(ndf)
    dloss = discriminator2(ndf)
    symD1 = h
    symD2 = dloss
    # ==============data==============
    X_train, _ = get_data(data_path, activation)
    train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size, shuffle=True)
    rand_iter = RandIter(batch_size, Z)
    # reusable label buffer for real (1) / fake (0) targets
    label = mx.nd.zeros((batch_size,), ctx=ctx)
    # =============module E=============
    modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
    modE.bind(data_shapes=train_iter.provide_data)
    modE.init_params(initializer=mx.init.Normal(0.02))
    modE.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-6,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods = [modE]
    # =============module G=============
    # inputs_need_grad=True: the encoder update needs the gradient w.r.t. z
    modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
    modG.bind(data_shapes=rand_iter.provide_data, inputs_need_grad=True)
    modG.init_params(initializer=mx.init.Normal(0.02))
    # NOTE(review): unlike the other optimizers, no 'rescale_grad' is set here
    # -- presumably intentional because G's gradients are mixed manually below,
    # but worth confirming.
    modG.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-6,
            'beta1': beta1,
            'epsilon': epsilon,
        })
    mods.append(modG)
    # =============module D=============
    modD1 = mx.mod.Module(symD1, label_names=[], context=ctx)
    modD2 = mx.mod.Module(symD2, label_names=('label',), context=ctx)
    modD = mx.mod.SequentialModule()
    modD.add(modD1).add(modD2, take_labels=True, auto_wiring=True)
    # inputs_need_grad=True: the generator update needs dD/d(input image)
    modD.bind(data_shapes=train_iter.provide_data,
              label_shapes=[('label', (batch_size,))],
              inputs_need_grad=True)
    modD.init_params(initializer=mx.init.Normal(0.02))
    modD.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-3,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods.append(modD)
    # =============module DL=============
    # feature-matching loss: compares modD1 features of generated vs. real
    # images; shapes below assume modD1 emits (batch, nef*4, 4, 4) feature maps.
    symDL = DiscriminatorLayerLoss()
    modDL = mx.mod.Module(symbol=symDL, data_names=('data',), label_names=('label',), context=ctx)
    modDL.bind(data_shapes=[('data', (batch_size,nef * 4,4,4))], ################################################################################################################################ fix 512 here
               label_shapes=[('label', (batch_size,nef * 4,4,4))],
               inputs_need_grad=True)
    modDL.init_params(initializer=mx.init.Normal(0.02))
    modDL.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 0.,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    # =============module KL=============
    # data is mu and log-var concatenated along axis 0, hence batch_size*2 rows
    symKL = KLDivergenceLoss()
    modKL = mx.mod.Module(symbol=symKL, data_names=('data',), label_names=None, context=ctx)
    modKL.bind(data_shapes=[('data', (batch_size*2,Z))],
               inputs_need_grad=True)
    modKL.init_params(initializer=mx.init.Normal(0.02))
    modKL.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 0.,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods.append(modKL)

    def norm_stat(d):
        # gradient statistic used by the (disabled) monitor: RMS-style norm
        return mx.nd.norm(d)/np.sqrt(d.size)
    mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
    # monitoring is disabled by immediately overriding the monitor just built
    mon = None
    if mon is not None:
        for mod in mods:
            # NOTE(review): dead loop -- presumably meant to call
            # mod.install_monitor(mon); currently does nothing.
            pass

    def facc(label, pred):
        '''Binary classification accuracy at a 0.5 threshold.'''
        pred = pred.ravel()
        label = label.ravel()
        return ((pred > 0.5) == label).mean()

    def fentropy(label, pred):
        '''Binary cross-entropy loss (1e-12 added for numerical stability).'''
        pred = pred.ravel()
        label = label.ravel()
        return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()

    def kldivergence(label, pred):
        '''KL divergence between N(mean, var) and N(0, 1).

        `pred` is mu and log-var stacked along axis 0 (see the mE.update call
        in the training loop); `label` is ignored.
        '''
        mean, log_var = np.split(pred, 2, axis=0)
        var = np.exp(log_var)
        KLLoss = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
        # NOTE(review): nElements is a closure over train()'s local, which is
        # only assigned inside the training loop below -- calling this metric
        # before the first batch would raise NameError.
        KLLoss = KLLoss / nElements
        return KLLoss
    mG = mx.metric.CustomMetric(fentropy)
    mD = mx.metric.CustomMetric(fentropy)
    mE = mx.metric.CustomMetric(kldivergence)
    mACC = mx.metric.CustomMetric(facc)
    print('Training...')
    # NOTE(review): `stamp` is computed but never used.
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    # =============train===============
    for epoch in range(num_epoch):
        train_iter.reset()
        for t, batch in enumerate(train_iter):
            rbatch = rand_iter.next()
            if mon is not None:
                mon.tic()
            # ---- discriminator pass 1: fakes from random z ----
            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            # update discriminator on fake
            label[:] = 0
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            # stash D's gradients (copied off the live buffers) for averaging
            gradD11 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD12 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])
            #update discriminator on decoded
            # ---- discriminator pass 2: reconstructions of the real batch ----
            modE.forward(batch, is_train=True)
            mu, lv, z = modE.get_outputs()
            z = z.reshape((batch_size, Z, 1, 1))
            sample = mx.io.DataBatch([z], label=None, provide_data = [('rand', (batch_size, Z, 1, 1))])
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 0
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            #modD.update()
            gradD21 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD22 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])
            # update discriminator on real
            # ---- discriminator pass 3: real data; mix in the stashed
            # gradients from passes 1 and 2 (each at half weight) and update ----
            label[:] = 1
            batch.label = [label]
            modD.forward(batch, is_train=True)
            # keep modD1's features of the REAL batch as the target for the
            # feature-matching (DL) loss below
            lx = [out.copyto(out.context) for out in modD1.get_outputs()]
            modD.backward()
            for gradsr, gradsf, gradsd in zip(modD1._exec_group.grad_arrays, gradD11, gradD21):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    # in-place accumulation into the live gradient buffers
                    gradr += 0.5 * (gradf + gradd)
            for gradsr, gradsf, gradsd in zip(modD2._exec_group.grad_arrays, gradD12, gradD22):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    gradr += 0.5 * (gradf + gradd)
            modD.update()
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])
            # ---- generator update, round 1: GAN gradient from random fakes
            # and reconstructions, plus the feature-matching gradient ----
            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    # NOTE(review): this rebinds the local name `grad` instead
                    # of writing into the gradient NDArray (cf. `gradr += ...`
                    # above) -- as written it has no effect on the update.
                    # An in-place form such as `grad[:] = ...` was likely
                    # intended; confirm against the reference implementation.
                    grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
            modG.update()
            mG.update([label], modD.get_outputs())
            # ---- generator update, round 2: same sequence repeated, giving
            # the generator two updates per discriminator update (appears
            # deliberate; mirrors the original example) ----
            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    # NOTE(review): same no-op rebinding as in round 1 above.
                    grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
            modG.update()
            mG.update([label], modD.get_outputs())
            # ---- propagate feature-matching + KL gradients to the encoder ----
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            #update generator
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            DLloss = modDL.get_outputs()
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            #update encoder
            nElements = batch_size
            modKL.forward(mx.io.DataBatch([mx.ndarray.concat(mu,lv, dim=0)]), is_train=True)
            KLloss = modKL.get_outputs()
            modKL.backward()
            gradKLLoss = modKL.get_input_grads()
            # gradient w.r.t. the generator input z, reshaped back to (N, Z)
            diffG = modG.get_input_grads()
            diffG = diffG[0].reshape((batch_size, Z))
            # encoder backward: [d/dmu, d/dlogvar] from the KL loss, plus
            # the reconstruction-path gradient through z
            modE.backward(mx.ndarray.split(gradKLLoss[0], num_outputs=2, axis=0) + [diffG])
            modE.update()
            pred = mx.ndarray.concat(mu,lv, dim=0)
            mE.update([pred], [pred])
            if mon is not None:
                mon.toc_print()
            # shift to 1-based iteration count for the periodic logging below
            t += 1
            if t % show_after_every == 0:
                print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get(), mE.get(), KLloss[0].asnumpy(), DLloss[0].asnumpy())
                mACC.reset()
                mG.reset()
                mD.reset()
                mE.reset()
        # end-of-epoch visualization: last generated batch and last real batch
        if epoch % visualize_after_every == 0:
            visual(output_path +'gout'+str(epoch), outG[0].asnumpy(), activation)
            visual(output_path + 'data'+str(epoch), batch.data[0].asnumpy(), activation)
        if check_point and epoch % save_after_every == 0:
            print('Saving...')
            modG.save_params(checkpoint_path + '/%s_G-%04d.params'%(dataset, epoch))
            modD.save_params(checkpoint_path + '/%s_D-%04d.params'%(dataset, epoch))
            modE.save_params(checkpoint_path + '/%s_E-%04d.params'%(dataset, epoch))
|
[
"def",
"train",
"(",
"dataset",
",",
"nef",
",",
"ndf",
",",
"ngf",
",",
"nc",
",",
"batch_size",
",",
"Z",
",",
"lr",
",",
"beta1",
",",
"epsilon",
",",
"ctx",
",",
"check_point",
",",
"g_dl_weight",
",",
"output_path",
",",
"checkpoint_path",
",",
"data_path",
",",
"activation",
",",
"num_epoch",
",",
"save_after_every",
",",
"visualize_after_every",
",",
"show_after_every",
")",
":",
"#encoder",
"z_mu",
",",
"z_lv",
",",
"z",
"=",
"encoder",
"(",
"nef",
",",
"Z",
",",
"batch_size",
")",
"symE",
"=",
"mx",
".",
"sym",
".",
"Group",
"(",
"[",
"z_mu",
",",
"z_lv",
",",
"z",
"]",
")",
"#generator",
"symG",
"=",
"generator",
"(",
"ngf",
",",
"nc",
",",
"no_bias",
"=",
"True",
",",
"fix_gamma",
"=",
"True",
",",
"eps",
"=",
"1e-5",
"+",
"1e-12",
",",
"z_dim",
"=",
"Z",
",",
"activation",
"=",
"activation",
")",
"#discriminator",
"h",
"=",
"discriminator1",
"(",
"ndf",
")",
"dloss",
"=",
"discriminator2",
"(",
"ndf",
")",
"symD1",
"=",
"h",
"symD2",
"=",
"dloss",
"# ==============data==============",
"X_train",
",",
"_",
"=",
"get_data",
"(",
"data_path",
",",
"activation",
")",
"train_iter",
"=",
"mx",
".",
"io",
".",
"NDArrayIter",
"(",
"X_train",
",",
"batch_size",
"=",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
"rand_iter",
"=",
"RandIter",
"(",
"batch_size",
",",
"Z",
")",
"label",
"=",
"mx",
".",
"nd",
".",
"zeros",
"(",
"(",
"batch_size",
",",
")",
",",
"ctx",
"=",
"ctx",
")",
"# =============module E=============",
"modE",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
"=",
"symE",
",",
"data_names",
"=",
"(",
"'data'",
",",
")",
",",
"label_names",
"=",
"None",
",",
"context",
"=",
"ctx",
")",
"modE",
".",
"bind",
"(",
"data_shapes",
"=",
"train_iter",
".",
"provide_data",
")",
"modE",
".",
"init_params",
"(",
"initializer",
"=",
"mx",
".",
"init",
".",
"Normal",
"(",
"0.02",
")",
")",
"modE",
".",
"init_optimizer",
"(",
"optimizer",
"=",
"'adam'",
",",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"lr",
",",
"'wd'",
":",
"1e-6",
",",
"'beta1'",
":",
"beta1",
",",
"'epsilon'",
":",
"epsilon",
",",
"'rescale_grad'",
":",
"(",
"1.0",
"/",
"batch_size",
")",
"}",
")",
"mods",
"=",
"[",
"modE",
"]",
"# =============module G=============",
"modG",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
"=",
"symG",
",",
"data_names",
"=",
"(",
"'rand'",
",",
")",
",",
"label_names",
"=",
"None",
",",
"context",
"=",
"ctx",
")",
"modG",
".",
"bind",
"(",
"data_shapes",
"=",
"rand_iter",
".",
"provide_data",
",",
"inputs_need_grad",
"=",
"True",
")",
"modG",
".",
"init_params",
"(",
"initializer",
"=",
"mx",
".",
"init",
".",
"Normal",
"(",
"0.02",
")",
")",
"modG",
".",
"init_optimizer",
"(",
"optimizer",
"=",
"'adam'",
",",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"lr",
",",
"'wd'",
":",
"1e-6",
",",
"'beta1'",
":",
"beta1",
",",
"'epsilon'",
":",
"epsilon",
",",
"}",
")",
"mods",
".",
"append",
"(",
"modG",
")",
"# =============module D=============",
"modD1",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"symD1",
",",
"label_names",
"=",
"[",
"]",
",",
"context",
"=",
"ctx",
")",
"modD2",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"symD2",
",",
"label_names",
"=",
"(",
"'label'",
",",
")",
",",
"context",
"=",
"ctx",
")",
"modD",
"=",
"mx",
".",
"mod",
".",
"SequentialModule",
"(",
")",
"modD",
".",
"add",
"(",
"modD1",
")",
".",
"add",
"(",
"modD2",
",",
"take_labels",
"=",
"True",
",",
"auto_wiring",
"=",
"True",
")",
"modD",
".",
"bind",
"(",
"data_shapes",
"=",
"train_iter",
".",
"provide_data",
",",
"label_shapes",
"=",
"[",
"(",
"'label'",
",",
"(",
"batch_size",
",",
")",
")",
"]",
",",
"inputs_need_grad",
"=",
"True",
")",
"modD",
".",
"init_params",
"(",
"initializer",
"=",
"mx",
".",
"init",
".",
"Normal",
"(",
"0.02",
")",
")",
"modD",
".",
"init_optimizer",
"(",
"optimizer",
"=",
"'adam'",
",",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"lr",
",",
"'wd'",
":",
"1e-3",
",",
"'beta1'",
":",
"beta1",
",",
"'epsilon'",
":",
"epsilon",
",",
"'rescale_grad'",
":",
"(",
"1.0",
"/",
"batch_size",
")",
"}",
")",
"mods",
".",
"append",
"(",
"modD",
")",
"# =============module DL=============",
"symDL",
"=",
"DiscriminatorLayerLoss",
"(",
")",
"modDL",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
"=",
"symDL",
",",
"data_names",
"=",
"(",
"'data'",
",",
")",
",",
"label_names",
"=",
"(",
"'label'",
",",
")",
",",
"context",
"=",
"ctx",
")",
"modDL",
".",
"bind",
"(",
"data_shapes",
"=",
"[",
"(",
"'data'",
",",
"(",
"batch_size",
",",
"nef",
"*",
"4",
",",
"4",
",",
"4",
")",
")",
"]",
",",
"################################################################################################################################ fix 512 here",
"label_shapes",
"=",
"[",
"(",
"'label'",
",",
"(",
"batch_size",
",",
"nef",
"*",
"4",
",",
"4",
",",
"4",
")",
")",
"]",
",",
"inputs_need_grad",
"=",
"True",
")",
"modDL",
".",
"init_params",
"(",
"initializer",
"=",
"mx",
".",
"init",
".",
"Normal",
"(",
"0.02",
")",
")",
"modDL",
".",
"init_optimizer",
"(",
"optimizer",
"=",
"'adam'",
",",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"lr",
",",
"'wd'",
":",
"0.",
",",
"'beta1'",
":",
"beta1",
",",
"'epsilon'",
":",
"epsilon",
",",
"'rescale_grad'",
":",
"(",
"1.0",
"/",
"batch_size",
")",
"}",
")",
"# =============module KL=============",
"symKL",
"=",
"KLDivergenceLoss",
"(",
")",
"modKL",
"=",
"mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
"=",
"symKL",
",",
"data_names",
"=",
"(",
"'data'",
",",
")",
",",
"label_names",
"=",
"None",
",",
"context",
"=",
"ctx",
")",
"modKL",
".",
"bind",
"(",
"data_shapes",
"=",
"[",
"(",
"'data'",
",",
"(",
"batch_size",
"*",
"2",
",",
"Z",
")",
")",
"]",
",",
"inputs_need_grad",
"=",
"True",
")",
"modKL",
".",
"init_params",
"(",
"initializer",
"=",
"mx",
".",
"init",
".",
"Normal",
"(",
"0.02",
")",
")",
"modKL",
".",
"init_optimizer",
"(",
"optimizer",
"=",
"'adam'",
",",
"optimizer_params",
"=",
"{",
"'learning_rate'",
":",
"lr",
",",
"'wd'",
":",
"0.",
",",
"'beta1'",
":",
"beta1",
",",
"'epsilon'",
":",
"epsilon",
",",
"'rescale_grad'",
":",
"(",
"1.0",
"/",
"batch_size",
")",
"}",
")",
"mods",
".",
"append",
"(",
"modKL",
")",
"def",
"norm_stat",
"(",
"d",
")",
":",
"return",
"mx",
".",
"nd",
".",
"norm",
"(",
"d",
")",
"/",
"np",
".",
"sqrt",
"(",
"d",
".",
"size",
")",
"mon",
"=",
"mx",
".",
"mon",
".",
"Monitor",
"(",
"10",
",",
"norm_stat",
",",
"pattern",
"=",
"\".*output|d1_backward_data\"",
",",
"sort",
"=",
"True",
")",
"mon",
"=",
"None",
"if",
"mon",
"is",
"not",
"None",
":",
"for",
"mod",
"in",
"mods",
":",
"pass",
"def",
"facc",
"(",
"label",
",",
"pred",
")",
":",
"'''calculating prediction accuracy\n '''",
"pred",
"=",
"pred",
".",
"ravel",
"(",
")",
"label",
"=",
"label",
".",
"ravel",
"(",
")",
"return",
"(",
"(",
"pred",
">",
"0.5",
")",
"==",
"label",
")",
".",
"mean",
"(",
")",
"def",
"fentropy",
"(",
"label",
",",
"pred",
")",
":",
"'''calculating binary cross-entropy loss\n '''",
"pred",
"=",
"pred",
".",
"ravel",
"(",
")",
"label",
"=",
"label",
".",
"ravel",
"(",
")",
"return",
"-",
"(",
"label",
"*",
"np",
".",
"log",
"(",
"pred",
"+",
"1e-12",
")",
"+",
"(",
"1.",
"-",
"label",
")",
"*",
"np",
".",
"log",
"(",
"1.",
"-",
"pred",
"+",
"1e-12",
")",
")",
".",
"mean",
"(",
")",
"def",
"kldivergence",
"(",
"label",
",",
"pred",
")",
":",
"'''calculating KL divergence loss\n '''",
"mean",
",",
"log_var",
"=",
"np",
".",
"split",
"(",
"pred",
",",
"2",
",",
"axis",
"=",
"0",
")",
"var",
"=",
"np",
".",
"exp",
"(",
"log_var",
")",
"KLLoss",
"=",
"-",
"0.5",
"*",
"np",
".",
"sum",
"(",
"1",
"+",
"log_var",
"-",
"np",
".",
"power",
"(",
"mean",
",",
"2",
")",
"-",
"var",
")",
"KLLoss",
"=",
"KLLoss",
"/",
"nElements",
"return",
"KLLoss",
"mG",
"=",
"mx",
".",
"metric",
".",
"CustomMetric",
"(",
"fentropy",
")",
"mD",
"=",
"mx",
".",
"metric",
".",
"CustomMetric",
"(",
"fentropy",
")",
"mE",
"=",
"mx",
".",
"metric",
".",
"CustomMetric",
"(",
"kldivergence",
")",
"mACC",
"=",
"mx",
".",
"metric",
".",
"CustomMetric",
"(",
"facc",
")",
"print",
"(",
"'Training...'",
")",
"stamp",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y_%m_%d-%H_%M'",
")",
"# =============train===============",
"for",
"epoch",
"in",
"range",
"(",
"num_epoch",
")",
":",
"train_iter",
".",
"reset",
"(",
")",
"for",
"t",
",",
"batch",
"in",
"enumerate",
"(",
"train_iter",
")",
":",
"rbatch",
"=",
"rand_iter",
".",
"next",
"(",
")",
"if",
"mon",
"is",
"not",
"None",
":",
"mon",
".",
"tic",
"(",
")",
"modG",
".",
"forward",
"(",
"rbatch",
",",
"is_train",
"=",
"True",
")",
"outG",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"# update discriminator on fake",
"label",
"[",
":",
"]",
"=",
"0",
"modD",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"outG",
",",
"[",
"label",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"modD",
".",
"backward",
"(",
")",
"gradD11",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modD1",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"gradD12",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modD2",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"modD",
".",
"update_metric",
"(",
"mD",
",",
"[",
"label",
"]",
")",
"modD",
".",
"update_metric",
"(",
"mACC",
",",
"[",
"label",
"]",
")",
"#update discriminator on decoded",
"modE",
".",
"forward",
"(",
"batch",
",",
"is_train",
"=",
"True",
")",
"mu",
",",
"lv",
",",
"z",
"=",
"modE",
".",
"get_outputs",
"(",
")",
"z",
"=",
"z",
".",
"reshape",
"(",
"(",
"batch_size",
",",
"Z",
",",
"1",
",",
"1",
")",
")",
"sample",
"=",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"[",
"z",
"]",
",",
"label",
"=",
"None",
",",
"provide_data",
"=",
"[",
"(",
"'rand'",
",",
"(",
"batch_size",
",",
"Z",
",",
"1",
",",
"1",
")",
")",
"]",
")",
"modG",
".",
"forward",
"(",
"sample",
",",
"is_train",
"=",
"True",
")",
"xz",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"label",
"[",
":",
"]",
"=",
"0",
"modD",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"xz",
",",
"[",
"label",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"modD",
".",
"backward",
"(",
")",
"#modD.update()",
"gradD21",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modD1",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"gradD22",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modD2",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"modD",
".",
"update_metric",
"(",
"mD",
",",
"[",
"label",
"]",
")",
"modD",
".",
"update_metric",
"(",
"mACC",
",",
"[",
"label",
"]",
")",
"# update discriminator on real",
"label",
"[",
":",
"]",
"=",
"1",
"batch",
".",
"label",
"=",
"[",
"label",
"]",
"modD",
".",
"forward",
"(",
"batch",
",",
"is_train",
"=",
"True",
")",
"lx",
"=",
"[",
"out",
".",
"copyto",
"(",
"out",
".",
"context",
")",
"for",
"out",
"in",
"modD1",
".",
"get_outputs",
"(",
")",
"]",
"modD",
".",
"backward",
"(",
")",
"for",
"gradsr",
",",
"gradsf",
",",
"gradsd",
"in",
"zip",
"(",
"modD1",
".",
"_exec_group",
".",
"grad_arrays",
",",
"gradD11",
",",
"gradD21",
")",
":",
"for",
"gradr",
",",
"gradf",
",",
"gradd",
"in",
"zip",
"(",
"gradsr",
",",
"gradsf",
",",
"gradsd",
")",
":",
"gradr",
"+=",
"0.5",
"*",
"(",
"gradf",
"+",
"gradd",
")",
"for",
"gradsr",
",",
"gradsf",
",",
"gradsd",
"in",
"zip",
"(",
"modD2",
".",
"_exec_group",
".",
"grad_arrays",
",",
"gradD12",
",",
"gradD22",
")",
":",
"for",
"gradr",
",",
"gradf",
",",
"gradd",
"in",
"zip",
"(",
"gradsr",
",",
"gradsf",
",",
"gradsd",
")",
":",
"gradr",
"+=",
"0.5",
"*",
"(",
"gradf",
"+",
"gradd",
")",
"modD",
".",
"update",
"(",
")",
"modD",
".",
"update_metric",
"(",
"mD",
",",
"[",
"label",
"]",
")",
"modD",
".",
"update_metric",
"(",
"mACC",
",",
"[",
"label",
"]",
")",
"modG",
".",
"forward",
"(",
"rbatch",
",",
"is_train",
"=",
"True",
")",
"outG",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"label",
"[",
":",
"]",
"=",
"1",
"modD",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"outG",
",",
"[",
"label",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"modD",
".",
"backward",
"(",
")",
"diffD",
"=",
"modD1",
".",
"get_input_grads",
"(",
")",
"modG",
".",
"backward",
"(",
"diffD",
")",
"gradG1",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modG",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"mG",
".",
"update",
"(",
"[",
"label",
"]",
",",
"modD",
".",
"get_outputs",
"(",
")",
")",
"modG",
".",
"forward",
"(",
"sample",
",",
"is_train",
"=",
"True",
")",
"xz",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"label",
"[",
":",
"]",
"=",
"1",
"modD",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"xz",
",",
"[",
"label",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"modD",
".",
"backward",
"(",
")",
"diffD",
"=",
"modD1",
".",
"get_input_grads",
"(",
")",
"modG",
".",
"backward",
"(",
"diffD",
")",
"gradG2",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modG",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"mG",
".",
"update",
"(",
"[",
"label",
"]",
",",
"modD",
".",
"get_outputs",
"(",
")",
")",
"modG",
".",
"forward",
"(",
"sample",
",",
"is_train",
"=",
"True",
")",
"xz",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"modD1",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"xz",
",",
"[",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"outD1",
"=",
"modD1",
".",
"get_outputs",
"(",
")",
"modDL",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"outD1",
",",
"lx",
")",
",",
"is_train",
"=",
"True",
")",
"modDL",
".",
"backward",
"(",
")",
"dlGrad",
"=",
"modDL",
".",
"get_input_grads",
"(",
")",
"modD1",
".",
"backward",
"(",
"dlGrad",
")",
"diffD",
"=",
"modD1",
".",
"get_input_grads",
"(",
")",
"modG",
".",
"backward",
"(",
"diffD",
")",
"for",
"grads",
",",
"gradsG1",
",",
"gradsG2",
"in",
"zip",
"(",
"modG",
".",
"_exec_group",
".",
"grad_arrays",
",",
"gradG1",
",",
"gradG2",
")",
":",
"for",
"grad",
",",
"gradg1",
",",
"gradg2",
"in",
"zip",
"(",
"grads",
",",
"gradsG1",
",",
"gradsG2",
")",
":",
"grad",
"=",
"g_dl_weight",
"*",
"grad",
"+",
"0.5",
"*",
"(",
"gradg1",
"+",
"gradg2",
")",
"modG",
".",
"update",
"(",
")",
"mG",
".",
"update",
"(",
"[",
"label",
"]",
",",
"modD",
".",
"get_outputs",
"(",
")",
")",
"modG",
".",
"forward",
"(",
"rbatch",
",",
"is_train",
"=",
"True",
")",
"outG",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"label",
"[",
":",
"]",
"=",
"1",
"modD",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"outG",
",",
"[",
"label",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"modD",
".",
"backward",
"(",
")",
"diffD",
"=",
"modD1",
".",
"get_input_grads",
"(",
")",
"modG",
".",
"backward",
"(",
"diffD",
")",
"gradG1",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modG",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"mG",
".",
"update",
"(",
"[",
"label",
"]",
",",
"modD",
".",
"get_outputs",
"(",
")",
")",
"modG",
".",
"forward",
"(",
"sample",
",",
"is_train",
"=",
"True",
")",
"xz",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"label",
"[",
":",
"]",
"=",
"1",
"modD",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"xz",
",",
"[",
"label",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"modD",
".",
"backward",
"(",
")",
"diffD",
"=",
"modD1",
".",
"get_input_grads",
"(",
")",
"modG",
".",
"backward",
"(",
"diffD",
")",
"gradG2",
"=",
"[",
"[",
"grad",
".",
"copyto",
"(",
"grad",
".",
"context",
")",
"for",
"grad",
"in",
"grads",
"]",
"for",
"grads",
"in",
"modG",
".",
"_exec_group",
".",
"grad_arrays",
"]",
"mG",
".",
"update",
"(",
"[",
"label",
"]",
",",
"modD",
".",
"get_outputs",
"(",
")",
")",
"modG",
".",
"forward",
"(",
"sample",
",",
"is_train",
"=",
"True",
")",
"xz",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"modD1",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"xz",
",",
"[",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"outD1",
"=",
"modD1",
".",
"get_outputs",
"(",
")",
"modDL",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"outD1",
",",
"lx",
")",
",",
"is_train",
"=",
"True",
")",
"modDL",
".",
"backward",
"(",
")",
"dlGrad",
"=",
"modDL",
".",
"get_input_grads",
"(",
")",
"modD1",
".",
"backward",
"(",
"dlGrad",
")",
"diffD",
"=",
"modD1",
".",
"get_input_grads",
"(",
")",
"modG",
".",
"backward",
"(",
"diffD",
")",
"for",
"grads",
",",
"gradsG1",
",",
"gradsG2",
"in",
"zip",
"(",
"modG",
".",
"_exec_group",
".",
"grad_arrays",
",",
"gradG1",
",",
"gradG2",
")",
":",
"for",
"grad",
",",
"gradg1",
",",
"gradg2",
"in",
"zip",
"(",
"grads",
",",
"gradsG1",
",",
"gradsG2",
")",
":",
"grad",
"=",
"g_dl_weight",
"*",
"grad",
"+",
"0.5",
"*",
"(",
"gradg1",
"+",
"gradg2",
")",
"modG",
".",
"update",
"(",
")",
"mG",
".",
"update",
"(",
"[",
"label",
"]",
",",
"modD",
".",
"get_outputs",
"(",
")",
")",
"modG",
".",
"forward",
"(",
"sample",
",",
"is_train",
"=",
"True",
")",
"xz",
"=",
"modG",
".",
"get_outputs",
"(",
")",
"#update generator",
"modD1",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"xz",
",",
"[",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"outD1",
"=",
"modD1",
".",
"get_outputs",
"(",
")",
"modDL",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"outD1",
",",
"lx",
")",
",",
"is_train",
"=",
"True",
")",
"DLloss",
"=",
"modDL",
".",
"get_outputs",
"(",
")",
"modDL",
".",
"backward",
"(",
")",
"dlGrad",
"=",
"modDL",
".",
"get_input_grads",
"(",
")",
"modD1",
".",
"backward",
"(",
"dlGrad",
")",
"diffD",
"=",
"modD1",
".",
"get_input_grads",
"(",
")",
"modG",
".",
"backward",
"(",
"diffD",
")",
"#update encoder",
"nElements",
"=",
"batch_size",
"modKL",
".",
"forward",
"(",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"[",
"mx",
".",
"ndarray",
".",
"concat",
"(",
"mu",
",",
"lv",
",",
"dim",
"=",
"0",
")",
"]",
")",
",",
"is_train",
"=",
"True",
")",
"KLloss",
"=",
"modKL",
".",
"get_outputs",
"(",
")",
"modKL",
".",
"backward",
"(",
")",
"gradKLLoss",
"=",
"modKL",
".",
"get_input_grads",
"(",
")",
"diffG",
"=",
"modG",
".",
"get_input_grads",
"(",
")",
"diffG",
"=",
"diffG",
"[",
"0",
"]",
".",
"reshape",
"(",
"(",
"batch_size",
",",
"Z",
")",
")",
"modE",
".",
"backward",
"(",
"mx",
".",
"ndarray",
".",
"split",
"(",
"gradKLLoss",
"[",
"0",
"]",
",",
"num_outputs",
"=",
"2",
",",
"axis",
"=",
"0",
")",
"+",
"[",
"diffG",
"]",
")",
"modE",
".",
"update",
"(",
")",
"pred",
"=",
"mx",
".",
"ndarray",
".",
"concat",
"(",
"mu",
",",
"lv",
",",
"dim",
"=",
"0",
")",
"mE",
".",
"update",
"(",
"[",
"pred",
"]",
",",
"[",
"pred",
"]",
")",
"if",
"mon",
"is",
"not",
"None",
":",
"mon",
".",
"toc_print",
"(",
")",
"t",
"+=",
"1",
"if",
"t",
"%",
"show_after_every",
"==",
"0",
":",
"print",
"(",
"'epoch:'",
",",
"epoch",
",",
"'iter:'",
",",
"t",
",",
"'metric:'",
",",
"mACC",
".",
"get",
"(",
")",
",",
"mG",
".",
"get",
"(",
")",
",",
"mD",
".",
"get",
"(",
")",
",",
"mE",
".",
"get",
"(",
")",
",",
"KLloss",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
",",
"DLloss",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
")",
"mACC",
".",
"reset",
"(",
")",
"mG",
".",
"reset",
"(",
")",
"mD",
".",
"reset",
"(",
")",
"mE",
".",
"reset",
"(",
")",
"if",
"epoch",
"%",
"visualize_after_every",
"==",
"0",
":",
"visual",
"(",
"output_path",
"+",
"'gout'",
"+",
"str",
"(",
"epoch",
")",
",",
"outG",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
",",
"activation",
")",
"visual",
"(",
"output_path",
"+",
"'data'",
"+",
"str",
"(",
"epoch",
")",
",",
"batch",
".",
"data",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
",",
"activation",
")",
"if",
"check_point",
"and",
"epoch",
"%",
"save_after_every",
"==",
"0",
":",
"print",
"(",
"'Saving...'",
")",
"modG",
".",
"save_params",
"(",
"checkpoint_path",
"+",
"'/%s_G-%04d.params'",
"%",
"(",
"dataset",
",",
"epoch",
")",
")",
"modD",
".",
"save_params",
"(",
"checkpoint_path",
"+",
"'/%s_D-%04d.params'",
"%",
"(",
"dataset",
",",
"epoch",
")",
")",
"modE",
".",
"save_params",
"(",
"checkpoint_path",
"+",
"'/%s_E-%04d.params'",
"%",
"(",
"dataset",
",",
"epoch",
")",
")"
] |
adversarial training of the VAE
|
[
"adversarial",
"training",
"of",
"the",
"VAE"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L288-L613
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
create_and_validate_dir
|
def create_and_validate_dir(data_dir):
'''Creates/Validates dir
'''
if data_dir != "":
if not os.path.exists(data_dir):
try:
logging.info('create directory %s', data_dir)
os.makedirs(data_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise OSError('failed to create ' + data_dir)
|
python
|
def create_and_validate_dir(data_dir):
'''Creates/Validates dir
'''
if data_dir != "":
if not os.path.exists(data_dir):
try:
logging.info('create directory %s', data_dir)
os.makedirs(data_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise OSError('failed to create ' + data_dir)
|
[
"def",
"create_and_validate_dir",
"(",
"data_dir",
")",
":",
"if",
"data_dir",
"!=",
"\"\"",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"data_dir",
")",
":",
"try",
":",
"logging",
".",
"info",
"(",
"'create directory %s'",
",",
"data_dir",
")",
"os",
".",
"makedirs",
"(",
"data_dir",
")",
"except",
"OSError",
"as",
"exc",
":",
"if",
"exc",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"OSError",
"(",
"'failed to create '",
"+",
"data_dir",
")"
] |
Creates/Validates dir
|
[
"Creates",
"/",
"Validates",
"dir"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L660-L670
|
train
|
apache/incubator-mxnet
|
example/vae-gan/vaegan_mxnet.py
|
parse_args
|
def parse_args():
'''Parse args
'''
parser = argparse.ArgumentParser(description='Train and Test an Adversarial Variatiional Encoder')
parser.add_argument('--train', help='train the network', action='store_true')
parser.add_argument('--test', help='test the network', action='store_true')
parser.add_argument('--save_embedding', help='saves the shape embedding of each input image', action='store_true')
parser.add_argument('--dataset', help='dataset name', default='caltech', type=str)
parser.add_argument('--activation', help='activation i.e. sigmoid or tanh', default='sigmoid', type=str)
parser.add_argument('--training_data_path', help='training data path', default='datasets/caltech101/data/images32x32', type=str)
parser.add_argument('--testing_data_path', help='testing data path', default='datasets/caltech101/test_data', type=str)
parser.add_argument('--pretrained_encoder_path', help='pretrained encoder model path', default='checkpoints32x32_sigmoid/caltech_E-0045.params', type=str)
parser.add_argument('--pretrained_generator_path', help='pretrained generator model path', default='checkpoints32x32_sigmoid/caltech_G-0045.params', type=str)
parser.add_argument('--output_path', help='output path for the generated images', default='outputs32x32_sigmoid', type=str)
parser.add_argument('--embedding_path', help='output path for the generated embeddings', default='outputs32x32_sigmoid', type=str)
parser.add_argument('--checkpoint_path', help='checkpoint saving path ', default='checkpoints32x32_sigmoid', type=str)
parser.add_argument('--nef', help='encoder filter count in the first layer', default=64, type=int)
parser.add_argument('--ndf', help='discriminator filter count in the first layer', default=64, type=int)
parser.add_argument('--ngf', help='generator filter count in the second last layer', default=64, type=int)
parser.add_argument('--nc', help='generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image', default=1, type=int)
parser.add_argument('--batch_size', help='batch size, keep it 1 during testing', default=64, type=int)
parser.add_argument('--Z', help='embedding size', default=100, type=int)
parser.add_argument('--lr', help='learning rate', default=0.0002, type=float)
parser.add_argument('--beta1', help='beta1 for adam optimizer', default=0.5, type=float)
parser.add_argument('--epsilon', help='epsilon for adam optimizer', default=1e-5, type=float)
parser.add_argument('--g_dl_weight', help='discriminator layer loss weight', default=1e-1, type=float)
parser.add_argument('--gpu', help='gpu index', default=0, type=int)
parser.add_argument('--use_cpu', help='use cpu', action='store_true')
parser.add_argument('--num_epoch', help='number of maximum epochs ', default=45, type=int)
parser.add_argument('--save_after_every', help='save checkpoint after every this number of epochs ', default=5, type=int)
parser.add_argument('--visualize_after_every', help='save output images after every this number of epochs', default=5, type=int)
parser.add_argument('--show_after_every', help='show metrics after this number of iterations', default=10, type=int)
args = parser.parse_args()
return args
|
python
|
def parse_args():
'''Parse args
'''
parser = argparse.ArgumentParser(description='Train and Test an Adversarial Variatiional Encoder')
parser.add_argument('--train', help='train the network', action='store_true')
parser.add_argument('--test', help='test the network', action='store_true')
parser.add_argument('--save_embedding', help='saves the shape embedding of each input image', action='store_true')
parser.add_argument('--dataset', help='dataset name', default='caltech', type=str)
parser.add_argument('--activation', help='activation i.e. sigmoid or tanh', default='sigmoid', type=str)
parser.add_argument('--training_data_path', help='training data path', default='datasets/caltech101/data/images32x32', type=str)
parser.add_argument('--testing_data_path', help='testing data path', default='datasets/caltech101/test_data', type=str)
parser.add_argument('--pretrained_encoder_path', help='pretrained encoder model path', default='checkpoints32x32_sigmoid/caltech_E-0045.params', type=str)
parser.add_argument('--pretrained_generator_path', help='pretrained generator model path', default='checkpoints32x32_sigmoid/caltech_G-0045.params', type=str)
parser.add_argument('--output_path', help='output path for the generated images', default='outputs32x32_sigmoid', type=str)
parser.add_argument('--embedding_path', help='output path for the generated embeddings', default='outputs32x32_sigmoid', type=str)
parser.add_argument('--checkpoint_path', help='checkpoint saving path ', default='checkpoints32x32_sigmoid', type=str)
parser.add_argument('--nef', help='encoder filter count in the first layer', default=64, type=int)
parser.add_argument('--ndf', help='discriminator filter count in the first layer', default=64, type=int)
parser.add_argument('--ngf', help='generator filter count in the second last layer', default=64, type=int)
parser.add_argument('--nc', help='generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image', default=1, type=int)
parser.add_argument('--batch_size', help='batch size, keep it 1 during testing', default=64, type=int)
parser.add_argument('--Z', help='embedding size', default=100, type=int)
parser.add_argument('--lr', help='learning rate', default=0.0002, type=float)
parser.add_argument('--beta1', help='beta1 for adam optimizer', default=0.5, type=float)
parser.add_argument('--epsilon', help='epsilon for adam optimizer', default=1e-5, type=float)
parser.add_argument('--g_dl_weight', help='discriminator layer loss weight', default=1e-1, type=float)
parser.add_argument('--gpu', help='gpu index', default=0, type=int)
parser.add_argument('--use_cpu', help='use cpu', action='store_true')
parser.add_argument('--num_epoch', help='number of maximum epochs ', default=45, type=int)
parser.add_argument('--save_after_every', help='save checkpoint after every this number of epochs ', default=5, type=int)
parser.add_argument('--visualize_after_every', help='save output images after every this number of epochs', default=5, type=int)
parser.add_argument('--show_after_every', help='show metrics after this number of iterations', default=10, type=int)
args = parser.parse_args()
return args
|
[
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Train and Test an Adversarial Variatiional Encoder'",
")",
"parser",
".",
"add_argument",
"(",
"'--train'",
",",
"help",
"=",
"'train the network'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--test'",
",",
"help",
"=",
"'test the network'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--save_embedding'",
",",
"help",
"=",
"'saves the shape embedding of each input image'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--dataset'",
",",
"help",
"=",
"'dataset name'",
",",
"default",
"=",
"'caltech'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--activation'",
",",
"help",
"=",
"'activation i.e. sigmoid or tanh'",
",",
"default",
"=",
"'sigmoid'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--training_data_path'",
",",
"help",
"=",
"'training data path'",
",",
"default",
"=",
"'datasets/caltech101/data/images32x32'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--testing_data_path'",
",",
"help",
"=",
"'testing data path'",
",",
"default",
"=",
"'datasets/caltech101/test_data'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--pretrained_encoder_path'",
",",
"help",
"=",
"'pretrained encoder model path'",
",",
"default",
"=",
"'checkpoints32x32_sigmoid/caltech_E-0045.params'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--pretrained_generator_path'",
",",
"help",
"=",
"'pretrained generator model path'",
",",
"default",
"=",
"'checkpoints32x32_sigmoid/caltech_G-0045.params'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--output_path'",
",",
"help",
"=",
"'output path for the generated images'",
",",
"default",
"=",
"'outputs32x32_sigmoid'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--embedding_path'",
",",
"help",
"=",
"'output path for the generated embeddings'",
",",
"default",
"=",
"'outputs32x32_sigmoid'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--checkpoint_path'",
",",
"help",
"=",
"'checkpoint saving path '",
",",
"default",
"=",
"'checkpoints32x32_sigmoid'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--nef'",
",",
"help",
"=",
"'encoder filter count in the first layer'",
",",
"default",
"=",
"64",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--ndf'",
",",
"help",
"=",
"'discriminator filter count in the first layer'",
",",
"default",
"=",
"64",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--ngf'",
",",
"help",
"=",
"'generator filter count in the second last layer'",
",",
"default",
"=",
"64",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--nc'",
",",
"help",
"=",
"'generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image'",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--batch_size'",
",",
"help",
"=",
"'batch size, keep it 1 during testing'",
",",
"default",
"=",
"64",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--Z'",
",",
"help",
"=",
"'embedding size'",
",",
"default",
"=",
"100",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--lr'",
",",
"help",
"=",
"'learning rate'",
",",
"default",
"=",
"0.0002",
",",
"type",
"=",
"float",
")",
"parser",
".",
"add_argument",
"(",
"'--beta1'",
",",
"help",
"=",
"'beta1 for adam optimizer'",
",",
"default",
"=",
"0.5",
",",
"type",
"=",
"float",
")",
"parser",
".",
"add_argument",
"(",
"'--epsilon'",
",",
"help",
"=",
"'epsilon for adam optimizer'",
",",
"default",
"=",
"1e-5",
",",
"type",
"=",
"float",
")",
"parser",
".",
"add_argument",
"(",
"'--g_dl_weight'",
",",
"help",
"=",
"'discriminator layer loss weight'",
",",
"default",
"=",
"1e-1",
",",
"type",
"=",
"float",
")",
"parser",
".",
"add_argument",
"(",
"'--gpu'",
",",
"help",
"=",
"'gpu index'",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--use_cpu'",
",",
"help",
"=",
"'use cpu'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--num_epoch'",
",",
"help",
"=",
"'number of maximum epochs '",
",",
"default",
"=",
"45",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--save_after_every'",
",",
"help",
"=",
"'save checkpoint after every this number of epochs '",
",",
"default",
"=",
"5",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--visualize_after_every'",
",",
"help",
"=",
"'save output images after every this number of epochs'",
",",
"default",
"=",
"5",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--show_after_every'",
",",
"help",
"=",
"'show metrics after this number of iterations'",
",",
"default",
"=",
"10",
",",
"type",
"=",
"int",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
] |
Parse args
|
[
"Parse",
"args"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L673-L708
|
train
|
apache/incubator-mxnet
|
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
|
get_rmse_log
|
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
|
python
|
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
|
[
"def",
"get_rmse_log",
"(",
"net",
",",
"X_train",
",",
"y_train",
")",
":",
"num_train",
"=",
"X_train",
".",
"shape",
"[",
"0",
"]",
"clipped_preds",
"=",
"nd",
".",
"clip",
"(",
"net",
"(",
"X_train",
")",
",",
"1",
",",
"float",
"(",
"'inf'",
")",
")",
"return",
"np",
".",
"sqrt",
"(",
"2",
"*",
"nd",
".",
"sum",
"(",
"square_loss",
"(",
"nd",
".",
"log",
"(",
"clipped_preds",
")",
",",
"nd",
".",
"log",
"(",
"y_train",
")",
")",
")",
".",
"asscalar",
"(",
")",
"/",
"num_train",
")"
] |
Gets root mse between the logarithms of the prediction and the truth.
|
[
"Gets",
"root",
"mse",
"between",
"the",
"logarithms",
"of",
"the",
"prediction",
"and",
"the",
"truth",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L66-L71
|
train
|
apache/incubator-mxnet
|
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
|
get_net
|
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
|
python
|
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
|
[
"def",
"get_net",
"(",
")",
":",
"net",
"=",
"gluon",
".",
"nn",
".",
"Sequential",
"(",
")",
"with",
"net",
".",
"name_scope",
"(",
")",
":",
"net",
".",
"add",
"(",
"gluon",
".",
"nn",
".",
"Dense",
"(",
"50",
",",
"activation",
"=",
"\"relu\"",
")",
")",
"net",
".",
"add",
"(",
"gluon",
".",
"nn",
".",
"Dense",
"(",
"1",
")",
")",
"net",
".",
"initialize",
"(",
")",
"return",
"net"
] |
Gets a neural network. Better results are obtained with modifications.
|
[
"Gets",
"a",
"neural",
"network",
".",
"Better",
"results",
"are",
"obtained",
"with",
"modifications",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L73-L80
|
train
|
apache/incubator-mxnet
|
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
|
train
|
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
|
python
|
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
|
[
"def",
"train",
"(",
"net",
",",
"X_train",
",",
"y_train",
",",
"epochs",
",",
"verbose_epoch",
",",
"learning_rate",
",",
"weight_decay",
",",
"batch_size",
")",
":",
"dataset_train",
"=",
"gluon",
".",
"data",
".",
"ArrayDataset",
"(",
"X_train",
",",
"y_train",
")",
"data_iter_train",
"=",
"gluon",
".",
"data",
".",
"DataLoader",
"(",
"dataset_train",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
"trainer",
"=",
"gluon",
".",
"Trainer",
"(",
"net",
".",
"collect_params",
"(",
")",
",",
"'adam'",
",",
"{",
"'learning_rate'",
":",
"learning_rate",
",",
"'wd'",
":",
"weight_decay",
"}",
")",
"net",
".",
"initialize",
"(",
"force_reinit",
"=",
"True",
")",
"for",
"epoch",
"in",
"range",
"(",
"epochs",
")",
":",
"for",
"data",
",",
"label",
"in",
"data_iter_train",
":",
"with",
"autograd",
".",
"record",
"(",
")",
":",
"output",
"=",
"net",
"(",
"data",
")",
"loss",
"=",
"square_loss",
"(",
"output",
",",
"label",
")",
"loss",
".",
"backward",
"(",
")",
"trainer",
".",
"step",
"(",
"batch_size",
")",
"avg_loss",
"=",
"get_rmse_log",
"(",
"net",
",",
"X_train",
",",
"y_train",
")",
"if",
"epoch",
">",
"verbose_epoch",
":",
"print",
"(",
"\"Epoch %d, train loss: %f\"",
"%",
"(",
"epoch",
",",
"avg_loss",
")",
")",
"return",
"avg_loss"
] |
Trains the model.
|
[
"Trains",
"the",
"model",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L82-L102
|
train
|
apache/incubator-mxnet
|
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
|
k_fold_cross_valid
|
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
|
python
|
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
|
[
"def",
"k_fold_cross_valid",
"(",
"k",
",",
"epochs",
",",
"verbose_epoch",
",",
"X_train",
",",
"y_train",
",",
"learning_rate",
",",
"weight_decay",
",",
"batch_size",
")",
":",
"assert",
"k",
">",
"1",
"fold_size",
"=",
"X_train",
".",
"shape",
"[",
"0",
"]",
"//",
"k",
"train_loss_sum",
"=",
"0.0",
"test_loss_sum",
"=",
"0.0",
"for",
"test_idx",
"in",
"range",
"(",
"k",
")",
":",
"X_val_test",
"=",
"X_train",
"[",
"test_idx",
"*",
"fold_size",
":",
"(",
"test_idx",
"+",
"1",
")",
"*",
"fold_size",
",",
":",
"]",
"y_val_test",
"=",
"y_train",
"[",
"test_idx",
"*",
"fold_size",
":",
"(",
"test_idx",
"+",
"1",
")",
"*",
"fold_size",
"]",
"val_train_defined",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"if",
"i",
"!=",
"test_idx",
":",
"X_cur_fold",
"=",
"X_train",
"[",
"i",
"*",
"fold_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"fold_size",
",",
":",
"]",
"y_cur_fold",
"=",
"y_train",
"[",
"i",
"*",
"fold_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"fold_size",
"]",
"if",
"not",
"val_train_defined",
":",
"X_val_train",
"=",
"X_cur_fold",
"y_val_train",
"=",
"y_cur_fold",
"val_train_defined",
"=",
"True",
"else",
":",
"X_val_train",
"=",
"nd",
".",
"concat",
"(",
"X_val_train",
",",
"X_cur_fold",
",",
"dim",
"=",
"0",
")",
"y_val_train",
"=",
"nd",
".",
"concat",
"(",
"y_val_train",
",",
"y_cur_fold",
",",
"dim",
"=",
"0",
")",
"net",
"=",
"get_net",
"(",
")",
"train_loss",
"=",
"train",
"(",
"net",
",",
"X_val_train",
",",
"y_val_train",
",",
"epochs",
",",
"verbose_epoch",
",",
"learning_rate",
",",
"weight_decay",
",",
"batch_size",
")",
"train_loss_sum",
"+=",
"train_loss",
"test_loss",
"=",
"get_rmse_log",
"(",
"net",
",",
"X_val_test",
",",
"y_val_test",
")",
"print",
"(",
"\"Test loss: %f\"",
"%",
"test_loss",
")",
"test_loss_sum",
"+=",
"test_loss",
"return",
"train_loss_sum",
"/",
"k",
",",
"test_loss_sum",
"/",
"k"
] |
Conducts k-fold cross validation for the model.
|
[
"Conducts",
"k",
"-",
"fold",
"cross",
"validation",
"for",
"the",
"model",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L104-L135
|
train
|
apache/incubator-mxnet
|
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
|
learn
|
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
|
python
|
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
|
[
"def",
"learn",
"(",
"epochs",
",",
"verbose_epoch",
",",
"X_train",
",",
"y_train",
",",
"test",
",",
"learning_rate",
",",
"weight_decay",
",",
"batch_size",
")",
":",
"net",
"=",
"get_net",
"(",
")",
"_",
"=",
"train",
"(",
"net",
",",
"X_train",
",",
"y_train",
",",
"epochs",
",",
"verbose_epoch",
",",
"learning_rate",
",",
"weight_decay",
",",
"batch_size",
")",
"preds",
"=",
"net",
"(",
"X_test",
")",
".",
"asnumpy",
"(",
")",
"test",
"[",
"'SalePrice'",
"]",
"=",
"pd",
".",
"Series",
"(",
"preds",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"[",
"0",
"]",
")",
"submission",
"=",
"pd",
".",
"concat",
"(",
"[",
"test",
"[",
"'Id'",
"]",
",",
"test",
"[",
"'SalePrice'",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"submission",
".",
"to_csv",
"(",
"'submission.csv'",
",",
"index",
"=",
"False",
")"
] |
Trains the model and predicts on the test data set.
|
[
"Trains",
"the",
"model",
"and",
"predicts",
"on",
"the",
"test",
"data",
"set",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L152-L161
|
train
|
apache/incubator-mxnet
|
example/capsnet/capsulenet.py
|
capsnet
|
def capsnet(batch_size, n_class, num_routing, recon_loss_weight):
"""Create CapsNet"""
# data.shape = [batch_size, 1, 28, 28]
data = mx.sym.Variable('data')
input_shape = (1, 28, 28)
# Conv2D layer
# net.shape = [batch_size, 256, 20, 20]
conv1 = mx.sym.Convolution(data=data,
num_filter=256,
kernel=(9, 9),
layout='NCHW',
name='conv1')
conv1 = mx.sym.Activation(data=conv1, act_type='relu', name='conv1_act')
# net.shape = [batch_size, 256, 6, 6]
primarycaps = primary_caps(data=conv1,
dim_vector=8,
n_channels=32,
kernel=(9, 9),
strides=[2, 2],
name='primarycaps')
primarycaps.infer_shape(data=(batch_size, 1, 28, 28))
# CapsuleLayer
kernel_initializer = mx.init.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3)
bias_initializer = mx.init.Zero()
digitcaps = CapsuleLayer(num_capsule=10,
dim_vector=16,
batch_size=batch_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
num_routing=num_routing)(primarycaps)
# out_caps : (batch_size, 10)
out_caps = mx.sym.sqrt(data=mx.sym.sum(mx.sym.square(digitcaps), 2))
out_caps.infer_shape(data=(batch_size, 1, 28, 28))
y = mx.sym.Variable('softmax_label', shape=(batch_size,))
y_onehot = mx.sym.one_hot(y, n_class)
y_reshaped = mx.sym.Reshape(data=y_onehot, shape=(batch_size, -4, n_class, -1))
y_reshaped.infer_shape(softmax_label=(batch_size,))
# inputs_masked : (batch_size, 16)
inputs_masked = mx.sym.linalg_gemm2(y_reshaped, digitcaps, transpose_a=True)
inputs_masked = mx.sym.Reshape(data=inputs_masked, shape=(-3, 0))
x_recon = mx.sym.FullyConnected(data=inputs_masked, num_hidden=512, name='x_recon')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=1024, name='x_recon2')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act2')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=np.prod(input_shape), name='x_recon3')
x_recon = mx.sym.Activation(data=x_recon, act_type='sigmoid', name='x_recon_act3')
data_flatten = mx.sym.flatten(data=data)
squared_error = mx.sym.square(x_recon-data_flatten)
recon_error = mx.sym.mean(squared_error)
recon_error_stopped = recon_error
recon_error_stopped = mx.sym.BlockGrad(recon_error_stopped)
loss = mx.symbol.MakeLoss((1-recon_loss_weight)*margin_loss(y_onehot, out_caps)+recon_loss_weight*recon_error)
out_caps_blocked = out_caps
out_caps_blocked = mx.sym.BlockGrad(out_caps_blocked)
return mx.sym.Group([out_caps_blocked, loss, recon_error_stopped])
|
python
|
def capsnet(batch_size, n_class, num_routing, recon_loss_weight):
"""Create CapsNet"""
# data.shape = [batch_size, 1, 28, 28]
data = mx.sym.Variable('data')
input_shape = (1, 28, 28)
# Conv2D layer
# net.shape = [batch_size, 256, 20, 20]
conv1 = mx.sym.Convolution(data=data,
num_filter=256,
kernel=(9, 9),
layout='NCHW',
name='conv1')
conv1 = mx.sym.Activation(data=conv1, act_type='relu', name='conv1_act')
# net.shape = [batch_size, 256, 6, 6]
primarycaps = primary_caps(data=conv1,
dim_vector=8,
n_channels=32,
kernel=(9, 9),
strides=[2, 2],
name='primarycaps')
primarycaps.infer_shape(data=(batch_size, 1, 28, 28))
# CapsuleLayer
kernel_initializer = mx.init.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3)
bias_initializer = mx.init.Zero()
digitcaps = CapsuleLayer(num_capsule=10,
dim_vector=16,
batch_size=batch_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
num_routing=num_routing)(primarycaps)
# out_caps : (batch_size, 10)
out_caps = mx.sym.sqrt(data=mx.sym.sum(mx.sym.square(digitcaps), 2))
out_caps.infer_shape(data=(batch_size, 1, 28, 28))
y = mx.sym.Variable('softmax_label', shape=(batch_size,))
y_onehot = mx.sym.one_hot(y, n_class)
y_reshaped = mx.sym.Reshape(data=y_onehot, shape=(batch_size, -4, n_class, -1))
y_reshaped.infer_shape(softmax_label=(batch_size,))
# inputs_masked : (batch_size, 16)
inputs_masked = mx.sym.linalg_gemm2(y_reshaped, digitcaps, transpose_a=True)
inputs_masked = mx.sym.Reshape(data=inputs_masked, shape=(-3, 0))
x_recon = mx.sym.FullyConnected(data=inputs_masked, num_hidden=512, name='x_recon')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=1024, name='x_recon2')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act2')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=np.prod(input_shape), name='x_recon3')
x_recon = mx.sym.Activation(data=x_recon, act_type='sigmoid', name='x_recon_act3')
data_flatten = mx.sym.flatten(data=data)
squared_error = mx.sym.square(x_recon-data_flatten)
recon_error = mx.sym.mean(squared_error)
recon_error_stopped = recon_error
recon_error_stopped = mx.sym.BlockGrad(recon_error_stopped)
loss = mx.symbol.MakeLoss((1-recon_loss_weight)*margin_loss(y_onehot, out_caps)+recon_loss_weight*recon_error)
out_caps_blocked = out_caps
out_caps_blocked = mx.sym.BlockGrad(out_caps_blocked)
return mx.sym.Group([out_caps_blocked, loss, recon_error_stopped])
|
[
"def",
"capsnet",
"(",
"batch_size",
",",
"n_class",
",",
"num_routing",
",",
"recon_loss_weight",
")",
":",
"# data.shape = [batch_size, 1, 28, 28]",
"data",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'data'",
")",
"input_shape",
"=",
"(",
"1",
",",
"28",
",",
"28",
")",
"# Conv2D layer",
"# net.shape = [batch_size, 256, 20, 20]",
"conv1",
"=",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"data",
"=",
"data",
",",
"num_filter",
"=",
"256",
",",
"kernel",
"=",
"(",
"9",
",",
"9",
")",
",",
"layout",
"=",
"'NCHW'",
",",
"name",
"=",
"'conv1'",
")",
"conv1",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"data",
"=",
"conv1",
",",
"act_type",
"=",
"'relu'",
",",
"name",
"=",
"'conv1_act'",
")",
"# net.shape = [batch_size, 256, 6, 6]",
"primarycaps",
"=",
"primary_caps",
"(",
"data",
"=",
"conv1",
",",
"dim_vector",
"=",
"8",
",",
"n_channels",
"=",
"32",
",",
"kernel",
"=",
"(",
"9",
",",
"9",
")",
",",
"strides",
"=",
"[",
"2",
",",
"2",
"]",
",",
"name",
"=",
"'primarycaps'",
")",
"primarycaps",
".",
"infer_shape",
"(",
"data",
"=",
"(",
"batch_size",
",",
"1",
",",
"28",
",",
"28",
")",
")",
"# CapsuleLayer",
"kernel_initializer",
"=",
"mx",
".",
"init",
".",
"Xavier",
"(",
"rnd_type",
"=",
"'uniform'",
",",
"factor_type",
"=",
"'avg'",
",",
"magnitude",
"=",
"3",
")",
"bias_initializer",
"=",
"mx",
".",
"init",
".",
"Zero",
"(",
")",
"digitcaps",
"=",
"CapsuleLayer",
"(",
"num_capsule",
"=",
"10",
",",
"dim_vector",
"=",
"16",
",",
"batch_size",
"=",
"batch_size",
",",
"kernel_initializer",
"=",
"kernel_initializer",
",",
"bias_initializer",
"=",
"bias_initializer",
",",
"num_routing",
"=",
"num_routing",
")",
"(",
"primarycaps",
")",
"# out_caps : (batch_size, 10)",
"out_caps",
"=",
"mx",
".",
"sym",
".",
"sqrt",
"(",
"data",
"=",
"mx",
".",
"sym",
".",
"sum",
"(",
"mx",
".",
"sym",
".",
"square",
"(",
"digitcaps",
")",
",",
"2",
")",
")",
"out_caps",
".",
"infer_shape",
"(",
"data",
"=",
"(",
"batch_size",
",",
"1",
",",
"28",
",",
"28",
")",
")",
"y",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"'softmax_label'",
",",
"shape",
"=",
"(",
"batch_size",
",",
")",
")",
"y_onehot",
"=",
"mx",
".",
"sym",
".",
"one_hot",
"(",
"y",
",",
"n_class",
")",
"y_reshaped",
"=",
"mx",
".",
"sym",
".",
"Reshape",
"(",
"data",
"=",
"y_onehot",
",",
"shape",
"=",
"(",
"batch_size",
",",
"-",
"4",
",",
"n_class",
",",
"-",
"1",
")",
")",
"y_reshaped",
".",
"infer_shape",
"(",
"softmax_label",
"=",
"(",
"batch_size",
",",
")",
")",
"# inputs_masked : (batch_size, 16)",
"inputs_masked",
"=",
"mx",
".",
"sym",
".",
"linalg_gemm2",
"(",
"y_reshaped",
",",
"digitcaps",
",",
"transpose_a",
"=",
"True",
")",
"inputs_masked",
"=",
"mx",
".",
"sym",
".",
"Reshape",
"(",
"data",
"=",
"inputs_masked",
",",
"shape",
"=",
"(",
"-",
"3",
",",
"0",
")",
")",
"x_recon",
"=",
"mx",
".",
"sym",
".",
"FullyConnected",
"(",
"data",
"=",
"inputs_masked",
",",
"num_hidden",
"=",
"512",
",",
"name",
"=",
"'x_recon'",
")",
"x_recon",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"data",
"=",
"x_recon",
",",
"act_type",
"=",
"'relu'",
",",
"name",
"=",
"'x_recon_act'",
")",
"x_recon",
"=",
"mx",
".",
"sym",
".",
"FullyConnected",
"(",
"data",
"=",
"x_recon",
",",
"num_hidden",
"=",
"1024",
",",
"name",
"=",
"'x_recon2'",
")",
"x_recon",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"data",
"=",
"x_recon",
",",
"act_type",
"=",
"'relu'",
",",
"name",
"=",
"'x_recon_act2'",
")",
"x_recon",
"=",
"mx",
".",
"sym",
".",
"FullyConnected",
"(",
"data",
"=",
"x_recon",
",",
"num_hidden",
"=",
"np",
".",
"prod",
"(",
"input_shape",
")",
",",
"name",
"=",
"'x_recon3'",
")",
"x_recon",
"=",
"mx",
".",
"sym",
".",
"Activation",
"(",
"data",
"=",
"x_recon",
",",
"act_type",
"=",
"'sigmoid'",
",",
"name",
"=",
"'x_recon_act3'",
")",
"data_flatten",
"=",
"mx",
".",
"sym",
".",
"flatten",
"(",
"data",
"=",
"data",
")",
"squared_error",
"=",
"mx",
".",
"sym",
".",
"square",
"(",
"x_recon",
"-",
"data_flatten",
")",
"recon_error",
"=",
"mx",
".",
"sym",
".",
"mean",
"(",
"squared_error",
")",
"recon_error_stopped",
"=",
"recon_error",
"recon_error_stopped",
"=",
"mx",
".",
"sym",
".",
"BlockGrad",
"(",
"recon_error_stopped",
")",
"loss",
"=",
"mx",
".",
"symbol",
".",
"MakeLoss",
"(",
"(",
"1",
"-",
"recon_loss_weight",
")",
"*",
"margin_loss",
"(",
"y_onehot",
",",
"out_caps",
")",
"+",
"recon_loss_weight",
"*",
"recon_error",
")",
"out_caps_blocked",
"=",
"out_caps",
"out_caps_blocked",
"=",
"mx",
".",
"sym",
".",
"BlockGrad",
"(",
"out_caps_blocked",
")",
"return",
"mx",
".",
"sym",
".",
"Group",
"(",
"[",
"out_caps_blocked",
",",
"loss",
",",
"recon_error_stopped",
"]",
")"
] |
Create CapsNet
|
[
"Create",
"CapsNet"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L39-L100
|
train
|
apache/incubator-mxnet
|
example/capsnet/capsulenet.py
|
do_training
|
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
"""Perform CapsNet training"""
summary_writer = SummaryWriter(args.tblog_dir)
lr_scheduler = SimpleLRScheduler(learning_rate)
optimizer_params = {'lr_scheduler': lr_scheduler}
module.init_params()
module.init_optimizer(kvstore=kvstore,
optimizer=optimizer,
optimizer_params=optimizer_params)
n_epoch = 0
while True:
if n_epoch >= num_epoch:
break
train_iter.reset()
val_iter.reset()
loss_metric.reset()
for n_batch, data_batch in enumerate(train_iter):
module.forward_backward(data_batch)
module.update()
module.update_metric(loss_metric, data_batch.label)
loss_metric.get_batch_log(n_batch)
train_acc, train_loss, train_recon_err = loss_metric.get_name_value()
loss_metric.reset()
for n_batch, data_batch in enumerate(val_iter):
module.forward(data_batch)
module.update_metric(loss_metric, data_batch.label)
loss_metric.get_batch_log(n_batch)
val_acc, val_loss, val_recon_err = loss_metric.get_name_value()
summary_writer.add_scalar('train_acc', train_acc, n_epoch)
summary_writer.add_scalar('train_loss', train_loss, n_epoch)
summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
summary_writer.add_scalar('val_acc', val_acc, n_epoch)
summary_writer.add_scalar('val_loss', val_loss, n_epoch)
summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)
print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss,
train_recon_err))
print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))
print('SAVE CHECKPOINT')
module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
n_epoch += 1
lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)
|
python
|
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
"""Perform CapsNet training"""
summary_writer = SummaryWriter(args.tblog_dir)
lr_scheduler = SimpleLRScheduler(learning_rate)
optimizer_params = {'lr_scheduler': lr_scheduler}
module.init_params()
module.init_optimizer(kvstore=kvstore,
optimizer=optimizer,
optimizer_params=optimizer_params)
n_epoch = 0
while True:
if n_epoch >= num_epoch:
break
train_iter.reset()
val_iter.reset()
loss_metric.reset()
for n_batch, data_batch in enumerate(train_iter):
module.forward_backward(data_batch)
module.update()
module.update_metric(loss_metric, data_batch.label)
loss_metric.get_batch_log(n_batch)
train_acc, train_loss, train_recon_err = loss_metric.get_name_value()
loss_metric.reset()
for n_batch, data_batch in enumerate(val_iter):
module.forward(data_batch)
module.update_metric(loss_metric, data_batch.label)
loss_metric.get_batch_log(n_batch)
val_acc, val_loss, val_recon_err = loss_metric.get_name_value()
summary_writer.add_scalar('train_acc', train_acc, n_epoch)
summary_writer.add_scalar('train_loss', train_loss, n_epoch)
summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
summary_writer.add_scalar('val_acc', val_acc, n_epoch)
summary_writer.add_scalar('val_loss', val_loss, n_epoch)
summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)
print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss,
train_recon_err))
print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))
print('SAVE CHECKPOINT')
module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
n_epoch += 1
lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)
|
[
"def",
"do_training",
"(",
"num_epoch",
",",
"optimizer",
",",
"kvstore",
",",
"learning_rate",
",",
"model_prefix",
",",
"decay",
")",
":",
"summary_writer",
"=",
"SummaryWriter",
"(",
"args",
".",
"tblog_dir",
")",
"lr_scheduler",
"=",
"SimpleLRScheduler",
"(",
"learning_rate",
")",
"optimizer_params",
"=",
"{",
"'lr_scheduler'",
":",
"lr_scheduler",
"}",
"module",
".",
"init_params",
"(",
")",
"module",
".",
"init_optimizer",
"(",
"kvstore",
"=",
"kvstore",
",",
"optimizer",
"=",
"optimizer",
",",
"optimizer_params",
"=",
"optimizer_params",
")",
"n_epoch",
"=",
"0",
"while",
"True",
":",
"if",
"n_epoch",
">=",
"num_epoch",
":",
"break",
"train_iter",
".",
"reset",
"(",
")",
"val_iter",
".",
"reset",
"(",
")",
"loss_metric",
".",
"reset",
"(",
")",
"for",
"n_batch",
",",
"data_batch",
"in",
"enumerate",
"(",
"train_iter",
")",
":",
"module",
".",
"forward_backward",
"(",
"data_batch",
")",
"module",
".",
"update",
"(",
")",
"module",
".",
"update_metric",
"(",
"loss_metric",
",",
"data_batch",
".",
"label",
")",
"loss_metric",
".",
"get_batch_log",
"(",
"n_batch",
")",
"train_acc",
",",
"train_loss",
",",
"train_recon_err",
"=",
"loss_metric",
".",
"get_name_value",
"(",
")",
"loss_metric",
".",
"reset",
"(",
")",
"for",
"n_batch",
",",
"data_batch",
"in",
"enumerate",
"(",
"val_iter",
")",
":",
"module",
".",
"forward",
"(",
"data_batch",
")",
"module",
".",
"update_metric",
"(",
"loss_metric",
",",
"data_batch",
".",
"label",
")",
"loss_metric",
".",
"get_batch_log",
"(",
"n_batch",
")",
"val_acc",
",",
"val_loss",
",",
"val_recon_err",
"=",
"loss_metric",
".",
"get_name_value",
"(",
")",
"summary_writer",
".",
"add_scalar",
"(",
"'train_acc'",
",",
"train_acc",
",",
"n_epoch",
")",
"summary_writer",
".",
"add_scalar",
"(",
"'train_loss'",
",",
"train_loss",
",",
"n_epoch",
")",
"summary_writer",
".",
"add_scalar",
"(",
"'train_recon_err'",
",",
"train_recon_err",
",",
"n_epoch",
")",
"summary_writer",
".",
"add_scalar",
"(",
"'val_acc'",
",",
"val_acc",
",",
"n_epoch",
")",
"summary_writer",
".",
"add_scalar",
"(",
"'val_loss'",
",",
"val_loss",
",",
"n_epoch",
")",
"summary_writer",
".",
"add_scalar",
"(",
"'val_recon_err'",
",",
"val_recon_err",
",",
"n_epoch",
")",
"print",
"(",
"'Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f'",
"%",
"(",
"n_epoch",
",",
"train_acc",
",",
"train_loss",
",",
"train_recon_err",
")",
")",
"print",
"(",
"'Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f'",
"%",
"(",
"n_epoch",
",",
"val_acc",
",",
"val_loss",
",",
"val_recon_err",
")",
")",
"print",
"(",
"'SAVE CHECKPOINT'",
")",
"module",
".",
"save_checkpoint",
"(",
"prefix",
"=",
"model_prefix",
",",
"epoch",
"=",
"n_epoch",
")",
"n_epoch",
"+=",
"1",
"lr_scheduler",
".",
"learning_rate",
"=",
"learning_rate",
"*",
"(",
"decay",
"**",
"n_epoch",
")"
] |
Perform CapsNet training
|
[
"Perform",
"CapsNet",
"training"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L195-L238
|
train
|
apache/incubator-mxnet
|
example/capsnet/capsulenet.py
|
_shuffle
|
def _shuffle(data, idx):
"""Shuffle the data."""
shuffle_data = []
for idx_k, idx_v in data:
shuffle_data.append((idx_k, mx.ndarray.array(idx_v.asnumpy()[idx], idx_v.context)))
return shuffle_data
|
python
|
def _shuffle(data, idx):
"""Shuffle the data."""
shuffle_data = []
for idx_k, idx_v in data:
shuffle_data.append((idx_k, mx.ndarray.array(idx_v.asnumpy()[idx], idx_v.context)))
return shuffle_data
|
[
"def",
"_shuffle",
"(",
"data",
",",
"idx",
")",
":",
"shuffle_data",
"=",
"[",
"]",
"for",
"idx_k",
",",
"idx_v",
"in",
"data",
":",
"shuffle_data",
".",
"append",
"(",
"(",
"idx_k",
",",
"mx",
".",
"ndarray",
".",
"array",
"(",
"idx_v",
".",
"asnumpy",
"(",
")",
"[",
"idx",
"]",
",",
"idx_v",
".",
"context",
")",
")",
")",
"return",
"shuffle_data"
] |
Shuffle the data.
|
[
"Shuffle",
"the",
"data",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L268-L275
|
train
|
apache/incubator-mxnet
|
example/capsnet/capsulenet.py
|
LossMetric.update
|
def update(self, labels, preds):
"""Update the hyper-parameters and loss of CapsNet"""
batch_sum_metric = 0
batch_num_inst = 0
for label, pred_outcaps in zip(labels[0], preds[0]):
label_np = int(label.asnumpy())
pred_label = int(np.argmax(pred_outcaps.asnumpy()))
batch_sum_metric += int(label_np == pred_label)
batch_num_inst += 1
batch_loss = preds[1].asnumpy()
recon_loss = preds[2].asnumpy()
self.sum_metric += batch_sum_metric
self.num_inst += batch_num_inst
self.loss += batch_loss
self.recon_loss += recon_loss
self.batch_sum_metric = batch_sum_metric
self.batch_num_inst = batch_num_inst
self.batch_loss = batch_loss
self.n_batch += 1
|
python
|
def update(self, labels, preds):
"""Update the hyper-parameters and loss of CapsNet"""
batch_sum_metric = 0
batch_num_inst = 0
for label, pred_outcaps in zip(labels[0], preds[0]):
label_np = int(label.asnumpy())
pred_label = int(np.argmax(pred_outcaps.asnumpy()))
batch_sum_metric += int(label_np == pred_label)
batch_num_inst += 1
batch_loss = preds[1].asnumpy()
recon_loss = preds[2].asnumpy()
self.sum_metric += batch_sum_metric
self.num_inst += batch_num_inst
self.loss += batch_loss
self.recon_loss += recon_loss
self.batch_sum_metric = batch_sum_metric
self.batch_num_inst = batch_num_inst
self.batch_loss = batch_loss
self.n_batch += 1
|
[
"def",
"update",
"(",
"self",
",",
"labels",
",",
"preds",
")",
":",
"batch_sum_metric",
"=",
"0",
"batch_num_inst",
"=",
"0",
"for",
"label",
",",
"pred_outcaps",
"in",
"zip",
"(",
"labels",
"[",
"0",
"]",
",",
"preds",
"[",
"0",
"]",
")",
":",
"label_np",
"=",
"int",
"(",
"label",
".",
"asnumpy",
"(",
")",
")",
"pred_label",
"=",
"int",
"(",
"np",
".",
"argmax",
"(",
"pred_outcaps",
".",
"asnumpy",
"(",
")",
")",
")",
"batch_sum_metric",
"+=",
"int",
"(",
"label_np",
"==",
"pred_label",
")",
"batch_num_inst",
"+=",
"1",
"batch_loss",
"=",
"preds",
"[",
"1",
"]",
".",
"asnumpy",
"(",
")",
"recon_loss",
"=",
"preds",
"[",
"2",
"]",
".",
"asnumpy",
"(",
")",
"self",
".",
"sum_metric",
"+=",
"batch_sum_metric",
"self",
".",
"num_inst",
"+=",
"batch_num_inst",
"self",
".",
"loss",
"+=",
"batch_loss",
"self",
".",
"recon_loss",
"+=",
"recon_loss",
"self",
".",
"batch_sum_metric",
"=",
"batch_sum_metric",
"self",
".",
"batch_num_inst",
"=",
"batch_num_inst",
"self",
".",
"batch_loss",
"=",
"batch_loss",
"self",
".",
"n_batch",
"+=",
"1"
] |
Update the hyper-parameters and loss of CapsNet
|
[
"Update",
"the",
"hyper",
"-",
"parameters",
"and",
"loss",
"of",
"CapsNet"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L140-L158
|
train
|
apache/incubator-mxnet
|
example/capsnet/capsulenet.py
|
MNISTCustomIter.reset
|
def reset(self):
"""Reset class MNISTCustomIter(mx.io.NDArrayIter):"""
# shuffle data
if self.is_train:
np.random.shuffle(self.idx)
self.data = _shuffle(self.data, self.idx)
self.label = _shuffle(self.label, self.idx)
if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
self.cursor = -self.batch_size + (self.cursor % self.num_data) % self.batch_size
else:
self.cursor = -self.batch_size
|
python
|
def reset(self):
"""Reset class MNISTCustomIter(mx.io.NDArrayIter):"""
# shuffle data
if self.is_train:
np.random.shuffle(self.idx)
self.data = _shuffle(self.data, self.idx)
self.label = _shuffle(self.label, self.idx)
if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
self.cursor = -self.batch_size + (self.cursor % self.num_data) % self.batch_size
else:
self.cursor = -self.batch_size
|
[
"def",
"reset",
"(",
"self",
")",
":",
"# shuffle data",
"if",
"self",
".",
"is_train",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"self",
".",
"idx",
")",
"self",
".",
"data",
"=",
"_shuffle",
"(",
"self",
".",
"data",
",",
"self",
".",
"idx",
")",
"self",
".",
"label",
"=",
"_shuffle",
"(",
"self",
".",
"label",
",",
"self",
".",
"idx",
")",
"if",
"self",
".",
"last_batch_handle",
"==",
"'roll_over'",
"and",
"self",
".",
"cursor",
">",
"self",
".",
"num_data",
":",
"self",
".",
"cursor",
"=",
"-",
"self",
".",
"batch_size",
"+",
"(",
"self",
".",
"cursor",
"%",
"self",
".",
"num_data",
")",
"%",
"self",
".",
"batch_size",
"else",
":",
"self",
".",
"cursor",
"=",
"-",
"self",
".",
"batch_size"
] |
Reset class MNISTCustomIter(mx.io.NDArrayIter):
|
[
"Reset",
"class",
"MNISTCustomIter",
"(",
"mx",
".",
"io",
".",
"NDArrayIter",
")",
":"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L287-L298
|
train
|
apache/incubator-mxnet
|
example/capsnet/capsulenet.py
|
MNISTCustomIter.next
|
def next(self):
"""Generate next of iterator"""
if self.iter_next():
if self.is_train:
data_raw_list = self.getdata()
data_shifted = []
for data_raw in data_raw_list[0]:
data_shifted.append(random_shift(data_raw.asnumpy(), 0.1, 0.1))
return mx.io.DataBatch(data=[mx.nd.array(data_shifted)], label=self.getlabel(),
pad=self.getpad(), index=None)
else:
return mx.io.DataBatch(data=self.getdata(), label=self.getlabel(), pad=self.getpad(), index=None)
else:
raise StopIteration
|
python
|
def next(self):
"""Generate next of iterator"""
if self.iter_next():
if self.is_train:
data_raw_list = self.getdata()
data_shifted = []
for data_raw in data_raw_list[0]:
data_shifted.append(random_shift(data_raw.asnumpy(), 0.1, 0.1))
return mx.io.DataBatch(data=[mx.nd.array(data_shifted)], label=self.getlabel(),
pad=self.getpad(), index=None)
else:
return mx.io.DataBatch(data=self.getdata(), label=self.getlabel(), pad=self.getpad(), index=None)
else:
raise StopIteration
|
[
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"iter_next",
"(",
")",
":",
"if",
"self",
".",
"is_train",
":",
"data_raw_list",
"=",
"self",
".",
"getdata",
"(",
")",
"data_shifted",
"=",
"[",
"]",
"for",
"data_raw",
"in",
"data_raw_list",
"[",
"0",
"]",
":",
"data_shifted",
".",
"append",
"(",
"random_shift",
"(",
"data_raw",
".",
"asnumpy",
"(",
")",
",",
"0.1",
",",
"0.1",
")",
")",
"return",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"data",
"=",
"[",
"mx",
".",
"nd",
".",
"array",
"(",
"data_shifted",
")",
"]",
",",
"label",
"=",
"self",
".",
"getlabel",
"(",
")",
",",
"pad",
"=",
"self",
".",
"getpad",
"(",
")",
",",
"index",
"=",
"None",
")",
"else",
":",
"return",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"data",
"=",
"self",
".",
"getdata",
"(",
")",
",",
"label",
"=",
"self",
".",
"getlabel",
"(",
")",
",",
"pad",
"=",
"self",
".",
"getpad",
"(",
")",
",",
"index",
"=",
"None",
")",
"else",
":",
"raise",
"StopIteration"
] |
Generate next of iterator
|
[
"Generate",
"next",
"of",
"iterator"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L304-L318
|
train
|
apache/incubator-mxnet
|
python/mxnet/attribute.py
|
AttrScope.get
|
def get(self, attr):
"""
Get the attribute dict given the attribute set by the symbol.
Parameters
----------
attr : dict of string to string
The attribute passed in by user during symbol creation.
Returns
-------
attr : dict of string to string
Updated attributes to add other scope related attributes.
"""
if self._attr:
ret = self._attr.copy()
if attr:
ret.update(attr)
return ret
else:
return attr if attr else {}
|
python
|
def get(self, attr):
"""
Get the attribute dict given the attribute set by the symbol.
Parameters
----------
attr : dict of string to string
The attribute passed in by user during symbol creation.
Returns
-------
attr : dict of string to string
Updated attributes to add other scope related attributes.
"""
if self._attr:
ret = self._attr.copy()
if attr:
ret.update(attr)
return ret
else:
return attr if attr else {}
|
[
"def",
"get",
"(",
"self",
",",
"attr",
")",
":",
"if",
"self",
".",
"_attr",
":",
"ret",
"=",
"self",
".",
"_attr",
".",
"copy",
"(",
")",
"if",
"attr",
":",
"ret",
".",
"update",
"(",
"attr",
")",
"return",
"ret",
"else",
":",
"return",
"attr",
"if",
"attr",
"else",
"{",
"}"
] |
Get the attribute dict given the attribute set by the symbol.
Parameters
----------
attr : dict of string to string
The attribute passed in by user during symbol creation.
Returns
-------
attr : dict of string to string
Updated attributes to add other scope related attributes.
|
[
"Get",
"the",
"attribute",
"dict",
"given",
"the",
"attribute",
"set",
"by",
"the",
"symbol",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/attribute.py#L47-L67
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_create_sparse_kvstore
|
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
|
python
|
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
|
[
"def",
"_create_sparse_kvstore",
"(",
"kvstore",
")",
":",
"# always update on kvstore",
"update_on_kvstore",
"=",
"True",
"if",
"isinstance",
"(",
"kvstore",
",",
"kvs",
".",
"KVStore",
")",
":",
"kv",
"=",
"kvstore",
"elif",
"isinstance",
"(",
"kvstore",
",",
"str",
")",
":",
"kv",
"=",
"kvs",
".",
"create",
"(",
"kvstore",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Cannot create '%s' KVStore with row_sparse parameters. \"",
"\"The type must be KVStore or str.\"",
"%",
"kvstore",
")",
"return",
"(",
"kv",
",",
"update_on_kvstore",
")"
] |
Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
|
[
"Create",
"kvstore",
"assuming",
"some",
"parameters",
"storage",
"types",
"are",
"row_sparse",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L58-L80
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_create_kvstore
|
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
|
python
|
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
|
[
"def",
"_create_kvstore",
"(",
"kvstore",
",",
"num_device",
",",
"arg_params",
")",
":",
"update_on_kvstore",
"=",
"bool",
"(",
"int",
"(",
"os",
".",
"getenv",
"(",
"'MXNET_UPDATE_ON_KVSTORE'",
",",
"\"1\"",
")",
")",
")",
"if",
"kvstore",
"is",
"None",
":",
"kv",
"=",
"None",
"elif",
"isinstance",
"(",
"kvstore",
",",
"kvs",
".",
"KVStore",
")",
":",
"kv",
"=",
"kvstore",
"elif",
"isinstance",
"(",
"kvstore",
",",
"str",
")",
":",
"# create kvstore using the string type",
"if",
"num_device",
"==",
"1",
"and",
"'dist'",
"not",
"in",
"kvstore",
":",
"# no need to use kv for single device and single machine",
"kv",
"=",
"None",
"else",
":",
"kv",
"=",
"kvs",
".",
"create",
"(",
"kvstore",
")",
"if",
"kvstore",
"==",
"'local'",
":",
"# automatically select a proper local",
"max_size",
"=",
"max",
"(",
"np",
".",
"prod",
"(",
"param",
".",
"shape",
")",
"for",
"param",
"in",
"arg_params",
".",
"values",
"(",
")",
")",
"if",
"max_size",
">",
"1024",
"*",
"1024",
"*",
"16",
":",
"update_on_kvstore",
"=",
"False",
"else",
":",
"raise",
"TypeError",
"(",
"'kvstore must be KVStore, str or None'",
")",
"if",
"kv",
"is",
"None",
":",
"update_on_kvstore",
"=",
"False",
"return",
"(",
"kv",
",",
"update_on_kvstore",
")"
] |
Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
|
[
"Create",
"kvstore",
"This",
"function",
"select",
"and",
"create",
"a",
"proper",
"kvstore",
"if",
"given",
"the",
"kvstore",
"type",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L82-L119
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_initialize_kvstore
|
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
|
python
|
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
|
[
"def",
"_initialize_kvstore",
"(",
"kvstore",
",",
"param_arrays",
",",
"arg_params",
",",
"param_names",
",",
"update_on_kvstore",
")",
":",
"for",
"idx",
",",
"param_on_devs",
"in",
"enumerate",
"(",
"param_arrays",
")",
":",
"name",
"=",
"param_names",
"[",
"idx",
"]",
"kvstore",
".",
"init",
"(",
"name",
",",
"arg_params",
"[",
"name",
"]",
")",
"if",
"update_on_kvstore",
":",
"kvstore",
".",
"pull",
"(",
"name",
",",
"param_on_devs",
",",
"priority",
"=",
"-",
"idx",
")"
] |
Initialize kvstore
|
[
"Initialize",
"kvstore"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L121-L128
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_update_params_on_kvstore_nccl
|
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
|
python
|
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
|
[
"def",
"_update_params_on_kvstore_nccl",
"(",
"param_arrays",
",",
"grad_arrays",
",",
"kvstore",
",",
"param_names",
")",
":",
"valid_indices",
"=",
"[",
"index",
"for",
"index",
",",
"grad_list",
"in",
"enumerate",
"(",
"grad_arrays",
")",
"if",
"grad_list",
"[",
"0",
"]",
"is",
"not",
"None",
"]",
"valid_grad_arrays",
"=",
"[",
"grad_arrays",
"[",
"i",
"]",
"for",
"i",
"in",
"valid_indices",
"]",
"valid_param_arrays",
"=",
"[",
"param_arrays",
"[",
"i",
"]",
"for",
"i",
"in",
"valid_indices",
"]",
"valid_param_names",
"=",
"[",
"param_names",
"[",
"i",
"]",
"for",
"i",
"in",
"valid_indices",
"]",
"size",
"=",
"len",
"(",
"valid_grad_arrays",
")",
"start",
"=",
"0",
"# Use aggregation by default only with NCCL",
"default_batch",
"=",
"'16'",
"batch",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"'MXNET_UPDATE_AGGREGATION_SIZE'",
",",
"default_batch",
")",
")",
"while",
"start",
"<",
"size",
":",
"end",
"=",
"start",
"+",
"batch",
"if",
"start",
"+",
"batch",
"<",
"size",
"else",
"size",
"# push gradient, priority is negative index",
"kvstore",
".",
"push",
"(",
"valid_param_names",
"[",
"start",
":",
"end",
"]",
",",
"valid_grad_arrays",
"[",
"start",
":",
"end",
"]",
",",
"priority",
"=",
"-",
"start",
")",
"# pull back the weights",
"kvstore",
".",
"pull",
"(",
"valid_param_names",
"[",
"start",
":",
"end",
"]",
",",
"valid_param_arrays",
"[",
"start",
":",
"end",
"]",
",",
"priority",
"=",
"-",
"start",
")",
"start",
"=",
"end"
] |
Perform update of param_arrays from grad_arrays on NCCL kvstore.
|
[
"Perform",
"update",
"of",
"param_arrays",
"from",
"grad_arrays",
"on",
"NCCL",
"kvstore",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L130-L148
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_update_params_on_kvstore
|
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
|
python
|
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
|
[
"def",
"_update_params_on_kvstore",
"(",
"param_arrays",
",",
"grad_arrays",
",",
"kvstore",
",",
"param_names",
")",
":",
"for",
"index",
",",
"pair",
"in",
"enumerate",
"(",
"zip",
"(",
"param_arrays",
",",
"grad_arrays",
")",
")",
":",
"arg_list",
",",
"grad_list",
"=",
"pair",
"if",
"grad_list",
"[",
"0",
"]",
"is",
"None",
":",
"continue",
"name",
"=",
"param_names",
"[",
"index",
"]",
"# push gradient, priority is negative index",
"kvstore",
".",
"push",
"(",
"name",
",",
"grad_list",
",",
"priority",
"=",
"-",
"index",
")",
"# pull back the weights",
"kvstore",
".",
"pull",
"(",
"name",
",",
"arg_list",
",",
"priority",
"=",
"-",
"index",
")"
] |
Perform update of param_arrays from grad_arrays on kvstore.
|
[
"Perform",
"update",
"of",
"param_arrays",
"from",
"grad_arrays",
"on",
"kvstore",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L150-L160
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_update_params
|
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
# update params if param_arrays and grad_arrays are not empty
if dev_updates:
i, w, g = zip(*dev_updates)
updater(i, w, g)
|
python
|
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
# update params if param_arrays and grad_arrays are not empty
if dev_updates:
i, w, g = zip(*dev_updates)
updater(i, w, g)
|
[
"def",
"_update_params",
"(",
"param_arrays",
",",
"grad_arrays",
",",
"updater",
",",
"num_device",
",",
"kvstore",
"=",
"None",
",",
"param_names",
"=",
"None",
")",
":",
"updates",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"num_device",
")",
"]",
"for",
"i",
",",
"pair",
"in",
"enumerate",
"(",
"zip",
"(",
"param_arrays",
",",
"grad_arrays",
")",
")",
":",
"arg_list",
",",
"grad_list",
"=",
"pair",
"if",
"grad_list",
"[",
"0",
"]",
"is",
"None",
":",
"continue",
"index",
"=",
"i",
"if",
"kvstore",
":",
"name",
"=",
"param_names",
"[",
"index",
"]",
"# push gradient, priority is negative index",
"kvstore",
".",
"push",
"(",
"name",
",",
"grad_list",
",",
"priority",
"=",
"-",
"index",
")",
"# pull back the sum gradients, to the same locations.",
"kvstore",
".",
"pull",
"(",
"name",
",",
"grad_list",
",",
"priority",
"=",
"-",
"index",
")",
"for",
"k",
",",
"p",
"in",
"enumerate",
"(",
"zip",
"(",
"arg_list",
",",
"grad_list",
")",
")",
":",
"# faked an index here, to make optimizer create diff",
"# state for the same index but on diff devs, TODO(mli)",
"# use a better solution later",
"w",
",",
"g",
"=",
"p",
"updates",
"[",
"k",
"]",
".",
"append",
"(",
"(",
"index",
"*",
"num_device",
"+",
"k",
",",
"g",
",",
"w",
")",
")",
"for",
"dev_updates",
"in",
"updates",
":",
"# update params if param_arrays and grad_arrays are not empty",
"if",
"dev_updates",
":",
"i",
",",
"w",
",",
"g",
"=",
"zip",
"(",
"*",
"dev_updates",
")",
"updater",
"(",
"i",
",",
"w",
",",
"g",
")"
] |
Perform update of param_arrays from grad_arrays not on kvstore.
|
[
"Perform",
"update",
"of",
"param_arrays",
"from",
"grad_arrays",
"not",
"on",
"kvstore",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L162-L187
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_multiple_callbacks
|
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
|
python
|
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
|
[
"def",
"_multiple_callbacks",
"(",
"callbacks",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"callbacks",
",",
"list",
")",
":",
"for",
"cb",
"in",
"callbacks",
":",
"cb",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"if",
"callbacks",
":",
"callbacks",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
|
[
"Sends",
"args",
"and",
"kwargs",
"to",
"any",
"configured",
"callbacks",
".",
"This",
"handles",
"the",
"cases",
"where",
"the",
"callbacks",
"variable",
"is",
"None",
"a",
"single",
"function",
"or",
"a",
"list",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L190-L200
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
_train_multi_device
|
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
The begining training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
This can be used to measure speed, get result from evaluation metric. etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
else:
kvstore.set_optimizer(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
|
python
|
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
The begining training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
This can be used to measure speed, get result from evaluation metric. etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
else:
kvstore.set_optimizer(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
|
[
"def",
"_train_multi_device",
"(",
"symbol",
",",
"ctx",
",",
"arg_names",
",",
"param_names",
",",
"aux_names",
",",
"arg_params",
",",
"aux_params",
",",
"begin_epoch",
",",
"end_epoch",
",",
"epoch_size",
",",
"optimizer",
",",
"kvstore",
",",
"update_on_kvstore",
",",
"train_data",
",",
"eval_data",
"=",
"None",
",",
"eval_metric",
"=",
"None",
",",
"epoch_end_callback",
"=",
"None",
",",
"batch_end_callback",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"work_load_list",
"=",
"None",
",",
"monitor",
"=",
"None",
",",
"eval_end_callback",
"=",
"None",
",",
"eval_batch_end_callback",
"=",
"None",
",",
"sym_gen",
"=",
"None",
")",
":",
"if",
"logger",
"is",
"None",
":",
"logger",
"=",
"logging",
"executor_manager",
"=",
"DataParallelExecutorManager",
"(",
"symbol",
"=",
"symbol",
",",
"sym_gen",
"=",
"sym_gen",
",",
"ctx",
"=",
"ctx",
",",
"train_data",
"=",
"train_data",
",",
"param_names",
"=",
"param_names",
",",
"arg_names",
"=",
"arg_names",
",",
"aux_names",
"=",
"aux_names",
",",
"work_load_list",
"=",
"work_load_list",
",",
"logger",
"=",
"logger",
")",
"if",
"monitor",
":",
"executor_manager",
".",
"install_monitor",
"(",
"monitor",
")",
"executor_manager",
".",
"set_params",
"(",
"arg_params",
",",
"aux_params",
")",
"if",
"not",
"update_on_kvstore",
":",
"updater",
"=",
"get_updater",
"(",
"optimizer",
")",
"else",
":",
"kvstore",
".",
"set_optimizer",
"(",
"optimizer",
")",
"if",
"kvstore",
":",
"_initialize_kvstore",
"(",
"kvstore",
"=",
"kvstore",
",",
"param_arrays",
"=",
"executor_manager",
".",
"param_arrays",
",",
"arg_params",
"=",
"arg_params",
",",
"param_names",
"=",
"executor_manager",
".",
"param_names",
",",
"update_on_kvstore",
"=",
"update_on_kvstore",
")",
"# Now start training",
"train_data",
".",
"reset",
"(",
")",
"for",
"epoch",
"in",
"range",
"(",
"begin_epoch",
",",
"end_epoch",
")",
":",
"# Training phase",
"tic",
"=",
"time",
".",
"time",
"(",
")",
"eval_metric",
".",
"reset",
"(",
")",
"nbatch",
"=",
"0",
"# Iterate over training data.",
"while",
"True",
":",
"do_reset",
"=",
"True",
"for",
"data_batch",
"in",
"train_data",
":",
"executor_manager",
".",
"load_data_batch",
"(",
"data_batch",
")",
"if",
"monitor",
"is",
"not",
"None",
":",
"monitor",
".",
"tic",
"(",
")",
"executor_manager",
".",
"forward",
"(",
"is_train",
"=",
"True",
")",
"executor_manager",
".",
"backward",
"(",
")",
"if",
"update_on_kvstore",
":",
"if",
"'nccl'",
"in",
"kvstore",
".",
"type",
":",
"_update_params_on_kvstore_nccl",
"(",
"executor_manager",
".",
"param_arrays",
",",
"executor_manager",
".",
"grad_arrays",
",",
"kvstore",
",",
"executor_manager",
".",
"param_names",
")",
"else",
":",
"_update_params_on_kvstore",
"(",
"executor_manager",
".",
"param_arrays",
",",
"executor_manager",
".",
"grad_arrays",
",",
"kvstore",
",",
"executor_manager",
".",
"param_names",
")",
"else",
":",
"_update_params",
"(",
"executor_manager",
".",
"param_arrays",
",",
"executor_manager",
".",
"grad_arrays",
",",
"updater",
"=",
"updater",
",",
"num_device",
"=",
"len",
"(",
"ctx",
")",
",",
"kvstore",
"=",
"kvstore",
",",
"param_names",
"=",
"executor_manager",
".",
"param_names",
")",
"if",
"monitor",
"is",
"not",
"None",
":",
"monitor",
".",
"toc_print",
"(",
")",
"# evaluate at end, so we can lazy copy",
"executor_manager",
".",
"update_metric",
"(",
"eval_metric",
",",
"data_batch",
".",
"label",
")",
"nbatch",
"+=",
"1",
"# batch callback (for print purpose)",
"if",
"batch_end_callback",
"is",
"not",
"None",
":",
"batch_end_params",
"=",
"BatchEndParam",
"(",
"epoch",
"=",
"epoch",
",",
"nbatch",
"=",
"nbatch",
",",
"eval_metric",
"=",
"eval_metric",
",",
"locals",
"=",
"locals",
"(",
")",
")",
"_multiple_callbacks",
"(",
"batch_end_callback",
",",
"batch_end_params",
")",
"# this epoch is done possibly earlier",
"if",
"epoch_size",
"is",
"not",
"None",
"and",
"nbatch",
">=",
"epoch_size",
":",
"do_reset",
"=",
"False",
"break",
"if",
"do_reset",
":",
"logger",
".",
"info",
"(",
"'Epoch[%d] Resetting Data Iterator'",
",",
"epoch",
")",
"train_data",
".",
"reset",
"(",
")",
"# this epoch is done",
"if",
"epoch_size",
"is",
"None",
"or",
"nbatch",
">=",
"epoch_size",
":",
"break",
"toc",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"info",
"(",
"'Epoch[%d] Time cost=%.3f'",
",",
"epoch",
",",
"(",
"toc",
"-",
"tic",
")",
")",
"if",
"epoch_end_callback",
"or",
"epoch",
"+",
"1",
"==",
"end_epoch",
":",
"executor_manager",
".",
"copy_to",
"(",
"arg_params",
",",
"aux_params",
")",
"_multiple_callbacks",
"(",
"epoch_end_callback",
",",
"epoch",
",",
"symbol",
",",
"arg_params",
",",
"aux_params",
")",
"# evaluation",
"if",
"eval_data",
":",
"eval_metric",
".",
"reset",
"(",
")",
"eval_data",
".",
"reset",
"(",
")",
"total_num_batch",
"=",
"0",
"for",
"i",
",",
"eval_batch",
"in",
"enumerate",
"(",
"eval_data",
")",
":",
"executor_manager",
".",
"load_data_batch",
"(",
"eval_batch",
")",
"executor_manager",
".",
"forward",
"(",
"is_train",
"=",
"False",
")",
"executor_manager",
".",
"update_metric",
"(",
"eval_metric",
",",
"eval_batch",
".",
"label",
")",
"if",
"eval_batch_end_callback",
"is",
"not",
"None",
":",
"batch_end_params",
"=",
"BatchEndParam",
"(",
"epoch",
"=",
"epoch",
",",
"nbatch",
"=",
"i",
",",
"eval_metric",
"=",
"eval_metric",
",",
"locals",
"=",
"locals",
"(",
")",
")",
"_multiple_callbacks",
"(",
"eval_batch_end_callback",
",",
"batch_end_params",
")",
"total_num_batch",
"+=",
"1",
"if",
"eval_end_callback",
"is",
"not",
"None",
":",
"eval_end_params",
"=",
"BatchEndParam",
"(",
"epoch",
"=",
"epoch",
",",
"nbatch",
"=",
"total_num_batch",
",",
"eval_metric",
"=",
"eval_metric",
",",
"locals",
"=",
"locals",
"(",
")",
")",
"_multiple_callbacks",
"(",
"eval_end_callback",
",",
"eval_end_params",
")",
"eval_data",
".",
"reset",
"(",
")"
] |
Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
The begining training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
This can be used to measure speed, get result from evaluation metric. etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
|
[
"Internal",
"training",
"function",
"on",
"multiple",
"devices",
".",
"This",
"function",
"will",
"also",
"work",
"for",
"single",
"device",
"as",
"well",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L203-L390
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
save_checkpoint
|
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
|
python
|
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
|
[
"def",
"save_checkpoint",
"(",
"prefix",
",",
"epoch",
",",
"symbol",
",",
"arg_params",
",",
"aux_params",
")",
":",
"if",
"symbol",
"is",
"not",
"None",
":",
"symbol",
".",
"save",
"(",
"'%s-symbol.json'",
"%",
"prefix",
")",
"save_dict",
"=",
"{",
"(",
"'arg:%s'",
"%",
"k",
")",
":",
"v",
".",
"as_in_context",
"(",
"cpu",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"arg_params",
".",
"items",
"(",
")",
"}",
"save_dict",
".",
"update",
"(",
"{",
"(",
"'aux:%s'",
"%",
"k",
")",
":",
"v",
".",
"as_in_context",
"(",
"cpu",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"aux_params",
".",
"items",
"(",
")",
"}",
")",
"param_name",
"=",
"'%s-%04d.params'",
"%",
"(",
"prefix",
",",
"epoch",
")",
"nd",
".",
"save",
"(",
"param_name",
",",
"save_dict",
")",
"logging",
".",
"info",
"(",
"'Saved checkpoint to \\\"%s\\\"'",
",",
"param_name",
")"
] |
Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
|
[
"Checkpoint",
"the",
"model",
"data",
"into",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L394-L421
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
load_checkpoint
|
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
|
python
|
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
|
[
"def",
"load_checkpoint",
"(",
"prefix",
",",
"epoch",
")",
":",
"symbol",
"=",
"sym",
".",
"load",
"(",
"'%s-symbol.json'",
"%",
"prefix",
")",
"save_dict",
"=",
"nd",
".",
"load",
"(",
"'%s-%04d.params'",
"%",
"(",
"prefix",
",",
"epoch",
")",
")",
"arg_params",
"=",
"{",
"}",
"aux_params",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"save_dict",
".",
"items",
"(",
")",
":",
"tp",
",",
"name",
"=",
"k",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"tp",
"==",
"'arg'",
":",
"arg_params",
"[",
"name",
"]",
"=",
"v",
"if",
"tp",
"==",
"'aux'",
":",
"aux_params",
"[",
"name",
"]",
"=",
"v",
"return",
"(",
"symbol",
",",
"arg_params",
",",
"aux_params",
")"
] |
Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
|
[
"Load",
"model",
"checkpoint",
"from",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L424-L458
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward._check_arguments
|
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
|
python
|
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
|
[
"def",
"_check_arguments",
"(",
"self",
")",
":",
"if",
"self",
".",
"argument_checked",
":",
"return",
"assert",
"(",
"self",
".",
"symbol",
"is",
"not",
"None",
")",
"self",
".",
"argument_checked",
"=",
"True",
"# check if symbol contain duplicated names.",
"_check_arguments",
"(",
"self",
".",
"symbol",
")",
"# rematch parameters to delete useless ones",
"if",
"self",
".",
"allow_extra_params",
":",
"if",
"self",
".",
"arg_params",
":",
"arg_names",
"=",
"set",
"(",
"self",
".",
"symbol",
".",
"list_arguments",
"(",
")",
")",
"self",
".",
"arg_params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"arg_params",
".",
"items",
"(",
")",
"if",
"k",
"in",
"arg_names",
"}",
"if",
"self",
".",
"aux_params",
":",
"aux_names",
"=",
"set",
"(",
"self",
".",
"symbol",
".",
"list_auxiliary_states",
"(",
")",
")",
"self",
".",
"aux_params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"aux_params",
".",
"items",
"(",
")",
"if",
"k",
"in",
"aux_names",
"}"
] |
verify the argument of the default symbol and user provided parameters
|
[
"verify",
"the",
"argument",
"of",
"the",
"default",
"symbol",
"and",
"user",
"provided",
"parameters"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L546-L565
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward._init_params
|
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
|
python
|
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
|
[
"def",
"_init_params",
"(",
"self",
",",
"inputs",
",",
"overwrite",
"=",
"False",
")",
":",
"inputs",
"=",
"[",
"x",
"if",
"isinstance",
"(",
"x",
",",
"DataDesc",
")",
"else",
"DataDesc",
"(",
"*",
"x",
")",
"for",
"x",
"in",
"inputs",
"]",
"input_shapes",
"=",
"{",
"item",
".",
"name",
":",
"item",
".",
"shape",
"for",
"item",
"in",
"inputs",
"}",
"arg_shapes",
",",
"_",
",",
"aux_shapes",
"=",
"self",
".",
"symbol",
".",
"infer_shape",
"(",
"*",
"*",
"input_shapes",
")",
"assert",
"arg_shapes",
"is",
"not",
"None",
"input_dtypes",
"=",
"{",
"item",
".",
"name",
":",
"item",
".",
"dtype",
"for",
"item",
"in",
"inputs",
"}",
"arg_dtypes",
",",
"_",
",",
"aux_dtypes",
"=",
"self",
".",
"symbol",
".",
"infer_type",
"(",
"*",
"*",
"input_dtypes",
")",
"assert",
"arg_dtypes",
"is",
"not",
"None",
"arg_names",
"=",
"self",
".",
"symbol",
".",
"list_arguments",
"(",
")",
"input_names",
"=",
"input_shapes",
".",
"keys",
"(",
")",
"param_names",
"=",
"[",
"key",
"for",
"key",
"in",
"arg_names",
"if",
"key",
"not",
"in",
"input_names",
"]",
"aux_names",
"=",
"self",
".",
"symbol",
".",
"list_auxiliary_states",
"(",
")",
"param_name_attrs",
"=",
"[",
"x",
"for",
"x",
"in",
"zip",
"(",
"arg_names",
",",
"arg_shapes",
",",
"arg_dtypes",
")",
"if",
"x",
"[",
"0",
"]",
"in",
"param_names",
"]",
"arg_params",
"=",
"{",
"k",
":",
"nd",
".",
"zeros",
"(",
"shape",
"=",
"s",
",",
"dtype",
"=",
"t",
")",
"for",
"k",
",",
"s",
",",
"t",
"in",
"param_name_attrs",
"}",
"aux_name_attrs",
"=",
"[",
"x",
"for",
"x",
"in",
"zip",
"(",
"aux_names",
",",
"aux_shapes",
",",
"aux_dtypes",
")",
"if",
"x",
"[",
"0",
"]",
"in",
"aux_names",
"]",
"aux_params",
"=",
"{",
"k",
":",
"nd",
".",
"zeros",
"(",
"shape",
"=",
"s",
",",
"dtype",
"=",
"t",
")",
"for",
"k",
",",
"s",
",",
"t",
"in",
"aux_name_attrs",
"}",
"for",
"k",
",",
"v",
"in",
"arg_params",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"arg_params",
"and",
"k",
"in",
"self",
".",
"arg_params",
"and",
"(",
"not",
"overwrite",
")",
":",
"arg_params",
"[",
"k",
"]",
"[",
":",
"]",
"=",
"self",
".",
"arg_params",
"[",
"k",
"]",
"[",
":",
"]",
"else",
":",
"self",
".",
"initializer",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"aux_params",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"aux_params",
"and",
"k",
"in",
"self",
".",
"aux_params",
"and",
"(",
"not",
"overwrite",
")",
":",
"aux_params",
"[",
"k",
"]",
"[",
":",
"]",
"=",
"self",
".",
"aux_params",
"[",
"k",
"]",
"[",
":",
"]",
"else",
":",
"self",
".",
"initializer",
"(",
"k",
",",
"v",
")",
"self",
".",
"arg_params",
"=",
"arg_params",
"self",
".",
"aux_params",
"=",
"aux_params",
"return",
"(",
"arg_names",
",",
"list",
"(",
"param_names",
")",
",",
"aux_names",
")"
] |
Initialize weight parameters and auxiliary states.
|
[
"Initialize",
"weight",
"parameters",
"and",
"auxiliary",
"states",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L573-L611
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward._init_predictor
|
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
|
python
|
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
|
[
"def",
"_init_predictor",
"(",
"self",
",",
"input_shapes",
",",
"type_dict",
"=",
"None",
")",
":",
"shapes",
"=",
"{",
"name",
":",
"self",
".",
"arg_params",
"[",
"name",
"]",
".",
"shape",
"for",
"name",
"in",
"self",
".",
"arg_params",
"}",
"shapes",
".",
"update",
"(",
"dict",
"(",
"input_shapes",
")",
")",
"if",
"self",
".",
"_pred_exec",
"is",
"not",
"None",
":",
"arg_shapes",
",",
"_",
",",
"_",
"=",
"self",
".",
"symbol",
".",
"infer_shape",
"(",
"*",
"*",
"shapes",
")",
"assert",
"arg_shapes",
"is",
"not",
"None",
",",
"\"Incomplete input shapes\"",
"pred_shapes",
"=",
"[",
"x",
".",
"shape",
"for",
"x",
"in",
"self",
".",
"_pred_exec",
".",
"arg_arrays",
"]",
"if",
"arg_shapes",
"==",
"pred_shapes",
":",
"return",
"# for now only use the first device",
"pred_exec",
"=",
"self",
".",
"symbol",
".",
"simple_bind",
"(",
"self",
".",
"ctx",
"[",
"0",
"]",
",",
"grad_req",
"=",
"'null'",
",",
"type_dict",
"=",
"type_dict",
",",
"*",
"*",
"shapes",
")",
"pred_exec",
".",
"copy_params_from",
"(",
"self",
".",
"arg_params",
",",
"self",
".",
"aux_params",
")",
"_check_arguments",
"(",
"self",
".",
"symbol",
")",
"self",
".",
"_pred_exec",
"=",
"pred_exec"
] |
Initialize the predictor module for running prediction.
|
[
"Initialize",
"the",
"predictor",
"module",
"for",
"running",
"prediction",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L621-L637
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward._init_iter
|
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
|
python
|
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
|
[
"def",
"_init_iter",
"(",
"self",
",",
"X",
",",
"y",
",",
"is_train",
")",
":",
"if",
"isinstance",
"(",
"X",
",",
"(",
"np",
".",
"ndarray",
",",
"nd",
".",
"NDArray",
")",
")",
":",
"if",
"y",
"is",
"None",
":",
"if",
"is_train",
":",
"raise",
"ValueError",
"(",
"'y must be specified when X is numpy.ndarray'",
")",
"else",
":",
"y",
"=",
"np",
".",
"zeros",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"not",
"isinstance",
"(",
"y",
",",
"(",
"np",
".",
"ndarray",
",",
"nd",
".",
"NDArray",
")",
")",
":",
"raise",
"TypeError",
"(",
"'y must be ndarray when X is numpy.ndarray'",
")",
"if",
"X",
".",
"shape",
"[",
"0",
"]",
"!=",
"y",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"The numbers of data points and labels not equal\"",
")",
"if",
"y",
".",
"ndim",
"==",
"2",
"and",
"y",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"y",
"=",
"y",
".",
"flatten",
"(",
")",
"if",
"y",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Label must be 1D or 2D (with 2nd dimension being 1)\"",
")",
"if",
"is_train",
":",
"return",
"io",
".",
"NDArrayIter",
"(",
"X",
",",
"y",
",",
"min",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"numpy_batch_size",
")",
",",
"shuffle",
"=",
"is_train",
",",
"last_batch_handle",
"=",
"'roll_over'",
")",
"else",
":",
"return",
"io",
".",
"NDArrayIter",
"(",
"X",
",",
"y",
",",
"min",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"numpy_batch_size",
")",
",",
"shuffle",
"=",
"False",
")",
"if",
"not",
"isinstance",
"(",
"X",
",",
"io",
".",
"DataIter",
")",
":",
"raise",
"TypeError",
"(",
"'X must be DataIter, NDArray or numpy.ndarray'",
")",
"return",
"X"
] |
Initialize the iterator given input.
|
[
"Initialize",
"the",
"iterator",
"given",
"input",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L639-L662
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward._init_eval_iter
|
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
|
python
|
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
|
[
"def",
"_init_eval_iter",
"(",
"self",
",",
"eval_data",
")",
":",
"if",
"eval_data",
"is",
"None",
":",
"return",
"eval_data",
"if",
"isinstance",
"(",
"eval_data",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"eval_data",
")",
"==",
"2",
":",
"if",
"eval_data",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"if",
"eval_data",
"[",
"1",
"]",
"is",
"None",
"and",
"isinstance",
"(",
"eval_data",
"[",
"0",
"]",
",",
"io",
".",
"DataIter",
")",
":",
"return",
"eval_data",
"[",
"0",
"]",
"input_data",
"=",
"(",
"np",
".",
"array",
"(",
"eval_data",
"[",
"0",
"]",
")",
"if",
"isinstance",
"(",
"eval_data",
"[",
"0",
"]",
",",
"list",
")",
"else",
"eval_data",
"[",
"0",
"]",
")",
"input_label",
"=",
"(",
"np",
".",
"array",
"(",
"eval_data",
"[",
"1",
"]",
")",
"if",
"isinstance",
"(",
"eval_data",
"[",
"1",
"]",
",",
"list",
")",
"else",
"eval_data",
"[",
"1",
"]",
")",
"return",
"self",
".",
"_init_iter",
"(",
"input_data",
",",
"input_label",
",",
"is_train",
"=",
"True",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Eval data is NONE\"",
")",
"if",
"not",
"isinstance",
"(",
"eval_data",
",",
"io",
".",
"DataIter",
")",
":",
"raise",
"TypeError",
"(",
"'Eval data must be DataIter, or '",
"'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)'",
")",
"return",
"eval_data"
] |
Initialize the iterator given eval_data.
|
[
"Initialize",
"the",
"iterator",
"given",
"eval_data",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L664-L682
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward.predict
|
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batch to run. Go though all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
|
python
|
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batch to run. Go though all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
|
[
"def",
"predict",
"(",
"self",
",",
"X",
",",
"num_batch",
"=",
"None",
",",
"return_data",
"=",
"False",
",",
"reset",
"=",
"True",
")",
":",
"X",
"=",
"self",
".",
"_init_iter",
"(",
"X",
",",
"None",
",",
"is_train",
"=",
"False",
")",
"if",
"reset",
":",
"X",
".",
"reset",
"(",
")",
"data_shapes",
"=",
"X",
".",
"provide_data",
"data_names",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"data_shapes",
"]",
"type_dict",
"=",
"dict",
"(",
"(",
"key",
",",
"value",
".",
"dtype",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"arg_params",
".",
"items",
"(",
")",
")",
"for",
"x",
"in",
"X",
".",
"provide_data",
":",
"if",
"isinstance",
"(",
"x",
",",
"DataDesc",
")",
":",
"type_dict",
"[",
"x",
".",
"name",
"]",
"=",
"x",
".",
"dtype",
"else",
":",
"type_dict",
"[",
"x",
"[",
"0",
"]",
"]",
"=",
"mx_real_t",
"self",
".",
"_init_predictor",
"(",
"data_shapes",
",",
"type_dict",
")",
"batch_size",
"=",
"X",
".",
"batch_size",
"data_arrays",
"=",
"[",
"self",
".",
"_pred_exec",
".",
"arg_dict",
"[",
"name",
"]",
"for",
"name",
"in",
"data_names",
"]",
"output_list",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_pred_exec",
".",
"outputs",
")",
")",
"]",
"if",
"return_data",
":",
"data_list",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"X",
".",
"provide_data",
"]",
"label_list",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"X",
".",
"provide_label",
"]",
"i",
"=",
"0",
"for",
"batch",
"in",
"X",
":",
"_load_data",
"(",
"batch",
",",
"data_arrays",
")",
"self",
".",
"_pred_exec",
".",
"forward",
"(",
"is_train",
"=",
"False",
")",
"padded",
"=",
"batch",
".",
"pad",
"real_size",
"=",
"batch_size",
"-",
"padded",
"for",
"o_list",
",",
"o_nd",
"in",
"zip",
"(",
"output_list",
",",
"self",
".",
"_pred_exec",
".",
"outputs",
")",
":",
"o_list",
".",
"append",
"(",
"o_nd",
"[",
"0",
":",
"real_size",
"]",
".",
"asnumpy",
"(",
")",
")",
"if",
"return_data",
":",
"for",
"j",
",",
"x",
"in",
"enumerate",
"(",
"batch",
".",
"data",
")",
":",
"data_list",
"[",
"j",
"]",
".",
"append",
"(",
"x",
"[",
"0",
":",
"real_size",
"]",
".",
"asnumpy",
"(",
")",
")",
"for",
"j",
",",
"x",
"in",
"enumerate",
"(",
"batch",
".",
"label",
")",
":",
"label_list",
"[",
"j",
"]",
".",
"append",
"(",
"x",
"[",
"0",
":",
"real_size",
"]",
".",
"asnumpy",
"(",
")",
")",
"i",
"+=",
"1",
"if",
"num_batch",
"is",
"not",
"None",
"and",
"i",
"==",
"num_batch",
":",
"break",
"outputs",
"=",
"[",
"np",
".",
"concatenate",
"(",
"x",
")",
"for",
"x",
"in",
"output_list",
"]",
"if",
"len",
"(",
"outputs",
")",
"==",
"1",
":",
"outputs",
"=",
"outputs",
"[",
"0",
"]",
"if",
"return_data",
":",
"data",
"=",
"[",
"np",
".",
"concatenate",
"(",
"x",
")",
"for",
"x",
"in",
"data_list",
"]",
"label",
"=",
"[",
"np",
".",
"concatenate",
"(",
"x",
")",
"for",
"x",
"in",
"label_list",
"]",
"if",
"len",
"(",
"data",
")",
"==",
"1",
":",
"data",
"=",
"data",
"[",
"0",
"]",
"if",
"len",
"(",
"label",
")",
"==",
"1",
":",
"label",
"=",
"label",
"[",
"0",
"]",
"return",
"outputs",
",",
"data",
",",
"label",
"else",
":",
"return",
"outputs"
] |
Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batch to run. Go though all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
|
[
"Run",
"the",
"prediction",
"always",
"only",
"use",
"one",
"device",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L684-L751
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward.score
|
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go though all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
|
python
|
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go though all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
|
[
"def",
"score",
"(",
"self",
",",
"X",
",",
"eval_metric",
"=",
"'acc'",
",",
"num_batch",
"=",
"None",
",",
"batch_end_callback",
"=",
"None",
",",
"reset",
"=",
"True",
")",
":",
"# setup metric",
"if",
"not",
"isinstance",
"(",
"eval_metric",
",",
"metric",
".",
"EvalMetric",
")",
":",
"eval_metric",
"=",
"metric",
".",
"create",
"(",
"eval_metric",
")",
"X",
"=",
"self",
".",
"_init_iter",
"(",
"X",
",",
"None",
",",
"is_train",
"=",
"False",
")",
"if",
"reset",
":",
"X",
".",
"reset",
"(",
")",
"data_shapes",
"=",
"X",
".",
"provide_data",
"data_names",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"data_shapes",
"]",
"type_dict",
"=",
"dict",
"(",
"(",
"key",
",",
"value",
".",
"dtype",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"arg_params",
".",
"items",
"(",
")",
")",
"for",
"x",
"in",
"X",
".",
"provide_data",
":",
"if",
"isinstance",
"(",
"x",
",",
"DataDesc",
")",
":",
"type_dict",
"[",
"x",
".",
"name",
"]",
"=",
"x",
".",
"dtype",
"else",
":",
"type_dict",
"[",
"x",
"[",
"0",
"]",
"]",
"=",
"mx_real_t",
"self",
".",
"_init_predictor",
"(",
"data_shapes",
",",
"type_dict",
")",
"data_arrays",
"=",
"[",
"self",
".",
"_pred_exec",
".",
"arg_dict",
"[",
"name",
"]",
"for",
"name",
"in",
"data_names",
"]",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"X",
")",
":",
"if",
"num_batch",
"is",
"not",
"None",
"and",
"i",
"==",
"num_batch",
":",
"break",
"_load_data",
"(",
"batch",
",",
"data_arrays",
")",
"self",
".",
"_pred_exec",
".",
"forward",
"(",
"is_train",
"=",
"False",
")",
"eval_metric",
".",
"update",
"(",
"batch",
".",
"label",
",",
"self",
".",
"_pred_exec",
".",
"outputs",
")",
"if",
"batch_end_callback",
"is",
"not",
"None",
":",
"batch_end_params",
"=",
"BatchEndParam",
"(",
"epoch",
"=",
"0",
",",
"nbatch",
"=",
"i",
",",
"eval_metric",
"=",
"eval_metric",
",",
"locals",
"=",
"locals",
"(",
")",
")",
"_multiple_callbacks",
"(",
"batch_end_callback",
",",
"batch_end_params",
")",
"return",
"eval_metric",
".",
"get",
"(",
")",
"[",
"1",
"]"
] |
Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go though all batches if ``None``.
Returns
-------
s : float
The final score.
|
[
"Run",
"the",
"model",
"given",
"an",
"input",
"and",
"calculate",
"the",
"score",
"as",
"assessed",
"by",
"an",
"evaluation",
"metric",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L753-L802
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward.fit
|
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
In default uses 'local', often no need to change for single machiine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# init optmizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
if not optimizer.idx2name:
optimizer.idx2name = param_idx2name.copy()
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
|
python
|
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
In default uses 'local', often no need to change for single machiine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# init optmizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
if not optimizer.idx2name:
optimizer.idx2name = param_idx2name.copy()
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"eval_data",
"=",
"None",
",",
"eval_metric",
"=",
"'acc'",
",",
"epoch_end_callback",
"=",
"None",
",",
"batch_end_callback",
"=",
"None",
",",
"kvstore",
"=",
"'local'",
",",
"logger",
"=",
"None",
",",
"work_load_list",
"=",
"None",
",",
"monitor",
"=",
"None",
",",
"eval_end_callback",
"=",
"LogValidationMetricsCallback",
"(",
")",
",",
"eval_batch_end_callback",
"=",
"None",
")",
":",
"data",
"=",
"self",
".",
"_init_iter",
"(",
"X",
",",
"y",
",",
"is_train",
"=",
"True",
")",
"eval_data",
"=",
"self",
".",
"_init_eval_iter",
"(",
"eval_data",
")",
"if",
"self",
".",
"sym_gen",
":",
"self",
".",
"symbol",
"=",
"self",
".",
"sym_gen",
"(",
"data",
".",
"default_bucket_key",
")",
"# pylint: disable=no-member",
"self",
".",
"_check_arguments",
"(",
")",
"self",
".",
"kwargs",
"[",
"\"sym\"",
"]",
"=",
"self",
".",
"symbol",
"arg_names",
",",
"param_names",
",",
"aux_names",
"=",
"self",
".",
"_init_params",
"(",
"data",
".",
"provide_data",
"+",
"data",
".",
"provide_label",
")",
"# setup metric",
"if",
"not",
"isinstance",
"(",
"eval_metric",
",",
"metric",
".",
"EvalMetric",
")",
":",
"eval_metric",
"=",
"metric",
".",
"create",
"(",
"eval_metric",
")",
"# create kvstore",
"(",
"kvstore",
",",
"update_on_kvstore",
")",
"=",
"_create_kvstore",
"(",
"kvstore",
",",
"len",
"(",
"self",
".",
"ctx",
")",
",",
"self",
".",
"arg_params",
")",
"param_idx2name",
"=",
"{",
"}",
"if",
"update_on_kvstore",
":",
"param_idx2name",
".",
"update",
"(",
"enumerate",
"(",
"param_names",
")",
")",
"else",
":",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"param_names",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"ctx",
")",
")",
":",
"param_idx2name",
"[",
"i",
"*",
"len",
"(",
"self",
".",
"ctx",
")",
"+",
"k",
"]",
"=",
"n",
"self",
".",
"kwargs",
"[",
"\"param_idx2name\"",
"]",
"=",
"param_idx2name",
"# init optmizer",
"if",
"isinstance",
"(",
"self",
".",
"optimizer",
",",
"str",
")",
":",
"batch_size",
"=",
"data",
".",
"batch_size",
"if",
"kvstore",
"and",
"'dist'",
"in",
"kvstore",
".",
"type",
"and",
"'_async'",
"not",
"in",
"kvstore",
".",
"type",
":",
"batch_size",
"*=",
"kvstore",
".",
"num_workers",
"optimizer",
"=",
"opt",
".",
"create",
"(",
"self",
".",
"optimizer",
",",
"rescale_grad",
"=",
"(",
"1.0",
"/",
"batch_size",
")",
",",
"*",
"*",
"(",
"self",
".",
"kwargs",
")",
")",
"elif",
"isinstance",
"(",
"self",
".",
"optimizer",
",",
"opt",
".",
"Optimizer",
")",
":",
"if",
"not",
"optimizer",
".",
"idx2name",
":",
"optimizer",
".",
"idx2name",
"=",
"param_idx2name",
".",
"copy",
"(",
")",
"optimizer",
"=",
"self",
".",
"optimizer",
"# do training",
"_train_multi_device",
"(",
"self",
".",
"symbol",
",",
"self",
".",
"ctx",
",",
"arg_names",
",",
"param_names",
",",
"aux_names",
",",
"self",
".",
"arg_params",
",",
"self",
".",
"aux_params",
",",
"begin_epoch",
"=",
"self",
".",
"begin_epoch",
",",
"end_epoch",
"=",
"self",
".",
"num_epoch",
",",
"epoch_size",
"=",
"self",
".",
"epoch_size",
",",
"optimizer",
"=",
"optimizer",
",",
"train_data",
"=",
"data",
",",
"eval_data",
"=",
"eval_data",
",",
"eval_metric",
"=",
"eval_metric",
",",
"epoch_end_callback",
"=",
"epoch_end_callback",
",",
"batch_end_callback",
"=",
"batch_end_callback",
",",
"kvstore",
"=",
"kvstore",
",",
"update_on_kvstore",
"=",
"update_on_kvstore",
",",
"logger",
"=",
"logger",
",",
"work_load_list",
"=",
"work_load_list",
",",
"monitor",
"=",
"monitor",
",",
"eval_end_callback",
"=",
"eval_end_callback",
",",
"eval_batch_end_callback",
"=",
"eval_batch_end_callback",
",",
"sym_gen",
"=",
"self",
".",
"sym_gen",
")"
] |
Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
In default uses 'local', often no need to change for single machiine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
|
[
"Fit",
"the",
"model",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L804-L905
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward.save
|
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage(S3, HDFS)
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
|
python
|
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage(S3, HDFS)
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
|
[
"def",
"save",
"(",
"self",
",",
"prefix",
",",
"epoch",
"=",
"None",
")",
":",
"if",
"epoch",
"is",
"None",
":",
"epoch",
"=",
"self",
".",
"num_epoch",
"assert",
"epoch",
"is",
"not",
"None",
"save_checkpoint",
"(",
"prefix",
",",
"epoch",
",",
"self",
".",
"symbol",
",",
"self",
".",
"arg_params",
",",
"self",
".",
"aux_params",
")"
] |
Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage(S3, HDFS)
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
|
[
"Checkpoint",
"the",
"model",
"checkpoint",
"into",
"file",
".",
"You",
"can",
"also",
"use",
"pickle",
"to",
"do",
"the",
"job",
"if",
"you",
"only",
"work",
"on",
"Python",
".",
"The",
"advantage",
"of",
"load",
"and",
"save",
"(",
"as",
"compared",
"to",
"pickle",
")",
"is",
"that",
"the",
"resulting",
"file",
"can",
"be",
"loaded",
"from",
"other",
"MXNet",
"language",
"bindings",
".",
"One",
"can",
"also",
"directly",
"load",
"/",
"save",
"from",
"/",
"to",
"cloud",
"storage",
"(",
"S3",
"HDFS",
")"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L908-L928
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward.load
|
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
|
python
|
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
|
[
"def",
"load",
"(",
"prefix",
",",
"epoch",
",",
"ctx",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"symbol",
",",
"arg_params",
",",
"aux_params",
"=",
"load_checkpoint",
"(",
"prefix",
",",
"epoch",
")",
"return",
"FeedForward",
"(",
"symbol",
",",
"ctx",
"=",
"ctx",
",",
"arg_params",
"=",
"arg_params",
",",
"aux_params",
"=",
"aux_params",
",",
"begin_epoch",
"=",
"epoch",
",",
"*",
"*",
"kwargs",
")"
] |
Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
|
[
"Load",
"model",
"checkpoint",
"from",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L931-L959
|
train
|
apache/incubator-mxnet
|
python/mxnet/model.py
|
FeedForward.create
|
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
The number of training epochs(epochs).
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_set` is ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dis_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
|
python
|
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
The number of training epochs(epochs).
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_set` is ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dis_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
|
[
"def",
"create",
"(",
"symbol",
",",
"X",
",",
"y",
"=",
"None",
",",
"ctx",
"=",
"None",
",",
"num_epoch",
"=",
"None",
",",
"epoch_size",
"=",
"None",
",",
"optimizer",
"=",
"'sgd'",
",",
"initializer",
"=",
"Uniform",
"(",
"0.01",
")",
",",
"eval_data",
"=",
"None",
",",
"eval_metric",
"=",
"'acc'",
",",
"epoch_end_callback",
"=",
"None",
",",
"batch_end_callback",
"=",
"None",
",",
"kvstore",
"=",
"'local'",
",",
"logger",
"=",
"None",
",",
"work_load_list",
"=",
"None",
",",
"eval_end_callback",
"=",
"LogValidationMetricsCallback",
"(",
")",
",",
"eval_batch_end_callback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"FeedForward",
"(",
"symbol",
",",
"ctx",
"=",
"ctx",
",",
"num_epoch",
"=",
"num_epoch",
",",
"epoch_size",
"=",
"epoch_size",
",",
"optimizer",
"=",
"optimizer",
",",
"initializer",
"=",
"initializer",
",",
"*",
"*",
"kwargs",
")",
"model",
".",
"fit",
"(",
"X",
",",
"y",
",",
"eval_data",
"=",
"eval_data",
",",
"eval_metric",
"=",
"eval_metric",
",",
"epoch_end_callback",
"=",
"epoch_end_callback",
",",
"batch_end_callback",
"=",
"batch_end_callback",
",",
"kvstore",
"=",
"kvstore",
",",
"logger",
"=",
"logger",
",",
"work_load_list",
"=",
"work_load_list",
",",
"eval_end_callback",
"=",
"eval_end_callback",
",",
"eval_batch_end_callback",
"=",
"eval_batch_end_callback",
")",
"return",
"model"
] |
Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
The number of training epochs(epochs).
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_set` is ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dis_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
|
[
"Functional",
"style",
"to",
"create",
"a",
"model",
".",
"This",
"function",
"is",
"more",
"consistent",
"with",
"functional",
"languages",
"such",
"as",
"R",
"where",
"mutation",
"is",
"not",
"allowed",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L962-L1025
|
train
|
apache/incubator-mxnet
|
ci/docker_cache.py
|
build_save_containers
|
def build_save_containers(platforms, registry, load_cache) -> int:
"""
Entry point to build and upload all built dockerimages in parallel
:param platforms: List of platforms
:param registry: Docker registry name
:param load_cache: Load cache before building
:return: 1 if error occurred, 0 otherwise
"""
from joblib import Parallel, delayed
if len(platforms) == 0:
return 0
platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")(
delayed(_build_save_container)(platform, registry, load_cache)
for platform in platforms)
is_error = False
for platform_result in platform_results:
if platform_result is not None:
logging.error('Failed to generate %s', platform_result)
is_error = True
return 1 if is_error else 0
|
python
|
def build_save_containers(platforms, registry, load_cache) -> int:
"""
Entry point to build and upload all built dockerimages in parallel
:param platforms: List of platforms
:param registry: Docker registry name
:param load_cache: Load cache before building
:return: 1 if error occurred, 0 otherwise
"""
from joblib import Parallel, delayed
if len(platforms) == 0:
return 0
platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")(
delayed(_build_save_container)(platform, registry, load_cache)
for platform in platforms)
is_error = False
for platform_result in platform_results:
if platform_result is not None:
logging.error('Failed to generate %s', platform_result)
is_error = True
return 1 if is_error else 0
|
[
"def",
"build_save_containers",
"(",
"platforms",
",",
"registry",
",",
"load_cache",
")",
"->",
"int",
":",
"from",
"joblib",
"import",
"Parallel",
",",
"delayed",
"if",
"len",
"(",
"platforms",
")",
"==",
"0",
":",
"return",
"0",
"platform_results",
"=",
"Parallel",
"(",
"n_jobs",
"=",
"PARALLEL_BUILDS",
",",
"backend",
"=",
"\"multiprocessing\"",
")",
"(",
"delayed",
"(",
"_build_save_container",
")",
"(",
"platform",
",",
"registry",
",",
"load_cache",
")",
"for",
"platform",
"in",
"platforms",
")",
"is_error",
"=",
"False",
"for",
"platform_result",
"in",
"platform_results",
":",
"if",
"platform_result",
"is",
"not",
"None",
":",
"logging",
".",
"error",
"(",
"'Failed to generate %s'",
",",
"platform_result",
")",
"is_error",
"=",
"True",
"return",
"1",
"if",
"is_error",
"else",
"0"
] |
Entry point to build and upload all built dockerimages in parallel
:param platforms: List of platforms
:param registry: Docker registry name
:param load_cache: Load cache before building
:return: 1 if error occurred, 0 otherwise
|
[
"Entry",
"point",
"to",
"build",
"and",
"upload",
"all",
"built",
"dockerimages",
"in",
"parallel",
":",
"param",
"platforms",
":",
"List",
"of",
"platforms",
":",
"param",
"registry",
":",
"Docker",
"registry",
"name",
":",
"param",
"load_cache",
":",
"Load",
"cache",
"before",
"building",
":",
"return",
":",
"1",
"if",
"error",
"occurred",
"0",
"otherwise"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker_cache.py#L44-L66
|
train
|
apache/incubator-mxnet
|
ci/docker_cache.py
|
_build_save_container
|
def _build_save_container(platform, registry, load_cache) -> Optional[str]:
"""
Build image for passed platform and upload the cache to the specified S3 bucket
:param platform: Platform
:param registry: Docker registry name
:param load_cache: Load cache before building
:return: Platform if failed, None otherwise
"""
docker_tag = build_util.get_docker_tag(platform=platform, registry=registry)
# Preload cache
if load_cache:
load_docker_cache(registry=registry, docker_tag=docker_tag)
# Start building
logging.debug('Building %s as %s', platform, docker_tag)
try:
# Increase the number of retries for building the cache.
image_id = build_util.build_docker(docker_binary='docker', platform=platform, registry=registry, num_retries=10, no_cache=False)
logging.info('Built %s as %s', docker_tag, image_id)
# Push cache to registry
_upload_image(registry=registry, docker_tag=docker_tag, image_id=image_id)
return None
except Exception:
logging.exception('Unexpected exception during build of %s', docker_tag)
return platform
|
python
|
def _build_save_container(platform, registry, load_cache) -> Optional[str]:
"""
Build image for passed platform and upload the cache to the specified S3 bucket
:param platform: Platform
:param registry: Docker registry name
:param load_cache: Load cache before building
:return: Platform if failed, None otherwise
"""
docker_tag = build_util.get_docker_tag(platform=platform, registry=registry)
# Preload cache
if load_cache:
load_docker_cache(registry=registry, docker_tag=docker_tag)
# Start building
logging.debug('Building %s as %s', platform, docker_tag)
try:
# Increase the number of retries for building the cache.
image_id = build_util.build_docker(docker_binary='docker', platform=platform, registry=registry, num_retries=10, no_cache=False)
logging.info('Built %s as %s', docker_tag, image_id)
# Push cache to registry
_upload_image(registry=registry, docker_tag=docker_tag, image_id=image_id)
return None
except Exception:
logging.exception('Unexpected exception during build of %s', docker_tag)
return platform
|
[
"def",
"_build_save_container",
"(",
"platform",
",",
"registry",
",",
"load_cache",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"docker_tag",
"=",
"build_util",
".",
"get_docker_tag",
"(",
"platform",
"=",
"platform",
",",
"registry",
"=",
"registry",
")",
"# Preload cache",
"if",
"load_cache",
":",
"load_docker_cache",
"(",
"registry",
"=",
"registry",
",",
"docker_tag",
"=",
"docker_tag",
")",
"# Start building",
"logging",
".",
"debug",
"(",
"'Building %s as %s'",
",",
"platform",
",",
"docker_tag",
")",
"try",
":",
"# Increase the number of retries for building the cache.",
"image_id",
"=",
"build_util",
".",
"build_docker",
"(",
"docker_binary",
"=",
"'docker'",
",",
"platform",
"=",
"platform",
",",
"registry",
"=",
"registry",
",",
"num_retries",
"=",
"10",
",",
"no_cache",
"=",
"False",
")",
"logging",
".",
"info",
"(",
"'Built %s as %s'",
",",
"docker_tag",
",",
"image_id",
")",
"# Push cache to registry",
"_upload_image",
"(",
"registry",
"=",
"registry",
",",
"docker_tag",
"=",
"docker_tag",
",",
"image_id",
"=",
"image_id",
")",
"return",
"None",
"except",
"Exception",
":",
"logging",
".",
"exception",
"(",
"'Unexpected exception during build of %s'",
",",
"docker_tag",
")",
"return",
"platform"
] |
Build image for passed platform and upload the cache to the specified S3 bucket
:param platform: Platform
:param registry: Docker registry name
:param load_cache: Load cache before building
:return: Platform if failed, None otherwise
|
[
"Build",
"image",
"for",
"passed",
"platform",
"and",
"upload",
"the",
"cache",
"to",
"the",
"specified",
"S3",
"bucket",
":",
"param",
"platform",
":",
"Platform",
":",
"param",
"registry",
":",
"Docker",
"registry",
"name",
":",
"param",
"load_cache",
":",
"Load",
"cache",
"before",
"building",
":",
"return",
":",
"Platform",
"if",
"failed",
"None",
"otherwise"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker_cache.py#L69-L95
|
train
|
apache/incubator-mxnet
|
ci/docker_cache.py
|
_upload_image
|
def _upload_image(registry, docker_tag, image_id) -> None:
"""
Upload the passed image by id, tag it with docker tag and upload to S3 bucket
:param registry: Docker registry name
:param docker_tag: Docker tag
:param image_id: Image id
:return: None
"""
# We don't have to retag the image since it is already in the right format
logging.info('Uploading %s (%s) to %s', docker_tag, image_id, registry)
push_cmd = ['docker', 'push', docker_tag]
subprocess.check_call(push_cmd)
|
python
|
def _upload_image(registry, docker_tag, image_id) -> None:
"""
Upload the passed image by id, tag it with docker tag and upload to S3 bucket
:param registry: Docker registry name
:param docker_tag: Docker tag
:param image_id: Image id
:return: None
"""
# We don't have to retag the image since it is already in the right format
logging.info('Uploading %s (%s) to %s', docker_tag, image_id, registry)
push_cmd = ['docker', 'push', docker_tag]
subprocess.check_call(push_cmd)
|
[
"def",
"_upload_image",
"(",
"registry",
",",
"docker_tag",
",",
"image_id",
")",
"->",
"None",
":",
"# We don't have to retag the image since it is already in the right format",
"logging",
".",
"info",
"(",
"'Uploading %s (%s) to %s'",
",",
"docker_tag",
",",
"image_id",
",",
"registry",
")",
"push_cmd",
"=",
"[",
"'docker'",
",",
"'push'",
",",
"docker_tag",
"]",
"subprocess",
".",
"check_call",
"(",
"push_cmd",
")"
] |
Upload the passed image by id, tag it with docker tag and upload to S3 bucket
:param registry: Docker registry name
:param docker_tag: Docker tag
:param image_id: Image id
:return: None
|
[
"Upload",
"the",
"passed",
"image",
"by",
"id",
"tag",
"it",
"with",
"docker",
"tag",
"and",
"upload",
"to",
"S3",
"bucket",
":",
"param",
"registry",
":",
"Docker",
"registry",
"name",
":",
"param",
"docker_tag",
":",
"Docker",
"tag",
":",
"param",
"image_id",
":",
"Image",
"id",
":",
"return",
":",
"None"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker_cache.py#L100-L111
|
train
|
apache/incubator-mxnet
|
ci/docker_cache.py
|
_login_dockerhub
|
def _login_dockerhub():
"""
Login to the Docker Hub account
:return: None
"""
dockerhub_credentials = _get_dockerhub_credentials()
logging.info('Logging in to DockerHub')
# We use password-stdin instead of --password to avoid leaking passwords in case of an error.
# This method will produce the following output:
# > WARNING! Your password will be stored unencrypted in /home/jenkins_slave/.docker/config.json.
# > Configure a credential helper to remove this warning. See
# > https://docs.docker.com/engine/reference/commandline/login/#credentials-store
# Since we consider the restricted slaves a secure environment, that's fine. Also, using this will require
# third party applications which would need a review first as well.
p = subprocess.run(['docker', 'login', '--username', dockerhub_credentials['username'], '--password-stdin'],
stdout=subprocess.PIPE, input=str.encode(dockerhub_credentials['password']))
logging.info(p.stdout)
logging.info('Successfully logged in to DockerHub')
|
python
|
def _login_dockerhub():
"""
Login to the Docker Hub account
:return: None
"""
dockerhub_credentials = _get_dockerhub_credentials()
logging.info('Logging in to DockerHub')
# We use password-stdin instead of --password to avoid leaking passwords in case of an error.
# This method will produce the following output:
# > WARNING! Your password will be stored unencrypted in /home/jenkins_slave/.docker/config.json.
# > Configure a credential helper to remove this warning. See
# > https://docs.docker.com/engine/reference/commandline/login/#credentials-store
# Since we consider the restricted slaves a secure environment, that's fine. Also, using this will require
# third party applications which would need a review first as well.
p = subprocess.run(['docker', 'login', '--username', dockerhub_credentials['username'], '--password-stdin'],
stdout=subprocess.PIPE, input=str.encode(dockerhub_credentials['password']))
logging.info(p.stdout)
logging.info('Successfully logged in to DockerHub')
|
[
"def",
"_login_dockerhub",
"(",
")",
":",
"dockerhub_credentials",
"=",
"_get_dockerhub_credentials",
"(",
")",
"logging",
".",
"info",
"(",
"'Logging in to DockerHub'",
")",
"# We use password-stdin instead of --password to avoid leaking passwords in case of an error.",
"# This method will produce the following output:",
"# > WARNING! Your password will be stored unencrypted in /home/jenkins_slave/.docker/config.json.",
"# > Configure a credential helper to remove this warning. See",
"# > https://docs.docker.com/engine/reference/commandline/login/#credentials-store",
"# Since we consider the restricted slaves a secure environment, that's fine. Also, using this will require",
"# third party applications which would need a review first as well.",
"p",
"=",
"subprocess",
".",
"run",
"(",
"[",
"'docker'",
",",
"'login'",
",",
"'--username'",
",",
"dockerhub_credentials",
"[",
"'username'",
"]",
",",
"'--password-stdin'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"input",
"=",
"str",
".",
"encode",
"(",
"dockerhub_credentials",
"[",
"'password'",
"]",
")",
")",
"logging",
".",
"info",
"(",
"p",
".",
"stdout",
")",
"logging",
".",
"info",
"(",
"'Successfully logged in to DockerHub'",
")"
] |
Login to the Docker Hub account
:return: None
|
[
"Login",
"to",
"the",
"Docker",
"Hub",
"account",
":",
"return",
":",
"None"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker_cache.py#L116-L134
|
train
|
apache/incubator-mxnet
|
ci/docker_cache.py
|
load_docker_cache
|
def load_docker_cache(registry, docker_tag) -> None:
"""
Load the precompiled docker cache from the registry
:param registry: Docker registry name
:param docker_tag: Docker tag to load
:return: None
"""
# We don't have to retag the image since it's already in the right format
if not registry:
return
assert docker_tag
logging.info('Loading Docker cache for %s from %s', docker_tag, registry)
pull_cmd = ['docker', 'pull', docker_tag]
# Don't throw an error if the image does not exist
subprocess.run(pull_cmd, timeout=DOCKER_CACHE_TIMEOUT_MINS*60)
logging.info('Successfully pulled docker cache')
|
python
|
def load_docker_cache(registry, docker_tag) -> None:
"""
Load the precompiled docker cache from the registry
:param registry: Docker registry name
:param docker_tag: Docker tag to load
:return: None
"""
# We don't have to retag the image since it's already in the right format
if not registry:
return
assert docker_tag
logging.info('Loading Docker cache for %s from %s', docker_tag, registry)
pull_cmd = ['docker', 'pull', docker_tag]
# Don't throw an error if the image does not exist
subprocess.run(pull_cmd, timeout=DOCKER_CACHE_TIMEOUT_MINS*60)
logging.info('Successfully pulled docker cache')
|
[
"def",
"load_docker_cache",
"(",
"registry",
",",
"docker_tag",
")",
"->",
"None",
":",
"# We don't have to retag the image since it's already in the right format",
"if",
"not",
"registry",
":",
"return",
"assert",
"docker_tag",
"logging",
".",
"info",
"(",
"'Loading Docker cache for %s from %s'",
",",
"docker_tag",
",",
"registry",
")",
"pull_cmd",
"=",
"[",
"'docker'",
",",
"'pull'",
",",
"docker_tag",
"]",
"# Don't throw an error if the image does not exist",
"subprocess",
".",
"run",
"(",
"pull_cmd",
",",
"timeout",
"=",
"DOCKER_CACHE_TIMEOUT_MINS",
"*",
"60",
")",
"logging",
".",
"info",
"(",
"'Successfully pulled docker cache'",
")"
] |
Load the precompiled docker cache from the registry
:param registry: Docker registry name
:param docker_tag: Docker tag to load
:return: None
|
[
"Load",
"the",
"precompiled",
"docker",
"cache",
"from",
"the",
"registry",
":",
"param",
"registry",
":",
"Docker",
"registry",
"name",
":",
"param",
"docker_tag",
":",
"Docker",
"tag",
"to",
"load",
":",
"return",
":",
"None"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker_cache.py#L149-L166
|
train
|
apache/incubator-mxnet
|
ci/docker_cache.py
|
delete_local_docker_cache
|
def delete_local_docker_cache(docker_tag):
"""
Delete the local docker cache for the entire docker image chain
:param docker_tag: Docker tag
:return: None
"""
history_cmd = ['docker', 'history', '-q', docker_tag]
try:
image_ids_b = subprocess.check_output(history_cmd)
image_ids_str = image_ids_b.decode('utf-8').strip()
layer_ids = [id.strip() for id in image_ids_str.split('\n') if id != '<missing>']
delete_cmd = ['docker', 'image', 'rm', '--force']
delete_cmd.extend(layer_ids)
subprocess.check_call(delete_cmd)
except subprocess.CalledProcessError as error:
# Could be caused by the image not being present
logging.debug('Error during local cache deletion %s', error)
|
python
|
def delete_local_docker_cache(docker_tag):
"""
Delete the local docker cache for the entire docker image chain
:param docker_tag: Docker tag
:return: None
"""
history_cmd = ['docker', 'history', '-q', docker_tag]
try:
image_ids_b = subprocess.check_output(history_cmd)
image_ids_str = image_ids_b.decode('utf-8').strip()
layer_ids = [id.strip() for id in image_ids_str.split('\n') if id != '<missing>']
delete_cmd = ['docker', 'image', 'rm', '--force']
delete_cmd.extend(layer_ids)
subprocess.check_call(delete_cmd)
except subprocess.CalledProcessError as error:
# Could be caused by the image not being present
logging.debug('Error during local cache deletion %s', error)
|
[
"def",
"delete_local_docker_cache",
"(",
"docker_tag",
")",
":",
"history_cmd",
"=",
"[",
"'docker'",
",",
"'history'",
",",
"'-q'",
",",
"docker_tag",
"]",
"try",
":",
"image_ids_b",
"=",
"subprocess",
".",
"check_output",
"(",
"history_cmd",
")",
"image_ids_str",
"=",
"image_ids_b",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"layer_ids",
"=",
"[",
"id",
".",
"strip",
"(",
")",
"for",
"id",
"in",
"image_ids_str",
".",
"split",
"(",
"'\\n'",
")",
"if",
"id",
"!=",
"'<missing>'",
"]",
"delete_cmd",
"=",
"[",
"'docker'",
",",
"'image'",
",",
"'rm'",
",",
"'--force'",
"]",
"delete_cmd",
".",
"extend",
"(",
"layer_ids",
")",
"subprocess",
".",
"check_call",
"(",
"delete_cmd",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"error",
":",
"# Could be caused by the image not being present",
"logging",
".",
"debug",
"(",
"'Error during local cache deletion %s'",
",",
"error",
")"
] |
Delete the local docker cache for the entire docker image chain
:param docker_tag: Docker tag
:return: None
|
[
"Delete",
"the",
"local",
"docker",
"cache",
"for",
"the",
"entire",
"docker",
"image",
"chain",
":",
"param",
"docker_tag",
":",
"Docker",
"tag",
":",
"return",
":",
"None"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker_cache.py#L169-L187
|
train
|
apache/incubator-mxnet
|
ci/docker_cache.py
|
main
|
def main() -> int:
"""
Utility to create and publish the Docker cache to Docker Hub
:return:
"""
# We need to be in the same directory than the script so the commands in the dockerfiles work as
# expected. But the script can be invoked from a different path
base = os.path.split(os.path.realpath(__file__))[0]
os.chdir(base)
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
def script_name() -> str:
return os.path.split(sys.argv[0])[1]
logging.basicConfig(format='{}: %(asctime)-15s %(message)s'.format(script_name()))
parser = argparse.ArgumentParser(description="Utility for preserving and loading Docker cache", epilog="")
parser.add_argument("--docker-registry",
help="Docker hub registry name",
type=str,
required=True)
args = parser.parse_args()
platforms = build_util.get_platforms()
try:
_login_dockerhub()
return build_save_containers(platforms=platforms, registry=args.docker_registry, load_cache=True)
finally:
_logout_dockerhub()
|
python
|
def main() -> int:
"""
Utility to create and publish the Docker cache to Docker Hub
:return:
"""
# We need to be in the same directory than the script so the commands in the dockerfiles work as
# expected. But the script can be invoked from a different path
base = os.path.split(os.path.realpath(__file__))[0]
os.chdir(base)
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
def script_name() -> str:
return os.path.split(sys.argv[0])[1]
logging.basicConfig(format='{}: %(asctime)-15s %(message)s'.format(script_name()))
parser = argparse.ArgumentParser(description="Utility for preserving and loading Docker cache", epilog="")
parser.add_argument("--docker-registry",
help="Docker hub registry name",
type=str,
required=True)
args = parser.parse_args()
platforms = build_util.get_platforms()
try:
_login_dockerhub()
return build_save_containers(platforms=platforms, registry=args.docker_registry, load_cache=True)
finally:
_logout_dockerhub()
|
[
"def",
"main",
"(",
")",
"->",
"int",
":",
"# We need to be in the same directory than the script so the commands in the dockerfiles work as",
"# expected. But the script can be invoked from a different path",
"base",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"[",
"0",
"]",
"os",
".",
"chdir",
"(",
"base",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"logging",
".",
"getLogger",
"(",
"'botocore'",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"logging",
".",
"getLogger",
"(",
"'boto3'",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"logging",
".",
"getLogger",
"(",
"'urllib3'",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"logging",
".",
"getLogger",
"(",
"'s3transfer'",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"def",
"script_name",
"(",
")",
"->",
"str",
":",
"return",
"os",
".",
"path",
".",
"split",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"[",
"1",
"]",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"'{}: %(asctime)-15s %(message)s'",
".",
"format",
"(",
"script_name",
"(",
")",
")",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Utility for preserving and loading Docker cache\"",
",",
"epilog",
"=",
"\"\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--docker-registry\"",
",",
"help",
"=",
"\"Docker hub registry name\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"platforms",
"=",
"build_util",
".",
"get_platforms",
"(",
")",
"try",
":",
"_login_dockerhub",
"(",
")",
"return",
"build_save_containers",
"(",
"platforms",
"=",
"platforms",
",",
"registry",
"=",
"args",
".",
"docker_registry",
",",
"load_cache",
"=",
"True",
")",
"finally",
":",
"_logout_dockerhub",
"(",
")"
] |
Utility to create and publish the Docker cache to Docker Hub
:return:
|
[
"Utility",
"to",
"create",
"and",
"publish",
"the",
"Docker",
"cache",
"to",
"Docker",
"Hub",
":",
"return",
":"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/ci/docker_cache.py#L221-L255
|
train
|
apache/incubator-mxnet
|
example/cnn_chinese_text_classification/data_helpers.py
|
get_chinese_text
|
def get_chinese_text():
"""Download the chinese_text dataset and unzip it"""
if not os.path.isdir("data/"):
os.system("mkdir data/")
if (not os.path.exists('data/pos.txt')) or \
(not os.path.exists('data/neg')):
os.system("wget -q https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/chinese_text.zip "
"-P data/")
os.chdir("./data")
os.system("unzip -u chinese_text.zip")
os.chdir("..")
|
python
|
def get_chinese_text():
"""Download the chinese_text dataset and unzip it"""
if not os.path.isdir("data/"):
os.system("mkdir data/")
if (not os.path.exists('data/pos.txt')) or \
(not os.path.exists('data/neg')):
os.system("wget -q https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/chinese_text.zip "
"-P data/")
os.chdir("./data")
os.system("unzip -u chinese_text.zip")
os.chdir("..")
|
[
"def",
"get_chinese_text",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"\"data/\"",
")",
":",
"os",
".",
"system",
"(",
"\"mkdir data/\"",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'data/pos.txt'",
")",
")",
"or",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'data/neg'",
")",
")",
":",
"os",
".",
"system",
"(",
"\"wget -q https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/chinese_text.zip \"",
"\"-P data/\"",
")",
"os",
".",
"chdir",
"(",
"\"./data\"",
")",
"os",
".",
"system",
"(",
"\"unzip -u chinese_text.zip\"",
")",
"os",
".",
"chdir",
"(",
"\"..\"",
")"
] |
Download the chinese_text dataset and unzip it
|
[
"Download",
"the",
"chinese_text",
"dataset",
"and",
"unzip",
"it"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_chinese_text_classification/data_helpers.py#L51-L61
|
train
|
apache/incubator-mxnet
|
example/cnn_chinese_text_classification/data_helpers.py
|
load_data_and_labels
|
def load_data_and_labels():
"""Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# download dataset
get_chinese_text()
# Load data from files
positive_examples = list(codecs.open("./data/pos.txt", "r", "utf-8").readlines())
positive_examples = [s.strip() for s in positive_examples]
positive_examples = [pe for pe in positive_examples if len(pe) < 100]
negative_examples = list(codecs.open("./data/neg.txt", "r", "utf-8").readlines())
negative_examples = [s.strip() for s in negative_examples]
negative_examples = [ne for ne in negative_examples if len(ne) < 100]
# Split by words
x_text = positive_examples + negative_examples
# x_text = [clean_str(sent) for sent in x_text]
x_text = [list(s) for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
|
python
|
def load_data_and_labels():
"""Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# download dataset
get_chinese_text()
# Load data from files
positive_examples = list(codecs.open("./data/pos.txt", "r", "utf-8").readlines())
positive_examples = [s.strip() for s in positive_examples]
positive_examples = [pe for pe in positive_examples if len(pe) < 100]
negative_examples = list(codecs.open("./data/neg.txt", "r", "utf-8").readlines())
negative_examples = [s.strip() for s in negative_examples]
negative_examples = [ne for ne in negative_examples if len(ne) < 100]
# Split by words
x_text = positive_examples + negative_examples
# x_text = [clean_str(sent) for sent in x_text]
x_text = [list(s) for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
|
[
"def",
"load_data_and_labels",
"(",
")",
":",
"# download dataset",
"get_chinese_text",
"(",
")",
"# Load data from files",
"positive_examples",
"=",
"list",
"(",
"codecs",
".",
"open",
"(",
"\"./data/pos.txt\"",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
".",
"readlines",
"(",
")",
")",
"positive_examples",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"positive_examples",
"]",
"positive_examples",
"=",
"[",
"pe",
"for",
"pe",
"in",
"positive_examples",
"if",
"len",
"(",
"pe",
")",
"<",
"100",
"]",
"negative_examples",
"=",
"list",
"(",
"codecs",
".",
"open",
"(",
"\"./data/neg.txt\"",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
".",
"readlines",
"(",
")",
")",
"negative_examples",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"negative_examples",
"]",
"negative_examples",
"=",
"[",
"ne",
"for",
"ne",
"in",
"negative_examples",
"if",
"len",
"(",
"ne",
")",
"<",
"100",
"]",
"# Split by words",
"x_text",
"=",
"positive_examples",
"+",
"negative_examples",
"# x_text = [clean_str(sent) for sent in x_text]",
"x_text",
"=",
"[",
"list",
"(",
"s",
")",
"for",
"s",
"in",
"x_text",
"]",
"# Generate labels",
"positive_labels",
"=",
"[",
"[",
"0",
",",
"1",
"]",
"for",
"_",
"in",
"positive_examples",
"]",
"negative_labels",
"=",
"[",
"[",
"1",
",",
"0",
"]",
"for",
"_",
"in",
"negative_examples",
"]",
"y",
"=",
"np",
".",
"concatenate",
"(",
"[",
"positive_labels",
",",
"negative_labels",
"]",
",",
"0",
")",
"return",
"[",
"x_text",
",",
"y",
"]"
] |
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
|
[
"Loads",
"MR",
"polarity",
"data",
"from",
"files",
"splits",
"the",
"data",
"into",
"words",
"and",
"generates",
"labels",
".",
"Returns",
"split",
"sentences",
"and",
"labels",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_chinese_text_classification/data_helpers.py#L64-L87
|
train
|
apache/incubator-mxnet
|
example/ssd/train/metric.py
|
MultiBoxMetric.reset
|
def reset(self):
"""
override reset behavior
"""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
|
python
|
def reset(self):
"""
override reset behavior
"""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
|
[
"def",
"reset",
"(",
"self",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'num'",
",",
"None",
")",
"is",
"None",
":",
"self",
".",
"num_inst",
"=",
"0",
"self",
".",
"sum_metric",
"=",
"0.0",
"else",
":",
"self",
".",
"num_inst",
"=",
"[",
"0",
"]",
"*",
"self",
".",
"num",
"self",
".",
"sum_metric",
"=",
"[",
"0.0",
"]",
"*",
"self",
".",
"num"
] |
override reset behavior
|
[
"override",
"reset",
"behavior"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/metric.py#L31-L40
|
train
|
apache/incubator-mxnet
|
example/ssd/train/metric.py
|
MultiBoxMetric.reset_local
|
def reset_local(self):
"""
override reset behavior
"""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
|
python
|
def reset_local(self):
"""
override reset behavior
"""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
|
[
"def",
"reset_local",
"(",
"self",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'num'",
",",
"None",
")",
"is",
"None",
":",
"self",
".",
"num_inst",
"=",
"0",
"self",
".",
"sum_metric",
"=",
"0.0",
"else",
":",
"self",
".",
"num_inst",
"=",
"[",
"0",
"]",
"*",
"self",
".",
"num",
"self",
".",
"sum_metric",
"=",
"[",
"0.0",
"]",
"*",
"self",
".",
"num"
] |
override reset behavior
|
[
"override",
"reset",
"behavior"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/metric.py#L42-L51
|
train
|
apache/incubator-mxnet
|
example/ssd/train/metric.py
|
MultiBoxMetric.update
|
def update(self, labels, preds):
"""
Implementation of updating metrics
"""
# get generated multi label from network
cls_prob = preds[0].asnumpy()
loc_loss = preds[1].asnumpy()
cls_label = preds[2].asnumpy()
valid_count = np.sum(cls_label >= 0)
# overall accuracy & object accuracy
label = cls_label.flatten()
mask = np.where(label >= 0)[0]
indices = np.int64(label[mask])
prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1]))
prob = prob[mask, indices]
self.sum_metric[0] += (-np.log(prob + self.eps)).sum()
self.num_inst[0] += valid_count
# smoothl1loss
self.sum_metric[1] += np.sum(loc_loss)
self.num_inst[1] += valid_count
|
python
|
def update(self, labels, preds):
"""
Implementation of updating metrics
"""
# get generated multi label from network
cls_prob = preds[0].asnumpy()
loc_loss = preds[1].asnumpy()
cls_label = preds[2].asnumpy()
valid_count = np.sum(cls_label >= 0)
# overall accuracy & object accuracy
label = cls_label.flatten()
mask = np.where(label >= 0)[0]
indices = np.int64(label[mask])
prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1]))
prob = prob[mask, indices]
self.sum_metric[0] += (-np.log(prob + self.eps)).sum()
self.num_inst[0] += valid_count
# smoothl1loss
self.sum_metric[1] += np.sum(loc_loss)
self.num_inst[1] += valid_count
|
[
"def",
"update",
"(",
"self",
",",
"labels",
",",
"preds",
")",
":",
"# get generated multi label from network",
"cls_prob",
"=",
"preds",
"[",
"0",
"]",
".",
"asnumpy",
"(",
")",
"loc_loss",
"=",
"preds",
"[",
"1",
"]",
".",
"asnumpy",
"(",
")",
"cls_label",
"=",
"preds",
"[",
"2",
"]",
".",
"asnumpy",
"(",
")",
"valid_count",
"=",
"np",
".",
"sum",
"(",
"cls_label",
">=",
"0",
")",
"# overall accuracy & object accuracy",
"label",
"=",
"cls_label",
".",
"flatten",
"(",
")",
"mask",
"=",
"np",
".",
"where",
"(",
"label",
">=",
"0",
")",
"[",
"0",
"]",
"indices",
"=",
"np",
".",
"int64",
"(",
"label",
"[",
"mask",
"]",
")",
"prob",
"=",
"cls_prob",
".",
"transpose",
"(",
"(",
"0",
",",
"2",
",",
"1",
")",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"cls_prob",
".",
"shape",
"[",
"1",
"]",
")",
")",
"prob",
"=",
"prob",
"[",
"mask",
",",
"indices",
"]",
"self",
".",
"sum_metric",
"[",
"0",
"]",
"+=",
"(",
"-",
"np",
".",
"log",
"(",
"prob",
"+",
"self",
".",
"eps",
")",
")",
".",
"sum",
"(",
")",
"self",
".",
"num_inst",
"[",
"0",
"]",
"+=",
"valid_count",
"# smoothl1loss",
"self",
".",
"sum_metric",
"[",
"1",
"]",
"+=",
"np",
".",
"sum",
"(",
"loc_loss",
")",
"self",
".",
"num_inst",
"[",
"1",
"]",
"+=",
"valid_count"
] |
Implementation of updating metrics
|
[
"Implementation",
"of",
"updating",
"metrics"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/metric.py#L53-L72
|
train
|
apache/incubator-mxnet
|
example/ssd/train/metric.py
|
MultiBoxMetric.get
|
def get(self):
"""Get the current evaluation result.
Override the default behavior
Returns
-------
name : str
Name of the metric.
value : float
Value of the evaluation.
"""
if self.num is None:
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
else:
names = ['%s'%(self.name[i]) for i in range(self.num)]
values = [x / y if y != 0 else float('nan') \
for x, y in zip(self.sum_metric, self.num_inst)]
return (names, values)
|
python
|
def get(self):
"""Get the current evaluation result.
Override the default behavior
Returns
-------
name : str
Name of the metric.
value : float
Value of the evaluation.
"""
if self.num is None:
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
else:
names = ['%s'%(self.name[i]) for i in range(self.num)]
values = [x / y if y != 0 else float('nan') \
for x, y in zip(self.sum_metric, self.num_inst)]
return (names, values)
|
[
"def",
"get",
"(",
"self",
")",
":",
"if",
"self",
".",
"num",
"is",
"None",
":",
"if",
"self",
".",
"num_inst",
"==",
"0",
":",
"return",
"(",
"self",
".",
"name",
",",
"float",
"(",
"'nan'",
")",
")",
"else",
":",
"return",
"(",
"self",
".",
"name",
",",
"self",
".",
"sum_metric",
"/",
"self",
".",
"num_inst",
")",
"else",
":",
"names",
"=",
"[",
"'%s'",
"%",
"(",
"self",
".",
"name",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num",
")",
"]",
"values",
"=",
"[",
"x",
"/",
"y",
"if",
"y",
"!=",
"0",
"else",
"float",
"(",
"'nan'",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"sum_metric",
",",
"self",
".",
"num_inst",
")",
"]",
"return",
"(",
"names",
",",
"values",
")"
] |
Get the current evaluation result.
Override the default behavior
Returns
-------
name : str
Name of the metric.
value : float
Value of the evaluation.
|
[
"Get",
"the",
"current",
"evaluation",
"result",
".",
"Override",
"the",
"default",
"behavior"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/metric.py#L74-L94
|
train
|
apache/incubator-mxnet
|
example/reinforcement-learning/dqn/operators.py
|
dqn_sym_nips
|
def dqn_sym_nips(action_num, data=None, name='dqn'):
"""Structure of the Deep Q Network in the NIPS 2013 workshop paper:
Playing Atari with Deep Reinforcement Learning (https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf)
Parameters
----------
action_num : int
data : mxnet.sym.Symbol, optional
name : str, optional
"""
if data is None:
net = mx.symbol.Variable('data')
else:
net = data
net = mx.symbol.Convolution(data=net, name='conv1', kernel=(8, 8), stride=(4, 4), num_filter=16)
net = mx.symbol.Activation(data=net, name='relu1', act_type="relu")
net = mx.symbol.Convolution(data=net, name='conv2', kernel=(4, 4), stride=(2, 2), num_filter=32)
net = mx.symbol.Activation(data=net, name='relu2', act_type="relu")
net = mx.symbol.Flatten(data=net)
net = mx.symbol.FullyConnected(data=net, name='fc3', num_hidden=256)
net = mx.symbol.Activation(data=net, name='relu3', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='fc4', num_hidden=action_num)
net = mx.symbol.Custom(data=net, name=name, op_type='DQNOutput')
return net
|
python
|
def dqn_sym_nips(action_num, data=None, name='dqn'):
"""Structure of the Deep Q Network in the NIPS 2013 workshop paper:
Playing Atari with Deep Reinforcement Learning (https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf)
Parameters
----------
action_num : int
data : mxnet.sym.Symbol, optional
name : str, optional
"""
if data is None:
net = mx.symbol.Variable('data')
else:
net = data
net = mx.symbol.Convolution(data=net, name='conv1', kernel=(8, 8), stride=(4, 4), num_filter=16)
net = mx.symbol.Activation(data=net, name='relu1', act_type="relu")
net = mx.symbol.Convolution(data=net, name='conv2', kernel=(4, 4), stride=(2, 2), num_filter=32)
net = mx.symbol.Activation(data=net, name='relu2', act_type="relu")
net = mx.symbol.Flatten(data=net)
net = mx.symbol.FullyConnected(data=net, name='fc3', num_hidden=256)
net = mx.symbol.Activation(data=net, name='relu3', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='fc4', num_hidden=action_num)
net = mx.symbol.Custom(data=net, name=name, op_type='DQNOutput')
return net
|
[
"def",
"dqn_sym_nips",
"(",
"action_num",
",",
"data",
"=",
"None",
",",
"name",
"=",
"'dqn'",
")",
":",
"if",
"data",
"is",
"None",
":",
"net",
"=",
"mx",
".",
"symbol",
".",
"Variable",
"(",
"'data'",
")",
"else",
":",
"net",
"=",
"data",
"net",
"=",
"mx",
".",
"symbol",
".",
"Convolution",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'conv1'",
",",
"kernel",
"=",
"(",
"8",
",",
"8",
")",
",",
"stride",
"=",
"(",
"4",
",",
"4",
")",
",",
"num_filter",
"=",
"16",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"Activation",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'relu1'",
",",
"act_type",
"=",
"\"relu\"",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"Convolution",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'conv2'",
",",
"kernel",
"=",
"(",
"4",
",",
"4",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
",",
"num_filter",
"=",
"32",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"Activation",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'relu2'",
",",
"act_type",
"=",
"\"relu\"",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"Flatten",
"(",
"data",
"=",
"net",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"FullyConnected",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'fc3'",
",",
"num_hidden",
"=",
"256",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"Activation",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'relu3'",
",",
"act_type",
"=",
"\"relu\"",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"FullyConnected",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"'fc4'",
",",
"num_hidden",
"=",
"action_num",
")",
"net",
"=",
"mx",
".",
"symbol",
".",
"Custom",
"(",
"data",
"=",
"net",
",",
"name",
"=",
"name",
",",
"op_type",
"=",
"'DQNOutput'",
")",
"return",
"net"
] |
Structure of the Deep Q Network in the NIPS 2013 workshop paper:
Playing Atari with Deep Reinforcement Learning (https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf)
Parameters
----------
action_num : int
data : mxnet.sym.Symbol, optional
name : str, optional
|
[
"Structure",
"of",
"the",
"Deep",
"Q",
"Network",
"in",
"the",
"NIPS",
"2013",
"workshop",
"paper",
":",
"Playing",
"Atari",
"with",
"Deep",
"Reinforcement",
"Learning",
"(",
"https",
":",
"//",
"www",
".",
"cs",
".",
"toronto",
".",
"edu",
"/",
"~vmnih",
"/",
"docs",
"/",
"dqn",
".",
"pdf",
")"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/dqn/operators.py#L98-L121
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
_monitor_callback_wrapper
|
def _monitor_callback_wrapper(callback):
"""A wrapper for the user-defined handle."""
def callback_handle(name, array, _):
""" ctypes function """
callback(name, array)
return callback_handle
|
python
|
def _monitor_callback_wrapper(callback):
"""A wrapper for the user-defined handle."""
def callback_handle(name, array, _):
""" ctypes function """
callback(name, array)
return callback_handle
|
[
"def",
"_monitor_callback_wrapper",
"(",
"callback",
")",
":",
"def",
"callback_handle",
"(",
"name",
",",
"array",
",",
"_",
")",
":",
"\"\"\" ctypes function \"\"\"",
"callback",
"(",
"name",
",",
"array",
")",
"return",
"callback_handle"
] |
A wrapper for the user-defined handle.
|
[
"A",
"wrapper",
"for",
"the",
"user",
"-",
"defined",
"handle",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L38-L43
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor._get_dict
|
def _get_dict(names, ndarrays):
"""Get the dictionary given name and ndarray pairs."""
nset = set()
for nm in names:
if nm in nset:
raise ValueError('Duplicate names detected, %s' % str(names))
nset.add(nm)
return dict(zip(names, ndarrays))
|
python
|
def _get_dict(names, ndarrays):
"""Get the dictionary given name and ndarray pairs."""
nset = set()
for nm in names:
if nm in nset:
raise ValueError('Duplicate names detected, %s' % str(names))
nset.add(nm)
return dict(zip(names, ndarrays))
|
[
"def",
"_get_dict",
"(",
"names",
",",
"ndarrays",
")",
":",
"nset",
"=",
"set",
"(",
")",
"for",
"nm",
"in",
"names",
":",
"if",
"nm",
"in",
"nset",
":",
"raise",
"ValueError",
"(",
"'Duplicate names detected, %s'",
"%",
"str",
"(",
"names",
")",
")",
"nset",
".",
"add",
"(",
"nm",
")",
"return",
"dict",
"(",
"zip",
"(",
"names",
",",
"ndarrays",
")",
")"
] |
Get the dictionary given name and ndarray pairs.
|
[
"Get",
"the",
"dictionary",
"given",
"name",
"and",
"ndarray",
"pairs",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L90-L97
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor._get_outputs
|
def _get_outputs(self):
"""List all the output NDArray.
Returns
-------
A list of ndarray bound to the heads of executor.
"""
out_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
check_call(_LIB.MXExecutorOutputs(self.handle,
ctypes.byref(out_size), ctypes.byref(handles)))
num_output = out_size.value
outputs = [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(num_output)]
return outputs
|
python
|
def _get_outputs(self):
"""List all the output NDArray.
Returns
-------
A list of ndarray bound to the heads of executor.
"""
out_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
check_call(_LIB.MXExecutorOutputs(self.handle,
ctypes.byref(out_size), ctypes.byref(handles)))
num_output = out_size.value
outputs = [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(num_output)]
return outputs
|
[
"def",
"_get_outputs",
"(",
"self",
")",
":",
"out_size",
"=",
"mx_uint",
"(",
")",
"handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXExecutorOutputs",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"out_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"handles",
")",
")",
")",
"num_output",
"=",
"out_size",
".",
"value",
"outputs",
"=",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_output",
")",
"]",
"return",
"outputs"
] |
List all the output NDArray.
Returns
-------
A list of ndarray bound to the heads of executor.
|
[
"List",
"all",
"the",
"output",
"NDArray",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L99-L112
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.forward
|
def forward(self, is_train=False, **kwargs):
"""Calculate the outputs specified by the bound symbol.
Parameters
----------
is_train: bool, optional
Whether this forward is for evaluation purpose. If True,
a backward call is expected to follow.
**kwargs
Additional specification of input arguments.
Examples
--------
>>> # doing forward by specifying data
>>> texec.forward(is_train=True, data=mydata)
>>> # doing forward by not specifying things, but copy to the executor before hand
>>> mydata.copyto(texec.arg_dict['data'])
>>> texec.forward(is_train=True)
>>> # doing forward by specifying data and get outputs
>>> outputs = texec.forward(is_train=True, data=mydata)
>>> print(outputs[0].asnumpy())
"""
if len(kwargs) != 0:
arg_dict = self.arg_dict
for name, array in kwargs.items():
if not isinstance(array, (NDArray, np.ndarray)):
raise ValueError('only accept keyword argument of NDArrays and numpy.ndarray')
if name not in arg_dict:
raise TypeError('Unknown argument %s' % name)
if arg_dict[name].shape != array.shape:
raise ValueError('Shape not match! Argument %s, need: %s, received: %s'
%(name, str(arg_dict[name].shape), str(array.shape)))
arg_dict[name][:] = array
check_call(_LIB.MXExecutorForward(
self.handle,
ctypes.c_int(int(is_train))))
return self.outputs
|
python
|
def forward(self, is_train=False, **kwargs):
"""Calculate the outputs specified by the bound symbol.
Parameters
----------
is_train: bool, optional
Whether this forward is for evaluation purpose. If True,
a backward call is expected to follow.
**kwargs
Additional specification of input arguments.
Examples
--------
>>> # doing forward by specifying data
>>> texec.forward(is_train=True, data=mydata)
>>> # doing forward by not specifying things, but copy to the executor before hand
>>> mydata.copyto(texec.arg_dict['data'])
>>> texec.forward(is_train=True)
>>> # doing forward by specifying data and get outputs
>>> outputs = texec.forward(is_train=True, data=mydata)
>>> print(outputs[0].asnumpy())
"""
if len(kwargs) != 0:
arg_dict = self.arg_dict
for name, array in kwargs.items():
if not isinstance(array, (NDArray, np.ndarray)):
raise ValueError('only accept keyword argument of NDArrays and numpy.ndarray')
if name not in arg_dict:
raise TypeError('Unknown argument %s' % name)
if arg_dict[name].shape != array.shape:
raise ValueError('Shape not match! Argument %s, need: %s, received: %s'
%(name, str(arg_dict[name].shape), str(array.shape)))
arg_dict[name][:] = array
check_call(_LIB.MXExecutorForward(
self.handle,
ctypes.c_int(int(is_train))))
return self.outputs
|
[
"def",
"forward",
"(",
"self",
",",
"is_train",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"kwargs",
")",
"!=",
"0",
":",
"arg_dict",
"=",
"self",
".",
"arg_dict",
"for",
"name",
",",
"array",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"array",
",",
"(",
"NDArray",
",",
"np",
".",
"ndarray",
")",
")",
":",
"raise",
"ValueError",
"(",
"'only accept keyword argument of NDArrays and numpy.ndarray'",
")",
"if",
"name",
"not",
"in",
"arg_dict",
":",
"raise",
"TypeError",
"(",
"'Unknown argument %s'",
"%",
"name",
")",
"if",
"arg_dict",
"[",
"name",
"]",
".",
"shape",
"!=",
"array",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'Shape not match! Argument %s, need: %s, received: %s'",
"%",
"(",
"name",
",",
"str",
"(",
"arg_dict",
"[",
"name",
"]",
".",
"shape",
")",
",",
"str",
"(",
"array",
".",
"shape",
")",
")",
")",
"arg_dict",
"[",
"name",
"]",
"[",
":",
"]",
"=",
"array",
"check_call",
"(",
"_LIB",
".",
"MXExecutorForward",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"is_train",
")",
")",
")",
")",
"return",
"self",
".",
"outputs"
] |
Calculate the outputs specified by the bound symbol.
Parameters
----------
is_train: bool, optional
Whether this forward is for evaluation purpose. If True,
a backward call is expected to follow.
**kwargs
Additional specification of input arguments.
Examples
--------
>>> # doing forward by specifying data
>>> texec.forward(is_train=True, data=mydata)
>>> # doing forward by not specifying things, but copy to the executor before hand
>>> mydata.copyto(texec.arg_dict['data'])
>>> texec.forward(is_train=True)
>>> # doing forward by specifying data and get outputs
>>> outputs = texec.forward(is_train=True, data=mydata)
>>> print(outputs[0].asnumpy())
|
[
"Calculate",
"the",
"outputs",
"specified",
"by",
"the",
"bound",
"symbol",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L114-L153
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.backward
|
def backward(self, out_grads=None, is_train=True):
"""Do backward pass to get the gradient of arguments.
Parameters
----------
out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
is_train : bool, default True
Whether this backward is for training or inference. Note that in rare
cases you want to call backward with is_train=False to get gradient
during inference.
Examples
--------
>>> # Example for binding on loss function symbol, which gives the loss value of the model.
>>> # Equivalently it gives the head gradient for backward pass.
>>> # In this example the built-in SoftmaxOutput is used as loss function.
>>> # MakeLoss can be used to define customized loss function symbol.
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
>>> net = mx.sym.Activation(net, name='relu', act_type="relu")
>>> net = mx.sym.SoftmaxOutput(net, name='softmax')
>>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
>>> 'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
>>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
>>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print out.asnumpy()
[[ 0.00378404 0.07600445 0.07600445 0.07600445 0.20660152 0.5616011 ]]
>>> texec.backward()
>>> print(texec.grad_arrays[1].asnumpy())
[[ 0.00378404 0.00378404 0.00378404 0.00378404]
[-0.92399555 -0.92399555 -0.92399555 -0.92399555]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.20660152 0.20660152 0.20660152 0.20660152]
[ 0.5616011 0.5616011 0.5616011 0.5616011 ]]
>>>
>>> # Example for binding on non-loss function symbol.
>>> # Here the binding symbol is neither built-in loss function
>>> # nor customized loss created by MakeLoss.
>>> # As a result the head gradient is not automatically provided.
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> # c is not a loss function symbol
>>> c = 2 * a + b
>>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
>>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
>>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print(out.asnumpy())
[ 4. 7.]
>>> # out_grads is the head gradient in backward pass.
>>> # Here we define 'c' as loss function.
>>> # Then 'out' is passed as head gradient of backward pass.
>>> texec.backward(out)
>>> print(texec.grad_arrays[0].asnumpy())
[ 8. 14.]
>>> print(texec.grad_arrays[1].asnumpy())
[ 4. 7.]
"""
if out_grads is None:
out_grads = []
elif isinstance(out_grads, NDArray):
out_grads = [out_grads]
elif isinstance(out_grads, dict):
out_grads = [out_grads[k] for k in self._symbol.list_outputs()]
for obj in out_grads:
if not isinstance(obj, NDArray):
raise TypeError("inputs must be NDArray")
ndarray = c_handle_array(out_grads)
check_call(_LIB.MXExecutorBackwardEx(
self.handle,
mx_uint(len(out_grads)),
ndarray,
ctypes.c_int(is_train)))
|
python
|
def backward(self, out_grads=None, is_train=True):
"""Do backward pass to get the gradient of arguments.
Parameters
----------
out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
is_train : bool, default True
Whether this backward is for training or inference. Note that in rare
cases you want to call backward with is_train=False to get gradient
during inference.
Examples
--------
>>> # Example for binding on loss function symbol, which gives the loss value of the model.
>>> # Equivalently it gives the head gradient for backward pass.
>>> # In this example the built-in SoftmaxOutput is used as loss function.
>>> # MakeLoss can be used to define customized loss function symbol.
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
>>> net = mx.sym.Activation(net, name='relu', act_type="relu")
>>> net = mx.sym.SoftmaxOutput(net, name='softmax')
>>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
>>> 'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
>>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
>>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print out.asnumpy()
[[ 0.00378404 0.07600445 0.07600445 0.07600445 0.20660152 0.5616011 ]]
>>> texec.backward()
>>> print(texec.grad_arrays[1].asnumpy())
[[ 0.00378404 0.00378404 0.00378404 0.00378404]
[-0.92399555 -0.92399555 -0.92399555 -0.92399555]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.20660152 0.20660152 0.20660152 0.20660152]
[ 0.5616011 0.5616011 0.5616011 0.5616011 ]]
>>>
>>> # Example for binding on non-loss function symbol.
>>> # Here the binding symbol is neither built-in loss function
>>> # nor customized loss created by MakeLoss.
>>> # As a result the head gradient is not automatically provided.
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> # c is not a loss function symbol
>>> c = 2 * a + b
>>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
>>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
>>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print(out.asnumpy())
[ 4. 7.]
>>> # out_grads is the head gradient in backward pass.
>>> # Here we define 'c' as loss function.
>>> # Then 'out' is passed as head gradient of backward pass.
>>> texec.backward(out)
>>> print(texec.grad_arrays[0].asnumpy())
[ 8. 14.]
>>> print(texec.grad_arrays[1].asnumpy())
[ 4. 7.]
"""
if out_grads is None:
out_grads = []
elif isinstance(out_grads, NDArray):
out_grads = [out_grads]
elif isinstance(out_grads, dict):
out_grads = [out_grads[k] for k in self._symbol.list_outputs()]
for obj in out_grads:
if not isinstance(obj, NDArray):
raise TypeError("inputs must be NDArray")
ndarray = c_handle_array(out_grads)
check_call(_LIB.MXExecutorBackwardEx(
self.handle,
mx_uint(len(out_grads)),
ndarray,
ctypes.c_int(is_train)))
|
[
"def",
"backward",
"(",
"self",
",",
"out_grads",
"=",
"None",
",",
"is_train",
"=",
"True",
")",
":",
"if",
"out_grads",
"is",
"None",
":",
"out_grads",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"out_grads",
",",
"NDArray",
")",
":",
"out_grads",
"=",
"[",
"out_grads",
"]",
"elif",
"isinstance",
"(",
"out_grads",
",",
"dict",
")",
":",
"out_grads",
"=",
"[",
"out_grads",
"[",
"k",
"]",
"for",
"k",
"in",
"self",
".",
"_symbol",
".",
"list_outputs",
"(",
")",
"]",
"for",
"obj",
"in",
"out_grads",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"NDArray",
")",
":",
"raise",
"TypeError",
"(",
"\"inputs must be NDArray\"",
")",
"ndarray",
"=",
"c_handle_array",
"(",
"out_grads",
")",
"check_call",
"(",
"_LIB",
".",
"MXExecutorBackwardEx",
"(",
"self",
".",
"handle",
",",
"mx_uint",
"(",
"len",
"(",
"out_grads",
")",
")",
",",
"ndarray",
",",
"ctypes",
".",
"c_int",
"(",
"is_train",
")",
")",
")"
] |
Do backward pass to get the gradient of arguments.
Parameters
----------
out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
is_train : bool, default True
Whether this backward is for training or inference. Note that in rare
cases you want to call backward with is_train=False to get gradient
during inference.
Examples
--------
>>> # Example for binding on loss function symbol, which gives the loss value of the model.
>>> # Equivalently it gives the head gradient for backward pass.
>>> # In this example the built-in SoftmaxOutput is used as loss function.
>>> # MakeLoss can be used to define customized loss function symbol.
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
>>> net = mx.sym.Activation(net, name='relu', act_type="relu")
>>> net = mx.sym.SoftmaxOutput(net, name='softmax')
>>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
>>> 'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
>>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
>>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print out.asnumpy()
[[ 0.00378404 0.07600445 0.07600445 0.07600445 0.20660152 0.5616011 ]]
>>> texec.backward()
>>> print(texec.grad_arrays[1].asnumpy())
[[ 0.00378404 0.00378404 0.00378404 0.00378404]
[-0.92399555 -0.92399555 -0.92399555 -0.92399555]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.20660152 0.20660152 0.20660152 0.20660152]
[ 0.5616011 0.5616011 0.5616011 0.5616011 ]]
>>>
>>> # Example for binding on non-loss function symbol.
>>> # Here the binding symbol is neither built-in loss function
>>> # nor customized loss created by MakeLoss.
>>> # As a result the head gradient is not automatically provided.
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> # c is not a loss function symbol
>>> c = 2 * a + b
>>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
>>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
>>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print(out.asnumpy())
[ 4. 7.]
>>> # out_grads is the head gradient in backward pass.
>>> # Here we define 'c' as loss function.
>>> # Then 'out' is passed as head gradient of backward pass.
>>> texec.backward(out)
>>> print(texec.grad_arrays[0].asnumpy())
[ 8. 14.]
>>> print(texec.grad_arrays[1].asnumpy())
[ 4. 7.]
|
[
"Do",
"backward",
"pass",
"to",
"get",
"the",
"gradient",
"of",
"arguments",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L155-L235
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.set_monitor_callback
|
def set_monitor_callback(self, callback, monitor_all=False):
"""Install callback for monitor.
Parameters
----------
callback : function
Takes a string and an NDArrayHandle.
monitor_all : bool, default False
If true, monitor both input and output, otherwise monitor output only.
Examples
--------
>>> def mon_callback(*args, **kwargs):
>>> print("Do your stuff here.")
>>>
>>> texe.set_monitor_callback(mon_callback)
"""
cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p)
self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
check_call(_LIB.MXExecutorSetMonitorCallbackEX(
self.handle,
self._monitor_callback,
None,
ctypes.c_int(monitor_all)))
|
python
|
def set_monitor_callback(self, callback, monitor_all=False):
"""Install callback for monitor.
Parameters
----------
callback : function
Takes a string and an NDArrayHandle.
monitor_all : bool, default False
If true, monitor both input and output, otherwise monitor output only.
Examples
--------
>>> def mon_callback(*args, **kwargs):
>>> print("Do your stuff here.")
>>>
>>> texe.set_monitor_callback(mon_callback)
"""
cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p)
self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
check_call(_LIB.MXExecutorSetMonitorCallbackEX(
self.handle,
self._monitor_callback,
None,
ctypes.c_int(monitor_all)))
|
[
"def",
"set_monitor_callback",
"(",
"self",
",",
"callback",
",",
"monitor_all",
"=",
"False",
")",
":",
"cb_type",
"=",
"ctypes",
".",
"CFUNCTYPE",
"(",
"None",
",",
"ctypes",
".",
"c_char_p",
",",
"NDArrayHandle",
",",
"ctypes",
".",
"c_void_p",
")",
"self",
".",
"_monitor_callback",
"=",
"cb_type",
"(",
"_monitor_callback_wrapper",
"(",
"callback",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXExecutorSetMonitorCallbackEX",
"(",
"self",
".",
"handle",
",",
"self",
".",
"_monitor_callback",
",",
"None",
",",
"ctypes",
".",
"c_int",
"(",
"monitor_all",
")",
")",
")"
] |
Install callback for monitor.
Parameters
----------
callback : function
Takes a string and an NDArrayHandle.
monitor_all : bool, default False
If true, monitor both input and output, otherwise monitor output only.
Examples
--------
>>> def mon_callback(*args, **kwargs):
>>> print("Do your stuff here.")
>>>
>>> texe.set_monitor_callback(mon_callback)
|
[
"Install",
"callback",
"for",
"monitor",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L237-L260
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.arg_dict
|
def arg_dict(self):
"""Get dictionary representation of argument arrrays.
Returns
-------
arg_dict : dict of str to NDArray
The dictionary that maps the names of arguments to NDArrays.
Raises
------
ValueError : if there are duplicated names in the arguments.
"""
if self._arg_dict is None:
self._arg_dict = Executor._get_dict(
self._symbol.list_arguments(), self.arg_arrays)
return self._arg_dict
|
python
|
def arg_dict(self):
"""Get dictionary representation of argument arrrays.
Returns
-------
arg_dict : dict of str to NDArray
The dictionary that maps the names of arguments to NDArrays.
Raises
------
ValueError : if there are duplicated names in the arguments.
"""
if self._arg_dict is None:
self._arg_dict = Executor._get_dict(
self._symbol.list_arguments(), self.arg_arrays)
return self._arg_dict
|
[
"def",
"arg_dict",
"(",
"self",
")",
":",
"if",
"self",
".",
"_arg_dict",
"is",
"None",
":",
"self",
".",
"_arg_dict",
"=",
"Executor",
".",
"_get_dict",
"(",
"self",
".",
"_symbol",
".",
"list_arguments",
"(",
")",
",",
"self",
".",
"arg_arrays",
")",
"return",
"self",
".",
"_arg_dict"
] |
Get dictionary representation of argument arrrays.
Returns
-------
arg_dict : dict of str to NDArray
The dictionary that maps the names of arguments to NDArrays.
Raises
------
ValueError : if there are duplicated names in the arguments.
|
[
"Get",
"dictionary",
"representation",
"of",
"argument",
"arrrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L263-L278
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.grad_dict
|
def grad_dict(self):
"""Get dictionary representation of gradient arrays.
Returns
-------
grad_dict : dict of str to NDArray
The dictionary that maps name of arguments to gradient arrays.
"""
if self._grad_dict is None:
self._grad_dict = Executor._get_dict(
self._symbol.list_arguments(), self.grad_arrays)
return self._grad_dict
|
python
|
def grad_dict(self):
"""Get dictionary representation of gradient arrays.
Returns
-------
grad_dict : dict of str to NDArray
The dictionary that maps name of arguments to gradient arrays.
"""
if self._grad_dict is None:
self._grad_dict = Executor._get_dict(
self._symbol.list_arguments(), self.grad_arrays)
return self._grad_dict
|
[
"def",
"grad_dict",
"(",
"self",
")",
":",
"if",
"self",
".",
"_grad_dict",
"is",
"None",
":",
"self",
".",
"_grad_dict",
"=",
"Executor",
".",
"_get_dict",
"(",
"self",
".",
"_symbol",
".",
"list_arguments",
"(",
")",
",",
"self",
".",
"grad_arrays",
")",
"return",
"self",
".",
"_grad_dict"
] |
Get dictionary representation of gradient arrays.
Returns
-------
grad_dict : dict of str to NDArray
The dictionary that maps name of arguments to gradient arrays.
|
[
"Get",
"dictionary",
"representation",
"of",
"gradient",
"arrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L281-L292
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.aux_dict
|
def aux_dict(self):
"""Get dictionary representation of auxiliary states arrays.
Returns
-------
aux_dict : dict of str to NDArray
The dictionary that maps name of auxiliary states to NDArrays.
Raises
------
ValueError : if there are duplicated names in the auxiliary states.
"""
if self._aux_dict is None:
self._aux_dict = Executor._get_dict(
self._symbol.list_auxiliary_states(), self.aux_arrays)
return self._aux_dict
|
python
|
def aux_dict(self):
"""Get dictionary representation of auxiliary states arrays.
Returns
-------
aux_dict : dict of str to NDArray
The dictionary that maps name of auxiliary states to NDArrays.
Raises
------
ValueError : if there are duplicated names in the auxiliary states.
"""
if self._aux_dict is None:
self._aux_dict = Executor._get_dict(
self._symbol.list_auxiliary_states(), self.aux_arrays)
return self._aux_dict
|
[
"def",
"aux_dict",
"(",
"self",
")",
":",
"if",
"self",
".",
"_aux_dict",
"is",
"None",
":",
"self",
".",
"_aux_dict",
"=",
"Executor",
".",
"_get_dict",
"(",
"self",
".",
"_symbol",
".",
"list_auxiliary_states",
"(",
")",
",",
"self",
".",
"aux_arrays",
")",
"return",
"self",
".",
"_aux_dict"
] |
Get dictionary representation of auxiliary states arrays.
Returns
-------
aux_dict : dict of str to NDArray
The dictionary that maps name of auxiliary states to NDArrays.
Raises
------
ValueError : if there are duplicated names in the auxiliary states.
|
[
"Get",
"dictionary",
"representation",
"of",
"auxiliary",
"states",
"arrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L295-L310
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.output_dict
|
def output_dict(self):
"""Get dictionary representation of output arrays.
Returns
-------
output_dict : dict of str to NDArray
The dictionary that maps name of output names to NDArrays.
Raises
------
ValueError : if there are duplicated names in the outputs.
"""
if self._output_dict is None:
self._output_dict = Executor._get_dict(
self._symbol.list_outputs(), self.outputs)
return self._output_dict
|
python
|
def output_dict(self):
"""Get dictionary representation of output arrays.
Returns
-------
output_dict : dict of str to NDArray
The dictionary that maps name of output names to NDArrays.
Raises
------
ValueError : if there are duplicated names in the outputs.
"""
if self._output_dict is None:
self._output_dict = Executor._get_dict(
self._symbol.list_outputs(), self.outputs)
return self._output_dict
|
[
"def",
"output_dict",
"(",
"self",
")",
":",
"if",
"self",
".",
"_output_dict",
"is",
"None",
":",
"self",
".",
"_output_dict",
"=",
"Executor",
".",
"_get_dict",
"(",
"self",
".",
"_symbol",
".",
"list_outputs",
"(",
")",
",",
"self",
".",
"outputs",
")",
"return",
"self",
".",
"_output_dict"
] |
Get dictionary representation of output arrays.
Returns
-------
output_dict : dict of str to NDArray
The dictionary that maps name of output names to NDArrays.
Raises
------
ValueError : if there are duplicated names in the outputs.
|
[
"Get",
"dictionary",
"representation",
"of",
"output",
"arrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L313-L328
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.copy_params_from
|
def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False):
"""Copy parameters from arg_params, aux_params into executor's internal array.
Parameters
----------
arg_params : dict of str to NDArray
Parameters, dict of name to NDArray of arguments.
aux_params : dict of str to NDArray, optional
Parameters, dict of name to NDArray of auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Raises
------
ValueError
If there is additional parameters in the dict but ``allow_extra_params=False``.
Examples
--------
>>> # set parameters with existing model checkpoint
>>> model_prefix = 'mx_mlp'
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
>>> texec.copy_params_from(arg_params, aux_params)
"""
for name, array in arg_params.items():
if name in self.arg_dict:
dst = self.arg_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Find name \"%s\" that is not in the arguments' % name)
if aux_params is None:
return
for name, array in aux_params.items():
if name in self.aux_dict:
dst = self.aux_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Find name %s that is not in the auxiliary states' % name)
|
python
|
def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False):
"""Copy parameters from arg_params, aux_params into executor's internal array.
Parameters
----------
arg_params : dict of str to NDArray
Parameters, dict of name to NDArray of arguments.
aux_params : dict of str to NDArray, optional
Parameters, dict of name to NDArray of auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Raises
------
ValueError
If there is additional parameters in the dict but ``allow_extra_params=False``.
Examples
--------
>>> # set parameters with existing model checkpoint
>>> model_prefix = 'mx_mlp'
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
>>> texec.copy_params_from(arg_params, aux_params)
"""
for name, array in arg_params.items():
if name in self.arg_dict:
dst = self.arg_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Find name \"%s\" that is not in the arguments' % name)
if aux_params is None:
return
for name, array in aux_params.items():
if name in self.aux_dict:
dst = self.aux_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Find name %s that is not in the auxiliary states' % name)
|
[
"def",
"copy_params_from",
"(",
"self",
",",
"arg_params",
",",
"aux_params",
"=",
"None",
",",
"allow_extra_params",
"=",
"False",
")",
":",
"for",
"name",
",",
"array",
"in",
"arg_params",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"self",
".",
"arg_dict",
":",
"dst",
"=",
"self",
".",
"arg_dict",
"[",
"name",
"]",
"array",
".",
"astype",
"(",
"dst",
".",
"dtype",
")",
".",
"copyto",
"(",
"dst",
")",
"elif",
"not",
"allow_extra_params",
":",
"raise",
"ValueError",
"(",
"'Find name \\\"%s\\\" that is not in the arguments'",
"%",
"name",
")",
"if",
"aux_params",
"is",
"None",
":",
"return",
"for",
"name",
",",
"array",
"in",
"aux_params",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"self",
".",
"aux_dict",
":",
"dst",
"=",
"self",
".",
"aux_dict",
"[",
"name",
"]",
"array",
".",
"astype",
"(",
"dst",
".",
"dtype",
")",
".",
"copyto",
"(",
"dst",
")",
"elif",
"not",
"allow_extra_params",
":",
"raise",
"ValueError",
"(",
"'Find name %s that is not in the auxiliary states'",
"%",
"name",
")"
] |
Copy parameters from arg_params, aux_params into executor's internal array.
Parameters
----------
arg_params : dict of str to NDArray
Parameters, dict of name to NDArray of arguments.
aux_params : dict of str to NDArray, optional
Parameters, dict of name to NDArray of auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Raises
------
ValueError
If there is additional parameters in the dict but ``allow_extra_params=False``.
Examples
--------
>>> # set parameters with existing model checkpoint
>>> model_prefix = 'mx_mlp'
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
>>> texec.copy_params_from(arg_params, aux_params)
|
[
"Copy",
"parameters",
"from",
"arg_params",
"aux_params",
"into",
"executor",
"s",
"internal",
"array",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L330-L373
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.reshape
|
def reshape(self, partial_shaping=False, allow_up_sizing=False, **kwargs):
"""Return a new executor with the same symbol and shared memory,
but different input/output shapes.
For runtime reshaping, variable length sequences, etc.
The returned executor shares state with the current one,
and cannot be used in parallel with it.
Parameters
----------
partial_shaping : bool
Whether to allow changing the shape of unspecified arguments.
allow_up_sizing : bool
Whether to allow allocating new ndarrays that's larger than the original.
kwargs : dict of string to tuple of int
New shape for arguments.
Returns
-------
exec : Executor
A new executor that shares memory with self.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.zeros((2, 1)), 'b': mx.nd.ones((2,1))})
>>> new_shape = {'a': (4, 2), 'b': (4, 2)}
>>> texec.reshape(allow_up_sizing=True, **new_shape)
"""
# pylint: disable=too-many-branches
provided_arg_shape_data = [] # shape data
# argument shape index in sdata,
# e.g. [sdata[indptr[0]], sdata[indptr[1]]) is the shape of the first arg
provided_arg_shape_idx = [0]
provided_arg_shape_names = [] # provided argument names
for k, v in kwargs.items():
if isinstance(v, tuple):
provided_arg_shape_names.append(k)
provided_arg_shape_data.extend(v)
provided_arg_shape_idx.append(len(provided_arg_shape_data))
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
if self._group2ctx:
for key, val in self._group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
handle = ExecutorHandle()
shared_handle = self.handle
num_in_args = ctypes.c_uint()
in_arg_handles = ctypes.POINTER(NDArrayHandle)()
arg_grad_handles = ctypes.POINTER(NDArrayHandle)()
num_aux_states = ctypes.c_uint()
aux_state_handles = ctypes.POINTER(NDArrayHandle)()
check_call(_LIB.MXExecutorReshapeEx(ctypes.c_int(int(partial_shaping)),
ctypes.c_int(int(allow_up_sizing)),
ctypes.c_int(self._ctx.device_typeid),
ctypes.c_int(self._ctx.device_id),
mx_uint(len(ctx_map_keys)),
c_str_array(ctx_map_keys),
c_array_buf(ctypes.c_int,
py_array('i', ctx_map_dev_types)),
c_array_buf(ctypes.c_int,
py_array('i', ctx_map_dev_ids)),
mx_uint(len(provided_arg_shape_names)),
c_str_array(provided_arg_shape_names),
c_array_buf(mx_int,
py_array('i', provided_arg_shape_data)),
c_array_buf(mx_uint,
py_array('I', provided_arg_shape_idx)),
ctypes.byref(num_in_args),
ctypes.byref(in_arg_handles),
ctypes.byref(arg_grad_handles),
ctypes.byref(num_aux_states),
ctypes.byref(aux_state_handles),
shared_handle,
ctypes.byref(handle)))
arg_arrays = [_ndarray_cls(NDArrayHandle(in_arg_handles[i]))
for i in range(num_in_args.value)]
grad_arrays = [_ndarray_cls(NDArrayHandle(arg_grad_handles[i]))
if arg_grad_handles[i] is not None
else None for i in range(num_in_args.value)]
aux_arrays = [_ndarray_cls(NDArrayHandle(aux_state_handles[i]))
for i in range(num_aux_states.value)]
executor = Executor(handle, self._symbol, self._ctx, self._grad_req, self._group2ctx)
executor.arg_arrays = arg_arrays
executor.grad_arrays = grad_arrays
executor.aux_arrays = aux_arrays
return executor
|
python
|
def reshape(self, partial_shaping=False, allow_up_sizing=False, **kwargs):
"""Return a new executor with the same symbol and shared memory,
but different input/output shapes.
For runtime reshaping, variable length sequences, etc.
The returned executor shares state with the current one,
and cannot be used in parallel with it.
Parameters
----------
partial_shaping : bool
Whether to allow changing the shape of unspecified arguments.
allow_up_sizing : bool
Whether to allow allocating new ndarrays that's larger than the original.
kwargs : dict of string to tuple of int
New shape for arguments.
Returns
-------
exec : Executor
A new executor that shares memory with self.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.zeros((2, 1)), 'b': mx.nd.ones((2,1))})
>>> new_shape = {'a': (4, 2), 'b': (4, 2)}
>>> texec.reshape(allow_up_sizing=True, **new_shape)
"""
# pylint: disable=too-many-branches
provided_arg_shape_data = [] # shape data
# argument shape index in sdata,
# e.g. [sdata[indptr[0]], sdata[indptr[1]]) is the shape of the first arg
provided_arg_shape_idx = [0]
provided_arg_shape_names = [] # provided argument names
for k, v in kwargs.items():
if isinstance(v, tuple):
provided_arg_shape_names.append(k)
provided_arg_shape_data.extend(v)
provided_arg_shape_idx.append(len(provided_arg_shape_data))
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
if self._group2ctx:
for key, val in self._group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
handle = ExecutorHandle()
shared_handle = self.handle
num_in_args = ctypes.c_uint()
in_arg_handles = ctypes.POINTER(NDArrayHandle)()
arg_grad_handles = ctypes.POINTER(NDArrayHandle)()
num_aux_states = ctypes.c_uint()
aux_state_handles = ctypes.POINTER(NDArrayHandle)()
check_call(_LIB.MXExecutorReshapeEx(ctypes.c_int(int(partial_shaping)),
ctypes.c_int(int(allow_up_sizing)),
ctypes.c_int(self._ctx.device_typeid),
ctypes.c_int(self._ctx.device_id),
mx_uint(len(ctx_map_keys)),
c_str_array(ctx_map_keys),
c_array_buf(ctypes.c_int,
py_array('i', ctx_map_dev_types)),
c_array_buf(ctypes.c_int,
py_array('i', ctx_map_dev_ids)),
mx_uint(len(provided_arg_shape_names)),
c_str_array(provided_arg_shape_names),
c_array_buf(mx_int,
py_array('i', provided_arg_shape_data)),
c_array_buf(mx_uint,
py_array('I', provided_arg_shape_idx)),
ctypes.byref(num_in_args),
ctypes.byref(in_arg_handles),
ctypes.byref(arg_grad_handles),
ctypes.byref(num_aux_states),
ctypes.byref(aux_state_handles),
shared_handle,
ctypes.byref(handle)))
arg_arrays = [_ndarray_cls(NDArrayHandle(in_arg_handles[i]))
for i in range(num_in_args.value)]
grad_arrays = [_ndarray_cls(NDArrayHandle(arg_grad_handles[i]))
if arg_grad_handles[i] is not None
else None for i in range(num_in_args.value)]
aux_arrays = [_ndarray_cls(NDArrayHandle(aux_state_handles[i]))
for i in range(num_aux_states.value)]
executor = Executor(handle, self._symbol, self._ctx, self._grad_req, self._group2ctx)
executor.arg_arrays = arg_arrays
executor.grad_arrays = grad_arrays
executor.aux_arrays = aux_arrays
return executor
|
[
"def",
"reshape",
"(",
"self",
",",
"partial_shaping",
"=",
"False",
",",
"allow_up_sizing",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=too-many-branches",
"provided_arg_shape_data",
"=",
"[",
"]",
"# shape data",
"# argument shape index in sdata,",
"# e.g. [sdata[indptr[0]], sdata[indptr[1]]) is the shape of the first arg",
"provided_arg_shape_idx",
"=",
"[",
"0",
"]",
"provided_arg_shape_names",
"=",
"[",
"]",
"# provided argument names",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"tuple",
")",
":",
"provided_arg_shape_names",
".",
"append",
"(",
"k",
")",
"provided_arg_shape_data",
".",
"extend",
"(",
"v",
")",
"provided_arg_shape_idx",
".",
"append",
"(",
"len",
"(",
"provided_arg_shape_data",
")",
")",
"ctx_map_keys",
"=",
"[",
"]",
"ctx_map_dev_types",
"=",
"[",
"]",
"ctx_map_dev_ids",
"=",
"[",
"]",
"if",
"self",
".",
"_group2ctx",
":",
"for",
"key",
",",
"val",
"in",
"self",
".",
"_group2ctx",
".",
"items",
"(",
")",
":",
"ctx_map_keys",
".",
"append",
"(",
"key",
")",
"ctx_map_dev_types",
".",
"append",
"(",
"val",
".",
"device_typeid",
")",
"ctx_map_dev_ids",
".",
"append",
"(",
"val",
".",
"device_id",
")",
"handle",
"=",
"ExecutorHandle",
"(",
")",
"shared_handle",
"=",
"self",
".",
"handle",
"num_in_args",
"=",
"ctypes",
".",
"c_uint",
"(",
")",
"in_arg_handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"arg_grad_handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"num_aux_states",
"=",
"ctypes",
".",
"c_uint",
"(",
")",
"aux_state_handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXExecutorReshapeEx",
"(",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"partial_shaping",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"allow_up_sizing",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"self",
".",
"_ctx",
".",
"device_typeid",
")",
",",
"ctypes",
".",
"c_int",
"(",
"self",
".",
"_ctx",
".",
"device_id",
")",
",",
"mx_uint",
"(",
"len",
"(",
"ctx_map_keys",
")",
")",
",",
"c_str_array",
"(",
"ctx_map_keys",
")",
",",
"c_array_buf",
"(",
"ctypes",
".",
"c_int",
",",
"py_array",
"(",
"'i'",
",",
"ctx_map_dev_types",
")",
")",
",",
"c_array_buf",
"(",
"ctypes",
".",
"c_int",
",",
"py_array",
"(",
"'i'",
",",
"ctx_map_dev_ids",
")",
")",
",",
"mx_uint",
"(",
"len",
"(",
"provided_arg_shape_names",
")",
")",
",",
"c_str_array",
"(",
"provided_arg_shape_names",
")",
",",
"c_array_buf",
"(",
"mx_int",
",",
"py_array",
"(",
"'i'",
",",
"provided_arg_shape_data",
")",
")",
",",
"c_array_buf",
"(",
"mx_uint",
",",
"py_array",
"(",
"'I'",
",",
"provided_arg_shape_idx",
")",
")",
",",
"ctypes",
".",
"byref",
"(",
"num_in_args",
")",
",",
"ctypes",
".",
"byref",
"(",
"in_arg_handles",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_grad_handles",
")",
",",
"ctypes",
".",
"byref",
"(",
"num_aux_states",
")",
",",
"ctypes",
".",
"byref",
"(",
"aux_state_handles",
")",
",",
"shared_handle",
",",
"ctypes",
".",
"byref",
"(",
"handle",
")",
")",
")",
"arg_arrays",
"=",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"in_arg_handles",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_in_args",
".",
"value",
")",
"]",
"grad_arrays",
"=",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"arg_grad_handles",
"[",
"i",
"]",
")",
")",
"if",
"arg_grad_handles",
"[",
"i",
"]",
"is",
"not",
"None",
"else",
"None",
"for",
"i",
"in",
"range",
"(",
"num_in_args",
".",
"value",
")",
"]",
"aux_arrays",
"=",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"aux_state_handles",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_aux_states",
".",
"value",
")",
"]",
"executor",
"=",
"Executor",
"(",
"handle",
",",
"self",
".",
"_symbol",
",",
"self",
".",
"_ctx",
",",
"self",
".",
"_grad_req",
",",
"self",
".",
"_group2ctx",
")",
"executor",
".",
"arg_arrays",
"=",
"arg_arrays",
"executor",
".",
"grad_arrays",
"=",
"grad_arrays",
"executor",
".",
"aux_arrays",
"=",
"aux_arrays",
"return",
"executor"
] |
Return a new executor with the same symbol and shared memory,
but different input/output shapes.
For runtime reshaping, variable length sequences, etc.
The returned executor shares state with the current one,
and cannot be used in parallel with it.
Parameters
----------
partial_shaping : bool
Whether to allow changing the shape of unspecified arguments.
allow_up_sizing : bool
Whether to allow allocating new ndarrays that's larger than the original.
kwargs : dict of string to tuple of int
New shape for arguments.
Returns
-------
exec : Executor
A new executor that shares memory with self.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.zeros((2, 1)), 'b': mx.nd.ones((2,1))})
>>> new_shape = {'a': (4, 2), 'b': (4, 2)}
>>> texec.reshape(allow_up_sizing=True, **new_shape)
|
[
"Return",
"a",
"new",
"executor",
"with",
"the",
"same",
"symbol",
"and",
"shared",
"memory",
"but",
"different",
"input",
"/",
"output",
"shapes",
".",
"For",
"runtime",
"reshaping",
"variable",
"length",
"sequences",
"etc",
".",
"The",
"returned",
"executor",
"shares",
"state",
"with",
"the",
"current",
"one",
"and",
"cannot",
"be",
"used",
"in",
"parallel",
"with",
"it",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L375-L472
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor.py
|
Executor.debug_str
|
def debug_str(self):
"""Get a debug string about internal execution plan.
Returns
-------
debug_str : string
Debug string of the executor.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.sin(a)
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])})
>>> print(texec.debug_str())
Symbol Outputs:
output[0]=_plus0(0)
Variable:a
--------------------
Op:_mul_scalar, Name=_mulscalar0
Inputs:
arg[0]=a(0) version=0
Attrs:
scalar=2
--------------------
Op:sin, Name=sin0
Inputs:
arg[0]=a(0) version=0
--------------------
Op:elemwise_add, Name=_plus0
Inputs:
arg[0]=_mulscalar0(0)
arg[1]=sin0(0)
Total 0 MB allocated
Total 11 TempSpace resource requested
"""
debug_str = ctypes.c_char_p()
check_call(_LIB.MXExecutorPrint(
self.handle, ctypes.byref(debug_str)))
return py_str(debug_str.value)
|
python
|
def debug_str(self):
"""Get a debug string about internal execution plan.
Returns
-------
debug_str : string
Debug string of the executor.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.sin(a)
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])})
>>> print(texec.debug_str())
Symbol Outputs:
output[0]=_plus0(0)
Variable:a
--------------------
Op:_mul_scalar, Name=_mulscalar0
Inputs:
arg[0]=a(0) version=0
Attrs:
scalar=2
--------------------
Op:sin, Name=sin0
Inputs:
arg[0]=a(0) version=0
--------------------
Op:elemwise_add, Name=_plus0
Inputs:
arg[0]=_mulscalar0(0)
arg[1]=sin0(0)
Total 0 MB allocated
Total 11 TempSpace resource requested
"""
debug_str = ctypes.c_char_p()
check_call(_LIB.MXExecutorPrint(
self.handle, ctypes.byref(debug_str)))
return py_str(debug_str.value)
|
[
"def",
"debug_str",
"(",
"self",
")",
":",
"debug_str",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXExecutorPrint",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"debug_str",
")",
")",
")",
"return",
"py_str",
"(",
"debug_str",
".",
"value",
")"
] |
Get a debug string about internal execution plan.
Returns
-------
debug_str : string
Debug string of the executor.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.sin(a)
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])})
>>> print(texec.debug_str())
Symbol Outputs:
output[0]=_plus0(0)
Variable:a
--------------------
Op:_mul_scalar, Name=_mulscalar0
Inputs:
arg[0]=a(0) version=0
Attrs:
scalar=2
--------------------
Op:sin, Name=sin0
Inputs:
arg[0]=a(0) version=0
--------------------
Op:elemwise_add, Name=_plus0
Inputs:
arg[0]=_mulscalar0(0)
arg[1]=sin0(0)
Total 0 MB allocated
Total 11 TempSpace resource requested
|
[
"Get",
"a",
"debug",
"string",
"about",
"internal",
"execution",
"plan",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L474-L513
|
train
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_voc.py
|
parse_voc_rec
|
def parse_voc_rec(filename):
"""
parse pascal voc record into a dictionary
:param filename: xml file path
:return: list of dict
"""
import xml.etree.ElementTree as ET
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_dict = dict()
obj_dict['name'] = obj.find('name').text
obj_dict['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_dict['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_dict)
return objects
|
python
|
def parse_voc_rec(filename):
"""
parse pascal voc record into a dictionary
:param filename: xml file path
:return: list of dict
"""
import xml.etree.ElementTree as ET
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_dict = dict()
obj_dict['name'] = obj.find('name').text
obj_dict['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_dict['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_dict)
return objects
|
[
"def",
"parse_voc_rec",
"(",
"filename",
")",
":",
"import",
"xml",
".",
"etree",
".",
"ElementTree",
"as",
"ET",
"tree",
"=",
"ET",
".",
"parse",
"(",
"filename",
")",
"objects",
"=",
"[",
"]",
"for",
"obj",
"in",
"tree",
".",
"findall",
"(",
"'object'",
")",
":",
"obj_dict",
"=",
"dict",
"(",
")",
"obj_dict",
"[",
"'name'",
"]",
"=",
"obj",
".",
"find",
"(",
"'name'",
")",
".",
"text",
"obj_dict",
"[",
"'difficult'",
"]",
"=",
"int",
"(",
"obj",
".",
"find",
"(",
"'difficult'",
")",
".",
"text",
")",
"bbox",
"=",
"obj",
".",
"find",
"(",
"'bndbox'",
")",
"obj_dict",
"[",
"'bbox'",
"]",
"=",
"[",
"int",
"(",
"bbox",
".",
"find",
"(",
"'xmin'",
")",
".",
"text",
")",
",",
"int",
"(",
"bbox",
".",
"find",
"(",
"'ymin'",
")",
".",
"text",
")",
",",
"int",
"(",
"bbox",
".",
"find",
"(",
"'xmax'",
")",
".",
"text",
")",
",",
"int",
"(",
"bbox",
".",
"find",
"(",
"'ymax'",
")",
".",
"text",
")",
"]",
"objects",
".",
"append",
"(",
"obj_dict",
")",
"return",
"objects"
] |
parse pascal voc record into a dictionary
:param filename: xml file path
:return: list of dict
|
[
"parse",
"pascal",
"voc",
"record",
"into",
"a",
"dictionary",
":",
"param",
"filename",
":",
"xml",
"file",
"path",
":",
"return",
":",
"list",
"of",
"dict"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_voc.py#L30-L49
|
train
|
apache/incubator-mxnet
|
example/ssd/evaluate/eval_voc.py
|
voc_eval
|
def voc_eval(detpath, annopath, imageset_file, classname, cache_dir, ovthresh=0.5, use_07_metric=False):
"""
pascal voc evaluation
:param detpath: detection results detpath.format(classname)
:param annopath: annotations annopath.format(classname)
:param imageset_file: text file containing list of images
:param classname: category name
:param cache_dir: caching annotations
:param ovthresh: overlap threshold
:param use_07_metric: whether to use voc07's 11 point ap computation
:return: rec, prec, ap
"""
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
cache_file = os.path.join(cache_dir, 'annotations.pkl')
with open(imageset_file, 'r') as f:
lines = f.readlines()
image_filenames = [x.strip() for x in lines]
# load annotations from cache
if not os.path.isfile(cache_file):
recs = {}
for ind, image_filename in enumerate(image_filenames):
recs[image_filename] = parse_voc_rec(annopath.format(image_filename))
if ind % 100 == 0:
print('reading annotations for {:d}/{:d}'.format(ind + 1, len(image_filenames)))
print('saving annotations cache to {:s}'.format(cache_file))
with open(cache_file, 'wb') as f:
pickle.dump(recs, f)
else:
with open(cache_file, 'rb') as f:
recs = pickle.load(f)
# extract objects in :param classname:
class_recs = {}
npos = 0
for image_filename in image_filenames:
objects = [obj for obj in recs[image_filename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in objects])
difficult = np.array([x['difficult'] for x in objects]).astype(np.bool)
det = [False] * len(objects) # stand for detected
npos = npos + sum(~difficult)
class_recs[image_filename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read detections
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
bbox = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_inds = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
bbox = bbox[sorted_inds, :]
image_ids = [image_ids[x] for x in sorted_inds]
# go down detections and mark true positives and false positives
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
r = class_recs[image_ids[d]]
bb = bbox[d, :].astype(float)
ovmax = -np.inf
bbgt = r['bbox'].astype(float)
if bbgt.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(bbgt[:, 0], bb[0])
iymin = np.maximum(bbgt[:, 1], bb[1])
ixmax = np.minimum(bbgt[:, 2], bb[2])
iymax = np.minimum(bbgt[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(bbgt[:, 2] - bbgt[:, 0] + 1.) *
(bbgt[:, 3] - bbgt[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not r['difficult'][jmax]:
if not r['det'][jmax]:
tp[d] = 1.
r['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid division by zero in case first detection matches a difficult ground ruth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
|
python
|
def voc_eval(detpath, annopath, imageset_file, classname, cache_dir, ovthresh=0.5, use_07_metric=False):
"""
pascal voc evaluation
:param detpath: detection results detpath.format(classname)
:param annopath: annotations annopath.format(classname)
:param imageset_file: text file containing list of images
:param classname: category name
:param cache_dir: caching annotations
:param ovthresh: overlap threshold
:param use_07_metric: whether to use voc07's 11 point ap computation
:return: rec, prec, ap
"""
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
cache_file = os.path.join(cache_dir, 'annotations.pkl')
with open(imageset_file, 'r') as f:
lines = f.readlines()
image_filenames = [x.strip() for x in lines]
# load annotations from cache
if not os.path.isfile(cache_file):
recs = {}
for ind, image_filename in enumerate(image_filenames):
recs[image_filename] = parse_voc_rec(annopath.format(image_filename))
if ind % 100 == 0:
print('reading annotations for {:d}/{:d}'.format(ind + 1, len(image_filenames)))
print('saving annotations cache to {:s}'.format(cache_file))
with open(cache_file, 'wb') as f:
pickle.dump(recs, f)
else:
with open(cache_file, 'rb') as f:
recs = pickle.load(f)
# extract objects in :param classname:
class_recs = {}
npos = 0
for image_filename in image_filenames:
objects = [obj for obj in recs[image_filename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in objects])
difficult = np.array([x['difficult'] for x in objects]).astype(np.bool)
det = [False] * len(objects) # stand for detected
npos = npos + sum(~difficult)
class_recs[image_filename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read detections
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
bbox = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_inds = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
bbox = bbox[sorted_inds, :]
image_ids = [image_ids[x] for x in sorted_inds]
# go down detections and mark true positives and false positives
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
r = class_recs[image_ids[d]]
bb = bbox[d, :].astype(float)
ovmax = -np.inf
bbgt = r['bbox'].astype(float)
if bbgt.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(bbgt[:, 0], bb[0])
iymin = np.maximum(bbgt[:, 1], bb[1])
ixmax = np.minimum(bbgt[:, 2], bb[2])
iymax = np.minimum(bbgt[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(bbgt[:, 2] - bbgt[:, 0] + 1.) *
(bbgt[:, 3] - bbgt[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not r['difficult'][jmax]:
if not r['det'][jmax]:
tp[d] = 1.
r['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid division by zero in case first detection matches a difficult ground ruth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
|
[
"def",
"voc_eval",
"(",
"detpath",
",",
"annopath",
",",
"imageset_file",
",",
"classname",
",",
"cache_dir",
",",
"ovthresh",
"=",
"0.5",
",",
"use_07_metric",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"cache_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"cache_dir",
")",
"cache_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache_dir",
",",
"'annotations.pkl'",
")",
"with",
"open",
"(",
"imageset_file",
",",
"'r'",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"image_filenames",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"lines",
"]",
"# load annotations from cache",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"cache_file",
")",
":",
"recs",
"=",
"{",
"}",
"for",
"ind",
",",
"image_filename",
"in",
"enumerate",
"(",
"image_filenames",
")",
":",
"recs",
"[",
"image_filename",
"]",
"=",
"parse_voc_rec",
"(",
"annopath",
".",
"format",
"(",
"image_filename",
")",
")",
"if",
"ind",
"%",
"100",
"==",
"0",
":",
"print",
"(",
"'reading annotations for {:d}/{:d}'",
".",
"format",
"(",
"ind",
"+",
"1",
",",
"len",
"(",
"image_filenames",
")",
")",
")",
"print",
"(",
"'saving annotations cache to {:s}'",
".",
"format",
"(",
"cache_file",
")",
")",
"with",
"open",
"(",
"cache_file",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"recs",
",",
"f",
")",
"else",
":",
"with",
"open",
"(",
"cache_file",
",",
"'rb'",
")",
"as",
"f",
":",
"recs",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"# extract objects in :param classname:",
"class_recs",
"=",
"{",
"}",
"npos",
"=",
"0",
"for",
"image_filename",
"in",
"image_filenames",
":",
"objects",
"=",
"[",
"obj",
"for",
"obj",
"in",
"recs",
"[",
"image_filename",
"]",
"if",
"obj",
"[",
"'name'",
"]",
"==",
"classname",
"]",
"bbox",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"'bbox'",
"]",
"for",
"x",
"in",
"objects",
"]",
")",
"difficult",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"'difficult'",
"]",
"for",
"x",
"in",
"objects",
"]",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"det",
"=",
"[",
"False",
"]",
"*",
"len",
"(",
"objects",
")",
"# stand for detected",
"npos",
"=",
"npos",
"+",
"sum",
"(",
"~",
"difficult",
")",
"class_recs",
"[",
"image_filename",
"]",
"=",
"{",
"'bbox'",
":",
"bbox",
",",
"'difficult'",
":",
"difficult",
",",
"'det'",
":",
"det",
"}",
"# read detections",
"detfile",
"=",
"detpath",
".",
"format",
"(",
"classname",
")",
"with",
"open",
"(",
"detfile",
",",
"'r'",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"splitlines",
"=",
"[",
"x",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
")",
"for",
"x",
"in",
"lines",
"]",
"image_ids",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"splitlines",
"]",
"confidence",
"=",
"np",
".",
"array",
"(",
"[",
"float",
"(",
"x",
"[",
"1",
"]",
")",
"for",
"x",
"in",
"splitlines",
"]",
")",
"bbox",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"float",
"(",
"z",
")",
"for",
"z",
"in",
"x",
"[",
"2",
":",
"]",
"]",
"for",
"x",
"in",
"splitlines",
"]",
")",
"# sort by confidence",
"sorted_inds",
"=",
"np",
".",
"argsort",
"(",
"-",
"confidence",
")",
"sorted_scores",
"=",
"np",
".",
"sort",
"(",
"-",
"confidence",
")",
"bbox",
"=",
"bbox",
"[",
"sorted_inds",
",",
":",
"]",
"image_ids",
"=",
"[",
"image_ids",
"[",
"x",
"]",
"for",
"x",
"in",
"sorted_inds",
"]",
"# go down detections and mark true positives and false positives",
"nd",
"=",
"len",
"(",
"image_ids",
")",
"tp",
"=",
"np",
".",
"zeros",
"(",
"nd",
")",
"fp",
"=",
"np",
".",
"zeros",
"(",
"nd",
")",
"for",
"d",
"in",
"range",
"(",
"nd",
")",
":",
"r",
"=",
"class_recs",
"[",
"image_ids",
"[",
"d",
"]",
"]",
"bb",
"=",
"bbox",
"[",
"d",
",",
":",
"]",
".",
"astype",
"(",
"float",
")",
"ovmax",
"=",
"-",
"np",
".",
"inf",
"bbgt",
"=",
"r",
"[",
"'bbox'",
"]",
".",
"astype",
"(",
"float",
")",
"if",
"bbgt",
".",
"size",
">",
"0",
":",
"# compute overlaps",
"# intersection",
"ixmin",
"=",
"np",
".",
"maximum",
"(",
"bbgt",
"[",
":",
",",
"0",
"]",
",",
"bb",
"[",
"0",
"]",
")",
"iymin",
"=",
"np",
".",
"maximum",
"(",
"bbgt",
"[",
":",
",",
"1",
"]",
",",
"bb",
"[",
"1",
"]",
")",
"ixmax",
"=",
"np",
".",
"minimum",
"(",
"bbgt",
"[",
":",
",",
"2",
"]",
",",
"bb",
"[",
"2",
"]",
")",
"iymax",
"=",
"np",
".",
"minimum",
"(",
"bbgt",
"[",
":",
",",
"3",
"]",
",",
"bb",
"[",
"3",
"]",
")",
"iw",
"=",
"np",
".",
"maximum",
"(",
"ixmax",
"-",
"ixmin",
"+",
"1.",
",",
"0.",
")",
"ih",
"=",
"np",
".",
"maximum",
"(",
"iymax",
"-",
"iymin",
"+",
"1.",
",",
"0.",
")",
"inters",
"=",
"iw",
"*",
"ih",
"# union",
"uni",
"=",
"(",
"(",
"bb",
"[",
"2",
"]",
"-",
"bb",
"[",
"0",
"]",
"+",
"1.",
")",
"*",
"(",
"bb",
"[",
"3",
"]",
"-",
"bb",
"[",
"1",
"]",
"+",
"1.",
")",
"+",
"(",
"bbgt",
"[",
":",
",",
"2",
"]",
"-",
"bbgt",
"[",
":",
",",
"0",
"]",
"+",
"1.",
")",
"*",
"(",
"bbgt",
"[",
":",
",",
"3",
"]",
"-",
"bbgt",
"[",
":",
",",
"1",
"]",
"+",
"1.",
")",
"-",
"inters",
")",
"overlaps",
"=",
"inters",
"/",
"uni",
"ovmax",
"=",
"np",
".",
"max",
"(",
"overlaps",
")",
"jmax",
"=",
"np",
".",
"argmax",
"(",
"overlaps",
")",
"if",
"ovmax",
">",
"ovthresh",
":",
"if",
"not",
"r",
"[",
"'difficult'",
"]",
"[",
"jmax",
"]",
":",
"if",
"not",
"r",
"[",
"'det'",
"]",
"[",
"jmax",
"]",
":",
"tp",
"[",
"d",
"]",
"=",
"1.",
"r",
"[",
"'det'",
"]",
"[",
"jmax",
"]",
"=",
"1",
"else",
":",
"fp",
"[",
"d",
"]",
"=",
"1.",
"else",
":",
"fp",
"[",
"d",
"]",
"=",
"1.",
"# compute precision recall",
"fp",
"=",
"np",
".",
"cumsum",
"(",
"fp",
")",
"tp",
"=",
"np",
".",
"cumsum",
"(",
"tp",
")",
"rec",
"=",
"tp",
"/",
"float",
"(",
"npos",
")",
"# avoid division by zero in case first detection matches a difficult ground ruth",
"prec",
"=",
"tp",
"/",
"np",
".",
"maximum",
"(",
"tp",
"+",
"fp",
",",
"np",
".",
"finfo",
"(",
"np",
".",
"float64",
")",
".",
"eps",
")",
"ap",
"=",
"voc_ap",
"(",
"rec",
",",
"prec",
",",
"use_07_metric",
")",
"return",
"rec",
",",
"prec",
",",
"ap"
] |
pascal voc evaluation
:param detpath: detection results detpath.format(classname)
:param annopath: annotations annopath.format(classname)
:param imageset_file: text file containing list of images
:param classname: category name
:param cache_dir: caching annotations
:param ovthresh: overlap threshold
:param use_07_metric: whether to use voc07's 11 point ap computation
:return: rec, prec, ap
|
[
"pascal",
"voc",
"evaluation",
":",
"param",
"detpath",
":",
"detection",
"results",
"detpath",
".",
"format",
"(",
"classname",
")",
":",
"param",
"annopath",
":",
"annotations",
"annopath",
".",
"format",
"(",
"classname",
")",
":",
"param",
"imageset_file",
":",
"text",
"file",
"containing",
"list",
"of",
"images",
":",
"param",
"classname",
":",
"category",
"name",
":",
"param",
"cache_dir",
":",
"caching",
"annotations",
":",
"param",
"ovthresh",
":",
"overlap",
"threshold",
":",
"param",
"use_07_metric",
":",
"whether",
"to",
"use",
"voc07",
"s",
"11",
"point",
"ap",
"computation",
":",
"return",
":",
"rec",
"prec",
"ap"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_voc.py#L86-L196
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
|
MXNetGraph.register
|
def register(op_name):
"""Register operators"""
def wrapper(func):
"""Helper function to map functions"""
try:
import onnx as _
MXNetGraph.registry_[op_name] = func
except ImportError:
pass
return func
return wrapper
|
python
|
def register(op_name):
"""Register operators"""
def wrapper(func):
"""Helper function to map functions"""
try:
import onnx as _
MXNetGraph.registry_[op_name] = func
except ImportError:
pass
return func
return wrapper
|
[
"def",
"register",
"(",
"op_name",
")",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"\"\"\"Helper function to map functions\"\"\"",
"try",
":",
"import",
"onnx",
"as",
"_",
"MXNetGraph",
".",
"registry_",
"[",
"op_name",
"]",
"=",
"func",
"except",
"ImportError",
":",
"pass",
"return",
"func",
"return",
"wrapper"
] |
Register operators
|
[
"Register",
"operators"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L72-L83
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
|
MXNetGraph.convert_layer
|
def convert_layer(node, **kwargs):
"""Convert MXNet layer to ONNX"""
op = str(node["op"])
if op not in MXNetGraph.registry_:
raise AttributeError("No conversion function registered for op type %s yet." % op)
convert_func = MXNetGraph.registry_[op]
return convert_func(node, **kwargs)
|
python
|
def convert_layer(node, **kwargs):
"""Convert MXNet layer to ONNX"""
op = str(node["op"])
if op not in MXNetGraph.registry_:
raise AttributeError("No conversion function registered for op type %s yet." % op)
convert_func = MXNetGraph.registry_[op]
return convert_func(node, **kwargs)
|
[
"def",
"convert_layer",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"op",
"=",
"str",
"(",
"node",
"[",
"\"op\"",
"]",
")",
"if",
"op",
"not",
"in",
"MXNetGraph",
".",
"registry_",
":",
"raise",
"AttributeError",
"(",
"\"No conversion function registered for op type %s yet.\"",
"%",
"op",
")",
"convert_func",
"=",
"MXNetGraph",
".",
"registry_",
"[",
"op",
"]",
"return",
"convert_func",
"(",
"node",
",",
"*",
"*",
"kwargs",
")"
] |
Convert MXNet layer to ONNX
|
[
"Convert",
"MXNet",
"layer",
"to",
"ONNX"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L86-L92
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
|
MXNetGraph.split_params
|
def split_params(sym, params):
"""Helper function to split params dictionary into args and aux params
Parameters
----------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Returns
-------
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
"""
arg_params = {}
aux_params = {}
for args in sym.list_arguments():
if args in params:
arg_params.update({args: nd.array(params[args])})
for aux in sym.list_auxiliary_states():
if aux in params:
aux_params.update({aux: nd.array(params[aux])})
return arg_params, aux_params
|
python
|
def split_params(sym, params):
"""Helper function to split params dictionary into args and aux params
Parameters
----------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Returns
-------
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
"""
arg_params = {}
aux_params = {}
for args in sym.list_arguments():
if args in params:
arg_params.update({args: nd.array(params[args])})
for aux in sym.list_auxiliary_states():
if aux in params:
aux_params.update({aux: nd.array(params[aux])})
return arg_params, aux_params
|
[
"def",
"split_params",
"(",
"sym",
",",
"params",
")",
":",
"arg_params",
"=",
"{",
"}",
"aux_params",
"=",
"{",
"}",
"for",
"args",
"in",
"sym",
".",
"list_arguments",
"(",
")",
":",
"if",
"args",
"in",
"params",
":",
"arg_params",
".",
"update",
"(",
"{",
"args",
":",
"nd",
".",
"array",
"(",
"params",
"[",
"args",
"]",
")",
"}",
")",
"for",
"aux",
"in",
"sym",
".",
"list_auxiliary_states",
"(",
")",
":",
"if",
"aux",
"in",
"params",
":",
"aux_params",
".",
"update",
"(",
"{",
"aux",
":",
"nd",
".",
"array",
"(",
"params",
"[",
"aux",
"]",
")",
"}",
")",
"return",
"arg_params",
",",
"aux_params"
] |
Helper function to split params dictionary into args and aux params
Parameters
----------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Returns
-------
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
|
[
"Helper",
"function",
"to",
"split",
"params",
"dictionary",
"into",
"args",
"and",
"aux",
"params"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L95-L120
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.