Returns a list of contexts this parameter is initialized on.
def list_ctx(self):
    """Returns a list of contexts this parameter is initialized on."""
    if self._data is None:
        if self._deferred_init:
            return self._deferred_init[1]
        raise RuntimeError("Parameter '%s' has not been initialized" % self.name)
    return self._ctx_list
Sets gradient buffer on all contexts to 0. No action is taken if parameter is uninitialized or doesn't require gradient.
def zero_grad(self):
    """Sets gradient buffer on all contexts to 0. No action is taken if
    parameter is uninitialized or doesn't require gradient."""
    if self._grad is None:
        return
    for i in self._grad:
        ndarray.zeros_like(i, out=i)
Returns a symbol representing this parameter.
def var(self):
    """Returns a symbol representing this parameter."""
    if self._var is None:
        self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
                               lr_mult=self.lr_mult, wd_mult=self.wd_mult,
                               init=self.init, stype=self._stype)
    return self._var
Cast data and gradient of this Parameter to a new data type. Parameters ---------- dtype : str or numpy.dtype The new data type.
def cast(self, dtype):
    """Cast data and gradient of this Parameter to a new data type.

    Parameters
    ----------
    dtype : str or numpy.dtype
        The new data type.
    """
    self.dtype = dtype
    if self._data is None:
        return
    with autograd.pause():
        self._data = [i.astype(dtype) for i in self._data]
        if self._grad is None:
            return
        self._grad = [i.astype(dtype) for i in self._grad]
        autograd.mark_variables(self._data, self._grad, self.grad_req)
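A minimal usage sketch of cast (not taken from the source above; it assumes a standard MXNet 1.x Gluon install, and the layer below is illustrative):

import mxnet as mx
from mxnet import gluon

net = gluon.nn.Dense(4, in_units=3)      # small dense layer, eagerly initialized
net.initialize(ctx=mx.cpu())
net.weight.cast('float16')               # casts both data and gradient buffers
print(net.weight.data().dtype)           # expect numpy.float16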
Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found, :py:func:`get` will first try to retrieve it from "shared" dict. If still not found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and insert it to self. Parameters ---------- name : str Name of the desired Parameter. It will be prepended with this dictionary's prefix. **kwargs : dict The rest of key-word arguments for the created :py:class:`Parameter`. Returns ------- Parameter The created or retrieved :py:class:`Parameter`.
def get(self, name, **kwargs): """Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found, :py:func:`get` will first try to retrieve it from "shared" dict. If still not found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and insert it to self. Parameters ---------- name : str Name of the desired Parameter. It will be prepended with this dictionary's prefix. **kwargs : dict The rest of key-word arguments for the created :py:class:`Parameter`. Returns ------- Parameter The created or retrieved :py:class:`Parameter`. """ name = self.prefix + name param = self._get_impl(name) if param is None: # pylint: disable=too-many-nested-blocks param = Parameter(name, **kwargs) self._params[name] = param else: for k, v in kwargs.items(): if hasattr(param, k) and getattr(param, k) is not None: existing = getattr(param, k) if k == 'shape' and len(v) == len(existing): inferred_shape = [] matched = True for dim1, dim2 in zip(v, existing): if dim1 != dim2 and dim1 * dim2 != 0: matched = False break elif dim1 == dim2: inferred_shape.append(dim1) elif dim1 == 0: inferred_shape.append(dim2) else: inferred_shape.append(dim1) if matched: param._shape = tuple(inferred_shape) continue elif k == 'dtype' and np.dtype(v) == np.dtype(existing): continue assert v is None or v == existing, \ "Cannot retrieve Parameter '%s' because desired attribute " \ "does not match with stored for attribute '%s': " \ "desired '%s' vs stored '%s'."%( name, k, str(v), str(getattr(param, k))) else: setattr(param, k, v) return param
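A short sketch of the create-then-retrieve behavior described above, including the shape merging (the prefix and shapes are illustrative; assumes MXNet 1.x Gluon):

from mxnet import gluon

params = gluon.ParameterDict(prefix='block_')
w = params.get('weight', shape=(0, 10))    # first call creates it; 0 marks an unknown dim
w2 = params.get('weight', shape=(5, 10))   # second call retrieves it and fills in the dim
assert w is w2
print(w.shape)                             # (5, 10)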
Retrieves a :py:class:`.Constant` with name ``self.prefix+name``. If not found, :py:func:`get` will first try to retrieve it from "shared" dict. If still not found, :py:func:`get` will create a new :py:class:`.Constant` with key-word arguments and insert it to self. Parameters ---------- name : str Name of the desired Constant. It will be prepended with this dictionary's prefix. value : array-like Initial value of constant. Returns ------- :py:class:`.Constant` The created or retrieved :py:class:`.Constant`.
def get_constant(self, name, value=None): """Retrieves a :py:class:`.Constant` with name ``self.prefix+name``. If not found, :py:func:`get` will first try to retrieve it from "shared" dict. If still not found, :py:func:`get` will create a new :py:class:`.Constant` with key-word arguments and insert it to self. Parameters ---------- name : str Name of the desired Constant. It will be prepended with this dictionary's prefix. value : array-like Initial value of constant. Returns ------- :py:class:`.Constant` The created or retrieved :py:class:`.Constant`. """ name = self.prefix + name param = self._get_impl(name) if param is None: if value is None: raise KeyError("No constant named '{}'. Please specify value " \ "if you want to create a new constant.".format( name)) param = Constant(name, value) self._params[name] = param elif value is not None: assert isinstance(param, Constant), \ "Parameter '{}' already exists but it is not a constant.".format( name) if isinstance(value, ndarray.NDArray): value = value.asnumpy() assert param.shape == value.shape and \ (param.value.asnumpy() == value).all(), \ "Constant '{}' already exists but it's value doesn't match new " \ "value".format(name) return param
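A small sketch of get_constant (names and values illustrative; assumes MXNet 1.x Gluon):

from mxnet import gluon, nd

params = gluon.ParameterDict(prefix='block_')
scale = params.get_constant('scale', nd.array([2.0]))   # created on first call
same = params.get_constant('scale')                     # later calls retrieve it; value is optional
assert scale is same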
Copies all Parameters in ``other`` to self.
def update(self, other):
    """Copies all Parameters in ``other`` to self."""
    for k, v in other.items():
        if k in self._params:
            assert self._params[k] is v, \
                "Cannot update self with other because they have different " \
                "Parameters with the same name '%s'" % k
    for k, v in other.items():
        self._params[k] = v
Initializes all Parameters managed by this dictionary to be used for :py:class:`NDArray` API. It has no effect when using :py:class:`Symbol` API. Parameters ---------- init : Initializer Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``. Otherwise, :py:meth:`Parameter.init` takes precedence. ctx : Context or list of Context Keeps a copy of Parameters on one or many context(s). verbose : bool, default False Whether to verbosely print out details on initialization. force_reinit : bool, default False Whether to force re-initialization if parameter is already initialized.
def initialize(self, init=initializer.Uniform(), ctx=None, verbose=False, force_reinit=False): """Initializes all Parameters managed by this dictionary to be used for :py:class:`NDArray` API. It has no effect when using :py:class:`Symbol` API. Parameters ---------- init : Initializer Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``. Otherwise, :py:meth:`Parameter.init` takes precedence. ctx : Context or list of Context Keeps a copy of Parameters on one or many context(s). verbose : bool, default False Whether to verbosely print out details on initialization. force_reinit : bool, default False Whether to force re-initialization if parameter is already initialized. """ if verbose: init.set_verbosity(verbose=verbose) for _, v in self.items(): v.initialize(None, ctx, init, force_reinit=force_reinit)
Set an attribute to a new value for all Parameters. For example, set grad_req to null if you don't need gradient w.r.t a model's Parameters:: model.collect_params().setattr('grad_req', 'null') or change the learning rate multiplier:: model.collect_params().setattr('lr_mult', 0.5) Parameters ---------- name : str Name of the attribute. value : valid type for attribute name The new value for the attribute.
def setattr(self, name, value): """Set an attribute to a new value for all Parameters. For example, set grad_req to null if you don't need gradient w.r.t a model's Parameters:: model.collect_params().setattr('grad_req', 'null') or change the learning rate multiplier:: model.collect_params().setattr('lr_mult', 0.5) Parameters ---------- name : str Name of the attribute. value : valid type for attribute name The new value for the attribute. """ for i in self.values(): setattr(i, name, value)
Save parameters to file. Parameters ---------- filename : str Path to parameter file. strip_prefix : str, default '' Strip prefix from parameter names before saving.
def save(self, filename, strip_prefix=''):
    """Save parameters to file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    strip_prefix : str, default ''
        Strip prefix from parameter names before saving.
    """
    arg_dict = {}
    for param in self.values():
        weight = param._reduce()
        if not param.name.startswith(strip_prefix):
            raise ValueError(
                "Prefix '%s' is to be stripped before saving, but Parameter's "
                "name '%s' does not start with '%s'. "
                "This may be because your Block shares parameters from other "
                "Blocks or you forgot to use 'with name_scope()' when creating "
                "child blocks. For more info on naming, please see "
                "http://mxnet.incubator.apache.org/tutorials/basic/naming.html" % (
                    strip_prefix, param.name, strip_prefix))
        arg_dict[param.name[len(strip_prefix):]] = weight
    ndarray.save(filename, arg_dict)
Load parameters from file. Parameters ---------- filename : str Path to parameter file. ctx : Context or list of Context Context(s) to initialize loaded parameters on. allow_missing : bool, default False Whether to silently skip loading parameters not present in the file. ignore_extra : bool, default False Whether to silently ignore parameters from the file that are not present in this ParameterDict. restore_prefix : str, default '' Prepend this prefix to names of stored parameters before loading.
def load(self, filename, ctx=None, allow_missing=False,
         ignore_extra=False, restore_prefix=''):
    """Load parameters from file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    ctx : Context or list of Context
        Context(s) to initialize loaded parameters on.
    allow_missing : bool, default False
        Whether to silently skip loading parameters not present in the file.
    ignore_extra : bool, default False
        Whether to silently ignore parameters from the file that are not present in
        this ParameterDict.
    restore_prefix : str, default ''
        Prepend this prefix to names of stored parameters before loading.
    """
    if restore_prefix:
        for name in self.keys():
            assert name.startswith(restore_prefix), \
                "restore_prefix is '%s' but Parameter's name '%s' does not start " \
                "with '%s'" % (restore_prefix, name, restore_prefix)
    lprefix = len(restore_prefix)
    loaded = [(k[4:] if k.startswith('arg:') or k.startswith('aux:') else k, v) \
              for k, v in ndarray.load(filename).items()]
    arg_dict = {restore_prefix + k: v for k, v in loaded}
    if not allow_missing:
        for name in self.keys():
            assert name in arg_dict, \
                "Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
                "Please make sure source and target networks have the same prefix." % (
                    name[lprefix:], filename, _brief_print_list(arg_dict.keys()))
    for name in arg_dict:
        if name not in self._params:
            assert ignore_extra, \
                "Parameter '%s' loaded from file '%s' is not present in ParameterDict, " \
                "choices are: %s. Set ignore_extra to True to ignore. " \
                "Please make sure source and target networks have the same prefix." % (
                    name[lprefix:], filename, _brief_print_list(self._params.keys()))
            continue
        self[name]._load_init(arg_dict[name], ctx)
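A round-trip sketch combining save and load with prefix handling (the file name and prefixes are illustrative; assumes MXNet 1.x Gluon):

from mxnet import gluon, init

net = gluon.nn.Dense(2, in_units=3, prefix='dense0_')
net.initialize(init.Xavier())
net.collect_params().save('net.params', strip_prefix='dense0_')

# a second block with a different prefix can restore the same weights
net2 = gluon.nn.Dense(2, in_units=3, prefix='copy0_')
net2.collect_params().load('net.params', restore_prefix='copy0_')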
List and add all the torch backed ndarray functions to current module.
def _init_torch_module():
    """List and add all the torch backed ndarray functions to current module."""
    plist = ctypes.POINTER(FunctionHandle)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListFunctions(ctypes.byref(size), ctypes.byref(plist)))
    module_obj = sys.modules[__name__]
    for i in range(size.value):
        hdl = FunctionHandle(plist[i])
        function = _make_torch_function(hdl)
        # if function name starts with underscore, register as static method of NDArray
        if function is not None:
            setattr(module_obj, function.__name__, function)
Create a Torch function from the FunctionHandle.
def _make_torch_function(handle): """Create a Torch function from the FunctionHandle.""" # Get the property of function n_used_vars = mx_uint() n_scalars = mx_uint() n_mutate_vars = mx_uint() type_mask = ctypes.c_int() check_call(_LIB.MXFuncDescribe( handle, ctypes.byref(n_used_vars), ctypes.byref(n_scalars), ctypes.byref(n_mutate_vars), ctypes.byref(type_mask))) n_mutate_vars = n_mutate_vars.value n_used_vars = n_used_vars.value n_scalars = n_scalars.value type_mask = type_mask.value # Get the information from the function name = ctypes.c_char_p() desc = ctypes.c_char_p() num_args = mx_uint() arg_names = ctypes.POINTER(ctypes.c_char_p)() arg_types = ctypes.POINTER(ctypes.c_char_p)() arg_descs = ctypes.POINTER(ctypes.c_char_p)() ret_type = ctypes.c_char_p() check_call(_LIB.MXFuncGetInfo( handle, ctypes.byref(name), ctypes.byref(desc), ctypes.byref(num_args), ctypes.byref(arg_names), ctypes.byref(arg_types), ctypes.byref(arg_descs), ctypes.byref(ret_type))) func_name = py_str(name.value) if not func_name.startswith('_th_'): return None narg = int(num_args.value) param_str = _build_param_doc( [py_str(arg_names[i]) for i in range(narg)], [py_str(arg_types[i]) for i in range(narg)], [py_str(arg_descs[i]) for i in range(narg)]) if n_mutate_vars > 1: res = ','.join(['res%d '%i for i in range(n_mutate_vars)]) else: res = 'res ' doc_str = (('Interface for Torch function {name}.\n' + 'Invoke with\n{res}= mxnet.th.{name}(Parameters)\nor\n'+ 'mxnet.th.{name}({res}, Parameters).\n\n' + '{param_str}\n' + 'References: ' + 'https://github.com/torch/torch7/blob/master/doc/maths.md\n').format( name=func_name[4:], param_str=param_str, res=res)) def generic_torch_function(*args, **kwargs): """Invoke this function by passing in parameters. Parameters ---------- *args Positional arguments of inputs (both scalar and `NDArray`). Returns ------- out : NDArray The result NDArray(tuple) of result of computation. """ ndargs = [] arg_format = '' value = '' for arg in args: if isinstance(arg, NDArray): ndargs.append(arg) arg_format += 'n' value += ',' elif isinstance(arg, int): arg_format += 'i' value += str(arg) + ',' elif isinstance(arg, str): arg_format += 's' value += str(arg) + ',' elif isinstance(arg, float): arg_format += 'f' value += str(arg) + ',' elif isinstance(arg, bool): arg_format += 'b' value += str(arg) + ',' value = value[:-1] if len(ndargs) == n_used_vars: ndargs = [NDArray(_new_empty_handle()) for _ in range(n_mutate_vars)] + ndargs arg_format = 'n'*n_mutate_vars + arg_format value = ','*n_mutate_vars + value elif len(ndargs) == n_mutate_vars + n_used_vars: pass else: raise AssertionError(('Incorrect number of input NDArrays. ' + 'Need to be either %d (inputs) or %d ' + '(output buffer) + %d (input)') % (n_used_vars, n_mutate_vars, n_used_vars)) kwargs['format'] = arg_format kwargs['args'] = value for k in kwargs: kwargs[k] = str(kwargs[k]) check_call(_LIB.MXFuncInvokeEx( handle, c_handle_array(ndargs[n_mutate_vars:]), # pylint: disable=invalid-slice-index c_array(mx_float, []), c_handle_array(ndargs[:n_mutate_vars]), # pylint: disable=invalid-slice-index ctypes.c_int(len(kwargs)), c_str_array(kwargs.keys()), c_str_array(kwargs.values()))) if n_mutate_vars == 1: return ndargs[0] else: return ndargs[:n_mutate_vars] # pylint: disable=invalid-slice-index # End of function declaration ret_function = generic_torch_function ret_function.__name__ = func_name[4:] ret_function.__doc__ = doc_str return ret_function
r"""Inception v3 model from `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default $MXNET_HOME/models Location for keeping the model parameters.
def inception_v3(pretrained=False, ctx=cpu(),
                 root=os.path.join(base.data_dir(), 'models'), **kwargs):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    """
    net = Inception3(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        net.load_parameters(get_model_file('inceptionv3', root=root), ctx=ctx)
    return net
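A quick usage sketch (assumes a standard MXNet install; set pretrained=True to download weights):

import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.inception_v3(pretrained=False)
net.initialize()
x = mx.nd.random.uniform(shape=(1, 3, 299, 299))   # Inception v3 expects 299x299 inputs
print(net(x).shape)                                 # (1, 1000)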
Pack a string into MXImageRecord. Parameters ---------- header : IRHeader Header of the image record. ``header.label`` can be a number or an array. See more detail in ``IRHeader``. s : str Raw image string to be packed. Returns ------- s : str The packed string. Examples -------- >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3] >>> id = 2574 >>> header = mx.recordio.IRHeader(0, label, id, 0) >>> with open(path, 'r') as file: ... s = file.read() >>> packed_s = mx.recordio.pack(header, s)
def pack(header, s): """Pack a string into MXImageRecord. Parameters ---------- header : IRHeader Header of the image record. ``header.label`` can be a number or an array. See more detail in ``IRHeader``. s : str Raw image string to be packed. Returns ------- s : str The packed string. Examples -------- >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3] >>> id = 2574 >>> header = mx.recordio.IRHeader(0, label, id, 0) >>> with open(path, 'r') as file: ... s = file.read() >>> packed_s = mx.recordio.pack(header, s) """ header = IRHeader(*header) if isinstance(header.label, numbers.Number): header = header._replace(flag=0) else: label = np.asarray(header.label, dtype=np.float32) header = header._replace(flag=label.size, label=0) s = label.tostring() + s s = struct.pack(_IR_FORMAT, *header) + s return s
Unpack a MXImageRecord to string. Parameters ---------- s : str String buffer from ``MXRecordIO.read``. Returns ------- header : IRHeader Header of the image record. s : str Unpacked string. Examples -------- >>> record = mx.recordio.MXRecordIO('test.rec', 'r') >>> item = record.read() >>> header, s = mx.recordio.unpack(item) >>> header HEADER(flag=0, label=14.0, id=20129312, id2=0)
def unpack(s): """Unpack a MXImageRecord to string. Parameters ---------- s : str String buffer from ``MXRecordIO.read``. Returns ------- header : IRHeader Header of the image record. s : str Unpacked string. Examples -------- >>> record = mx.recordio.MXRecordIO('test.rec', 'r') >>> item = record.read() >>> header, s = mx.recordio.unpack(item) >>> header HEADER(flag=0, label=14.0, id=20129312, id2=0) """ header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE])) s = s[_IR_SIZE:] if header.flag > 0: header = header._replace(label=np.frombuffer(s, np.float32, header.flag)) s = s[header.flag*4:] return header, s
Unpack a MXImageRecord to image. Parameters ---------- s : str String buffer from ``MXRecordIO.read``. iscolor : int Image format option for ``cv2.imdecode``. Returns ------- header : IRHeader Header of the image record. img : numpy.ndarray Unpacked image. Examples -------- >>> record = mx.recordio.MXRecordIO('test.rec', 'r') >>> item = record.read() >>> header, img = mx.recordio.unpack_img(item) >>> header HEADER(flag=0, label=14.0, id=20129312, id2=0) >>> img array([[[ 23, 27, 45], [ 28, 32, 50], ..., [ 36, 40, 59], [ 35, 39, 58]], ..., [[ 91, 92, 113], [ 97, 98, 119], ..., [168, 169, 167], [166, 167, 165]]], dtype=uint8)
def unpack_img(s, iscolor=-1): """Unpack a MXImageRecord to image. Parameters ---------- s : str String buffer from ``MXRecordIO.read``. iscolor : int Image format option for ``cv2.imdecode``. Returns ------- header : IRHeader Header of the image record. img : numpy.ndarray Unpacked image. Examples -------- >>> record = mx.recordio.MXRecordIO('test.rec', 'r') >>> item = record.read() >>> header, img = mx.recordio.unpack_img(item) >>> header HEADER(flag=0, label=14.0, id=20129312, id2=0) >>> img array([[[ 23, 27, 45], [ 28, 32, 50], ..., [ 36, 40, 59], [ 35, 39, 58]], ..., [[ 91, 92, 113], [ 97, 98, 119], ..., [168, 169, 167], [166, 167, 165]]], dtype=uint8) """ header, s = unpack(s) img = np.frombuffer(s, dtype=np.uint8) assert cv2 is not None img = cv2.imdecode(img, iscolor) return header, img
Pack an image into ``MXImageRecord``. Parameters ---------- header : IRHeader Header of the image record. ``header.label`` can be a number or an array. See more detail in ``IRHeader``. img : numpy.ndarray Image to be packed. quality : int Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9. img_fmt : str Encoding of the image (.jpg for JPEG, .png for PNG). Returns ------- s : str The packed string. Examples -------- >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3] >>> id = 2574 >>> header = mx.recordio.IRHeader(0, label, id, 0) >>> img = cv2.imread('test.jpg') >>> packed_s = mx.recordio.pack_img(header, img)
def pack_img(header, img, quality=95, img_fmt='.jpg'): """Pack an image into ``MXImageRecord``. Parameters ---------- header : IRHeader Header of the image record. ``header.label`` can be a number or an array. See more detail in ``IRHeader``. img : numpy.ndarray Image to be packed. quality : int Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9. img_fmt : str Encoding of the image (.jpg for JPEG, .png for PNG). Returns ------- s : str The packed string. Examples -------- >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3] >>> id = 2574 >>> header = mx.recordio.IRHeader(0, label, id, 0) >>> img = cv2.imread('test.jpg') >>> packed_s = mx.recordio.pack_img(header, img) """ assert cv2 is not None jpg_formats = ['.JPG', '.JPEG'] png_formats = ['.PNG'] encode_params = None if img_fmt.upper() in jpg_formats: encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality] elif img_fmt.upper() in png_formats: encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality] ret, buf = cv2.imencode(img_fmt, img, encode_params) assert ret, 'failed to encode image' return pack(header, buf.tostring())
Opens the record file.
def open(self):
    """Opens the record file."""
    if self.flag == "w":
        check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle)))
        self.writable = True
    elif self.flag == "r":
        check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle)))
        self.writable = False
    else:
        raise ValueError("Invalid flag %s" % self.flag)
    self.pid = current_process().pid
    self.is_open = True
Closes the record file.
def close(self):
    """Closes the record file."""
    if not self.is_open:
        return
    if self.writable:
        check_call(_LIB.MXRecordIOWriterFree(self.handle))
    else:
        check_call(_LIB.MXRecordIOReaderFree(self.handle))
    self.is_open = False
    self.pid = None
Check process id to ensure integrity, reset if in new process.
def _check_pid(self, allow_reset=False):
    """Check process id to ensure integrity, reset if in new process."""
    if not self.pid == current_process().pid:
        if allow_reset:
            self.reset()
        else:
            raise RuntimeError("Forbidden operation in multiple processes")
Inserts a string buffer as a record. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'w') >>> for i in range(5): ... record.write('record_%d'%i) >>> record.close() Parameters ---------- buf : string (python2), bytes (python3) Buffer to write.
def write(self, buf): """Inserts a string buffer as a record. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'w') >>> for i in range(5): ... record.write('record_%d'%i) >>> record.close() Parameters ---------- buf : string (python2), bytes (python3) Buffer to write. """ assert self.writable self._check_pid(allow_reset=False) check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle, ctypes.c_char_p(buf), ctypes.c_size_t(len(buf))))
Returns record as a string. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'r') >>> for i in range(5): ... item = record.read() ... print(item) record_0 record_1 record_2 record_3 record_4 >>> record.close() Returns ---------- buf : string Buffer read.
def read(self): """Returns record as a string. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'r') >>> for i in range(5): ... item = record.read() ... print(item) record_0 record_1 record_2 record_3 record_4 >>> record.close() Returns ---------- buf : string Buffer read. """ assert not self.writable # trying to implicitly read from multiple processes is forbidden, # there's no elegant way to handle unless lock is introduced self._check_pid(allow_reset=False) buf = ctypes.c_char_p() size = ctypes.c_size_t() check_call(_LIB.MXRecordIOReaderReadRecord(self.handle, ctypes.byref(buf), ctypes.byref(size))) if buf: buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char*size.value)) return buf.contents.raw else: return None
Closes the record file.
def close(self):
    """Closes the record file."""
    if not self.is_open:
        return
    super(MXIndexedRecordIO, self).close()
    self.fidx.close()
Sets the current read pointer position. This function is internally called by `read_idx(idx)` to find the current reader pointer position. It doesn't return anything.
def seek(self, idx):
    """Sets the current read pointer position.

    This function is internally called by `read_idx(idx)` to find the current
    reader pointer position. It doesn't return anything."""
    assert not self.writable
    self._check_pid(allow_reset=True)
    pos = ctypes.c_size_t(self.idx[idx])
    check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos))
Returns the current position of write head. Examples --------- >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w') >>> print(record.tell()) 0 >>> for i in range(5): ... record.write_idx(i, 'record_%d'%i) ... print(record.tell()) 16 32 48 64 80
def tell(self): """Returns the current position of write head. Examples --------- >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w') >>> print(record.tell()) 0 >>> for i in range(5): ... record.write_idx(i, 'record_%d'%i) ... print(record.tell()) 16 32 48 64 80 """ assert self.writable pos = ctypes.c_size_t() check_call(_LIB.MXRecordIOWriterTell(self.handle, ctypes.byref(pos))) return pos.value
Inserts input record at given index. Examples --------- >>> for i in range(5): ... record.write_idx(i, 'record_%d'%i) >>> record.close() Parameters ---------- idx : int Index of a file. buf : Record to write.
def write_idx(self, idx, buf):
    """Inserts input record at given index.

    Examples
    ---------
    >>> for i in range(5):
    ...     record.write_idx(i, 'record_%d'%i)
    >>> record.close()

    Parameters
    ----------
    idx : int
        Index of a file.
    buf :
        Record to write.
    """
    key = self.key_type(idx)
    pos = self.tell()
    self.write(buf)
    self.fidx.write('%s\t%d\n' % (str(key), pos))
    self.idx[key] = pos
    self.keys.append(key)
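A write/read round trip for the indexed reader (file names are illustrative; bytes are used for Python 3):

import mxnet as mx

record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w')
for i in range(5):
    record.write_idx(i, b'record_%d' % i)
record.close()

record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'r')
print(record.read_idx(3))    # b'record_3'
record.close()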
Add new metrics as new columns to selected pandas dataframe. Parameters ---------- dataframe : pandas.DataFrame Selected dataframe that needs to be modified. metrics : metric.EvalMetric New metrics to be added.
def _add_new_columns(dataframe, metrics):
    """Add new metrics as new columns to selected pandas dataframe.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Selected dataframe that needs to be modified.
    metrics : metric.EvalMetric
        New metrics to be added.
    """
    # TODO(leodirac): we don't really need to do this on every update. Optimize
    new_columns = set(metrics.keys()) - set(dataframe.columns)
    for col in new_columns:
        dataframe[col] = None
Generates callback arguments for model.fit() for a set of callback objects. Callback objects like PandasLogger(), LiveLearningCurve() get passed in. This assembles all their callback arguments.
def args_wrapper(*args):
    """Generates callback arguments for model.fit() for a set of callback objects.
    Callback objects like PandasLogger(), LiveLearningCurve() get passed in.
    This assembles all their callback arguments.
    """
    out = defaultdict(list)
    for callback in args:
        callback_args = callback.callback_args()
        for k, v in callback_args.items():
            out[k].append(v)
    return dict(out)
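A toy illustration of how args_wrapper merges callback arguments; the plain dicts and strings below stand in for real callback objects and functions:

from collections import defaultdict

args_a = {'batch_end_callback': 'log_batch', 'epoch_end_callback': 'log_epoch'}
args_b = {'batch_end_callback': 'plot_batch'}

out = defaultdict(list)
for callback_args in (args_a, args_b):
    for k, v in callback_args.items():
        out[k].append(v)
print(dict(out))
# {'batch_end_callback': ['log_batch', 'plot_batch'], 'epoch_end_callback': ['log_epoch']}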
Append new metrics to selected dataframes. Parameters ---------- metrics : metric.EvalMetric New metrics to be added. df_name : str Name of the dataframe to be modified.
def append_metrics(self, metrics, df_name):
    """Append new metrics to selected dataframes.

    Parameters
    ----------
    metrics : metric.EvalMetric
        New metrics to be added.
    df_name : str
        Name of the dataframe to be modified.
    """
    dataframe = self._dataframes[df_name]
    _add_new_columns(dataframe, metrics)
    dataframe.loc[len(dataframe)] = metrics
Callback function for training.
def train_cb(self, param):
    """Callback function for training."""
    if param.nbatch % self.frequent == 0:
        self._process_batch(param, 'train')
Update parameters for the selected dataframe after a completed batch. Parameters ---------- dataframe : pandas.DataFrame Selected dataframe that needs to be modified.
def _process_batch(self, param, dataframe):
    """Update parameters for the selected dataframe after a completed batch.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Selected dataframe that needs to be modified.
    """
    now = time.time()
    if param.eval_metric is not None:
        metrics = dict(param.eval_metric.get_name_value())
        param.eval_metric.reset()
    else:
        metrics = {}
    # #11504
    try:
        speed = self.frequent / (now - self.last_time)
    except ZeroDivisionError:
        speed = float('inf')
    metrics['batches_per_sec'] = speed * self.batch_size
    metrics['records_per_sec'] = speed
    metrics['elapsed'] = self.elapsed()
    metrics['minibatch_count'] = param.nbatch
    metrics['epoch'] = param.epoch
    self.append_metrics(metrics, dataframe)
    self.last_time = now
Callback function after each epoch. Records the epoch time and appends it to the epoch dataframe.
def epoch_cb(self):
    """Callback function after each epoch. Records the epoch time and
    appends it to the epoch dataframe.
    """
    metrics = {}
    metrics['elapsed'] = self.elapsed()
    now = datetime.datetime.now()
    metrics['epoch_time'] = now - self.last_epoch_time
    self.append_metrics(metrics, 'epoch')
    self.last_epoch_time = now
Render the plot with bokeh.io and push to notebook.
def _push_render(self):
    """Render the plot with bokeh.io and push to notebook."""
    bokeh.io.push_notebook(handle=self.handle)
    self.last_update = time.time()
Update the selected dataframe after a completed batch. Parameters ---------- df_name : str Name of the selected dataframe that needs to be modified.
def _process_batch(self, param, df_name):
    """Update the selected dataframe after a completed batch.

    Parameters
    ----------
    df_name : str
        Name of the selected dataframe that needs to be modified.
    """
    if param.eval_metric is not None:
        metrics = dict(param.eval_metric.get_name_value())
        param.eval_metric.reset()
    else:
        metrics = {}
    metrics['elapsed'] = datetime.datetime.now() - self.start_time
    for key, value in metrics.items():
        if key not in self._data[df_name]:
            self._data[df_name][key] = []
        self._data[df_name][key].append(value)
:param nested_list: list of lists of strings :return: dictionary mapping from string to int, and the inverse of that dictionary
def build_vocab(nested_list):
    """
    :param nested_list: list of lists of strings
    :return: dictionary mapping from string to int, and the inverse of that dictionary
    """
    # Build vocabulary
    word_counts = Counter(itertools.chain(*nested_list))
    # Mapping from index to label
    vocabulary_inv = [x[0] for x in word_counts.most_common()]
    # Mapping from label to index
    vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
    return vocabulary, vocabulary_inv
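A toy run of build_vocab (the sentences are illustrative; equal-count tokens keep the order Counter.most_common gives them):

sentences = [["the", "cat", "sat"], ["the", "dog", "sat"]]
vocabulary, vocabulary_inv = build_vocab(sentences)
print(vocabulary_inv[:2])   # most frequent tokens first, e.g. ['the', 'sat']
print(vocabulary["dog"])    # the integer index assigned to 'dog'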
Reads a csv of sentences/tag sequences into a pandas dataframe. Converts into X = array(list(int)) & Y = array(list(int)) Splits into training and test sets Builds dictionaries mapping from index labels to labels/ indexed features to features :param data_dir: directory to read in csv data from :param max_records: total number of records to randomly select from input data :param train_fraction: fraction of the data to use for training :param batch_size: records in mini-batches during training :param buckets: size of each bucket in the iterators :return: train_iter, val_iter, word_to_index, index_to_word, pos_to_index, index_to_pos
def build_iters(data_dir, max_records, train_fraction, batch_size, buckets=None): """ Reads a csv of sentences/tag sequences into a pandas dataframe. Converts into X = array(list(int)) & Y = array(list(int)) Splits into training and test sets Builds dictionaries mapping from index labels to labels/ indexed features to features :param data_dir: directory to read in csv data from :param max_records: total number of records to randomly select from input data :param train_fraction: fraction of the data to use for training :param batch_size: records in mini-batches during training :param buckets: size of each bucket in the iterators :return: train_iter, val_iter, word_to_index, index_to_word, pos_to_index, index_to_pos """ # Read in data as numpy array df = pd.read_pickle(os.path.join(data_dir, "ner_data.pkl"))[:max_records] # Get feature lists entities=[list(array) for array in df["BILOU_tag"].values] sentences = [list(array) for array in df["token"].values] chars=[[[c for c in word] for word in sentence] for sentence in sentences] # Build vocabularies entity_to_index, index_to_entity = build_vocab(entities) word_to_index, index_to_word = build_vocab(sentences) char_to_index, index_to_char = build_vocab([np.array([c for c in word]) for word in index_to_word]) save_obj(entity_to_index, os.path.join(args.data_dir, "tag_to_index")) # Map strings to integer values indexed_entities=[list(map(entity_to_index.get, l)) for l in entities] indexed_tokens=[list(map(word_to_index.get, l)) for l in sentences] indexed_chars=[[list(map(char_to_index.get, word)) for word in sentence] for sentence in chars] # Split into training and testing data idx=int(len(indexed_tokens)*train_fraction) X_token_train, X_char_train, Y_train = indexed_tokens[:idx], indexed_chars[:idx], indexed_entities[:idx] X_token_test, X_char_test, Y_test = indexed_tokens[idx:], indexed_chars[idx:], indexed_entities[idx:] # build iterators to feed batches to network train_iter = iterators.BucketNerIter(sentences=X_token_train, characters=X_char_train, label=Y_train, max_token_chars=5, batch_size=batch_size, buckets=buckets) val_iter = iterators.BucketNerIter(sentences=X_token_test, characters=X_char_test, label=Y_test, max_token_chars=train_iter.max_token_chars, batch_size=batch_size, buckets=train_iter.buckets) return train_iter, val_iter, word_to_index, char_to_index, entity_to_index
Build NN symbol depending on the length of the input sequence
def sym_gen(seq_len): """ Build NN symbol depending on the length of the input sequence """ sentence_shape = train_iter.provide_data[0][1] char_sentence_shape = train_iter.provide_data[1][1] entities_shape = train_iter.provide_label[0][1] X_sent = mx.symbol.Variable(train_iter.provide_data[0].name) X_char_sent = mx.symbol.Variable(train_iter.provide_data[1].name) Y = mx.sym.Variable(train_iter.provide_label[0].name) ############################### # Character embedding component ############################### char_embeddings = mx.sym.Embedding(data=X_char_sent, input_dim=len(char_to_index), output_dim=args.char_embed, name='char_embed') char_embeddings = mx.sym.reshape(data=char_embeddings, shape=(0,1,seq_len,-1,args.char_embed), name='char_embed2') char_cnn_outputs = [] for i, filter_size in enumerate(args.char_filter_list): # Kernel that slides over entire words resulting in a 1d output convi = mx.sym.Convolution(data=char_embeddings, kernel=(1, filter_size, args.char_embed), stride=(1, 1, 1), num_filter=args.char_filters, name="char_conv_layer_" + str(i)) acti = mx.sym.Activation(data=convi, act_type='tanh') pooli = mx.sym.Pooling(data=acti, pool_type='max', kernel=(1, char_sentence_shape[2] - filter_size + 1, 1), stride=(1, 1, 1), name="char_pool_layer_" + str(i)) pooli = mx.sym.transpose(mx.sym.Reshape(pooli, shape=(0, 0, 0)), axes=(0, 2, 1), name="cchar_conv_layer_" + str(i)) char_cnn_outputs.append(pooli) # combine features from all filters & apply dropout cnn_char_features = mx.sym.Concat(*char_cnn_outputs, dim=2, name="cnn_char_features") regularized_cnn_char_features = mx.sym.Dropout(data=cnn_char_features, p=args.dropout, mode='training', name='regularized charCnn features') ################################## # Combine char and word embeddings ################################## word_embeddings = mx.sym.Embedding(data=X_sent, input_dim=len(word_to_index), output_dim=args.word_embed, name='word_embed') rnn_features = mx.sym.Concat(*[word_embeddings, regularized_cnn_char_features], dim=2, name='rnn input') ############################## # Bidirectional LSTM component ############################## # unroll the lstm cell in time, merging outputs bi_cell.reset() output, states = bi_cell.unroll(length=seq_len, inputs=rnn_features, merge_outputs=True) # Map to num entity classes rnn_output = mx.sym.Reshape(output, shape=(-1, args.lstm_state_size * 2), name='r_output') fc = mx.sym.FullyConnected(data=rnn_output, num_hidden=len(entity_to_index), name='fc_layer') # reshape back to same shape as loss will be reshaped_fc = mx.sym.transpose(mx.sym.reshape(fc, shape=(-1, seq_len, len(entity_to_index))), axes=(0, 2, 1)) sm = mx.sym.SoftmaxOutput(data=reshaped_fc, label=Y, ignore_label=-1, use_ignore=True, multi_output=True, name='softmax') return sm, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
Draw random samples from an approximately log-uniform or Zipfian distribution. This operation randomly samples *num_sampled* candidates from the range of integers [0, range_max). The elements of sampled_candidates are drawn with replacement from the base distribution. The base distribution for this operator is an approximately log-uniform or Zipfian distribution: P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1) This sampler is useful when the true classes approximately follow such a distribution, for example when the classes represent words in a lexicon sorted in decreasing order of frequency. If your classes are not ordered by decreasing frequency, do not use this op. Additionally, it also returns the number of times each of the true classes and the sampled classes is expected to occur. Parameters ---------- true_classes : Symbol The target classes in 1-D. num_sampled: int The number of classes to randomly sample. range_max: int The number of possible classes. Returns ------- samples: Symbol The sampled candidate classes in 1-D `int64` dtype. expected_count_true: Symbol The expected count for true classes in 1-D `float64` dtype. expected_count_sample: Symbol The expected count for sampled candidates in 1-D `float64` dtype. Examples -------- >>> true_cls = mx.sym.Variable('true_cls') >>> samples, exp_count_true, exp_count_sample = mx.sym.contrib.rand_zipfian(true_cls, 4, 5) >>> samples.eval(true_cls=mx.nd.array([3]))[0].asnumpy() array([1, 3, 3, 3]) >>> exp_count_true.eval(true_cls=mx.nd.array([3]))[0].asnumpy() array([0.12453879]) >>> exp_count_sample.eval(true_cls=mx.nd.array([3]))[0].asnumpy() array([0.22629439, 0.12453879, 0.12453879, 0.12453879])
def rand_zipfian(true_classes, num_sampled, range_max): """Draw random samples from an approximately log-uniform or Zipfian distribution. This operation randomly samples *num_sampled* candidates the range of integers [0, range_max). The elements of sampled_candidates are drawn with replacement from the base distribution. The base distribution for this operator is an approximately log-uniform or Zipfian distribution: P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1) This sampler is useful when the true classes approximately follow such a distribution. For example, if the classes represent words in a lexicon sorted in decreasing order of \ frequency. If your classes are not ordered by decreasing frequency, do not use this op. Additionaly, it also returns the number of times each of the \ true classes and the sampled classes is expected to occur. Parameters ---------- true_classes : Symbol The target classes in 1-D. num_sampled: int The number of classes to randomly sample. range_max: int The number of possible classes. Returns ------- samples: Symbol The sampled candidate classes in 1-D `int64` dtype. expected_count_true: Symbol The expected count for true classes in 1-D `float64` dtype. expected_count_sample: Symbol The expected count for sampled candidates in 1-D `float64` dtype. Examples -------- >>> true_cls = mx.sym.Variable('true_cls') >>> samples, exp_count_true, exp_count_sample = mx.sym.contrib.rand_zipfian(true_cls, 4, 5) >>> samples.eval(true_cls=mx.nd.array([3]))[0].asnumpy() array([1, 3, 3, 3]) >>> exp_count_true.eval(true_cls=mx.nd.array([3]))[0].asnumpy() array([0.12453879]) >>> exp_count_sample.eval(true_cls=mx.nd.array([3]))[0].asnumpy() array([0.22629439, 0.12453879, 0.12453879, 0.12453879]) """ assert(isinstance(true_classes, Symbol)), "unexpected type %s" % type(true_classes) log_range = math.log(range_max + 1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64') # make sure sampled_classes are in the range of [0, range_max) sampled_classes = (rand.exp() - 1).astype('int64') % range_max true_classes = true_classes.astype('float64') expected_prob_true = ((true_classes + 2.0) / (true_classes + 1.0)).log() / log_range expected_count_true = expected_prob_true * num_sampled # cast sampled classes to fp64 to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled
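A quick numpy check of the base distribution above (not part of the source; it just evaluates P(class) for range_max=5):

import numpy as np

range_max = 5
classes = np.arange(range_max)
p = (np.log(classes + 2) - np.log(classes + 1)) / np.log(range_max + 1)
print(p)          # per-draw probabilities; p[3] is roughly 0.1245
print(p.sum())    # 1.0, since the log terms telescope to log(range_max + 1)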
Run a for loop with user-defined computation over Symbols on dimension 0. This operator simulates a for loop and body has the computation for an iteration of the for loop. It runs the computation in body on each slice from the input NDArrays. body takes two arguments as input and outputs a tuple of two elements, as illustrated below: out, states = body(data1, states) data1 can be either a symbol or a list of symbols. If data is a symbol, data1 is a symbol. Otherwise, data1 is a list of symbols and has the same size as data. states is a list of symbols and have the same size as init_states. Similarly, out can be either a symbol or a list of symbols, which are concatenated as the first output of foreach; states from the last execution of body are the second output of foreach. foreach can output only output data or states. If a user only wants states, the body function can return ([], states). Similarly, if a user only wants output data, the body function can return (out, []). The computation done by this operator is equivalent to the pseudo code below when the input data is NDArray:: states = init_states outs = [] for i in data.shape[0]: s = data[i] out, states = body(s, states) outs.append(out) outs = stack(*outs) Parameters ---------- body : a Python function. Define computation in an iteration. data: a symbol or a list of symbols. The input data. init_states: a Symbol or nested lists of symbols. The initial values of the loop states. name: string. The name of the operator. Returns ------- outputs: a Symbol or nested lists of Symbols. The output data concatenated from the output of all iterations. states: a Symbol or nested lists of Symbols. The loop states in the last iteration. Examples -------- >>> step = lambda data, states: (data + states[0], [states[0] * 2]) >>> data = mx.sym.var('data') >>> states = [mx.sym.var('state')] >>> outs, states = mx.sym.contrib.foreach(step, data, states)
def foreach(body, data, init_states, name="foreach"): """Run a for loop with user-defined computation over Symbols on dimension 0. This operator simulates a for loop and body has the computation for an iteration of the for loop. It runs the computation in body on each slice from the input NDArrays. body takes two arguments as input and outputs a tuple of two elements, as illustrated below: out, states = body(data1, states) data1 can be either a symbol or a list of symbols. If data is a symbol, data1 is a symbol. Otherwise, data1 is a list of symbols and has the same size as data. states is a list of symbols and have the same size as init_states. Similarly, out can be either a symbol or a list of symbols, which are concatenated as the first output of foreach; states from the last execution of body are the second output of foreach. foreach can output only output data or states. If a user only wants states, the body function can return ([], states). Similarly, if a user only wants output data, the body function can return (out, []). The computation done by this operator is equivalent to the pseudo code below when the input data is NDArray:: states = init_states outs = [] for i in data.shape[0]: s = data[i] out, states = body(s, states) outs.append(out) outs = stack(*outs) Parameters ---------- body : a Python function. Define computation in an iteration. data: a symbol or a list of symbols. The input data. init_states: a Symbol or nested lists of symbols. The initial values of the loop states. name: string. The name of the operator. Returns ------- outputs: a Symbol or nested lists of Symbols. The output data concatenated from the output of all iterations. states: a Symbol or nested lists of Symbols. The loop states in the last iteration. Examples -------- >>> step = lambda data, states: (data + states[0], [states[0] * 2]) >>> data = mx.sym.var('data') >>> states = [mx.sym.var('state')] >>> outs, states = mx.sym.contrib.foreach(step, data, states) """ flatten_data, data_fmt = _flatten(data, "foreach input") _check_data(flatten_data, symbol.Symbol, "data should be a symbol or a nested list of symbols") init_flatten_states, init_state_fmt = _flatten(init_states, "foreach states") _check_data(init_flatten_states, symbol.Symbol, "init_states should be a symbol or a nested list of symbols") # If the input python function references to the symbols outside # the python function, we need to prune the computation graph constructed from # the function. One way of doing it is to mark the nodes in the computation graph # with AttrScope and prune the nodes without the special attribute. 
name = _get_unique_subgraph_name(name) with AttrScope(__subgraph_name__=name): in_eles = [symbol.var(_get_sym_uniq_name(sym)) for sym in flatten_data] in_eles, _ = _regroup(in_eles, data_fmt) states = [symbol.var(_get_sym_uniq_name(s)) for s in init_flatten_states] states, _ = _regroup(states, copy.deepcopy(init_state_fmt)) sym_out, sym_states = body(in_eles, states) sym_out, out_fmt = _flatten(sym_out, "foreach output") sym_states, state_fmt = _flatten(sym_states, "foreach loop_vars") assert init_state_fmt == state_fmt, "The input and output loop_vars have different format" _check_data(sym_out, symbol.Symbol, "the output should be an NDArray or a nested list of NDArrays") _check_data(sym_states, symbol.Symbol, "the output states should be an NDArray or a nested list of NDArrays") num_out_data = len(sym_out) num_states = len(sym_states) num_outputs = num_out_data + num_states g = _construct_subgraph(sym_out, sym_states, name) input_syms = _get_graph_inputs(g) cut_syms = _cut_subgraph(g) input_syms = _get_graph_inputs(g) # Here we need to find out how the input symbols are ordered as well as # where the loop states are located in the list of inputs. # This dict contains the symbols of the subgraph. input_syms = {sym.name:sym for sym in input_syms} gin_names = input_syms.keys() # This array contains the symbols for the inputs of foreach. # They are ordered according to the inputs of the subgraph. state_names = [_get_sym_uniq_name(sym) for sym in init_flatten_states] data_names = [_get_sym_uniq_name(sym) for sym in flatten_data] cut_var_map = {sym.list_outputs()[0]:sym for sym in cut_syms} cut_var_names = cut_var_map.keys() subg_input_names = g.list_inputs() assert len(set(subg_input_names)) == len(subg_input_names), \ "The inputs of the subgraph don't have unique names: " + str(subg_input_names) # ordered_ins contains input symbols in the following order: # data_syms, state_syms, followed by cut_vars and vars in the closure. ordered_ins = [x for x in flatten_data] # this defines the location of data_syms in the list of subgraph inputs in_data_locs = [] for dname in data_names: # Some data may not be used. if dname in subg_input_names: in_data_locs.append(subg_input_names.index(dname)) else: raise AssertionError("the data arrays have to be used in the loop body") ordered_ins.extend(init_flatten_states) # this defines the location of state_syms in the list of subgraph inputs. in_state_locs = [] for sname in state_names: # Some state may not be used. if sname in subg_input_names: in_state_locs.append(subg_input_names.index(sname)) else: raise AssertionError("the state arrays have to be used in the loop body") remain_locs = [] for in_name in subg_input_names: assert in_name in gin_names, "The input variable %s can't be found in graph inputs: %s" \ % (in_name, str(gin_names)) if in_name in cut_var_names: ordered_ins.append(cut_var_map[in_name]) remain_locs.append(subg_input_names.index(in_name)) elif in_name not in data_names and in_name not in state_names: # The remaining inputs are the variable nodes created inside the UDF. # The subgraph can't have nodes shared with the main graph. As such, # we need to make a copy of these variable nodes. 
assert in_name in gin_names ordered_ins.append(copy.deepcopy(input_syms[in_name])) remain_locs.append(subg_input_names.index(in_name)) ret = symbol._internal._foreach(g, *ordered_ins, num_outputs=num_outputs, num_out_data=num_out_data, in_state_locs=in_state_locs, in_data_locs=in_data_locs, remain_locs=remain_locs) outs = [] for i in range(num_outputs - num_states): outs.append(ret[i]) outs, _ = _regroup(outs, out_fmt) states = [] for i in range(num_states): states.append(ret[num_outputs - num_states + i]) states, _ = _regroup(states, state_fmt) return (outs, states)
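For reference, here is the same computation as the doctest's step function, written out with plain numpy in the style of the pseudo code above (the values are illustrative):

import numpy as np

def step(x, states):
    return x + states[0], [states[0] * 2]

data = np.arange(6, dtype='float32').reshape(3, 2)
states = [np.ones(2, dtype='float32')]
outs = []
for i in range(data.shape[0]):
    out, states = step(data[i], states)
    outs.append(out)
print(np.stack(outs))   # row i equals data[i] + 2**i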
Run a while loop with user-defined computation and loop condition. This operator simulates a while loop which iterately does customized computation as long as the condition is satisfied. `loop_vars` is a Symbol or nested lists of Symbols on which the computation uses. `cond` is a user-defined function, used as the loop condition. It consumes `loop_vars`, and produces a scalar MXNet symbol, indicating the termination of the loop. The loop ends when `cond` returns false (zero). The `cond` is variadic, and its signature should be `cond(*loop_vars) => Symbol`. `func` is a user-defined function, used as the loop body. It also consumes `loop_vars`, and produces `step_output` and `new_loop_vars` at each step. In each step, `step_output` should contain the same number elements. Through all steps, the i-th element of `step_output` should have the same shape and dtype. Also, `new_loop_vars` should contain the same number of elements as `loop_vars`, and the corresponding element should have the same shape and dtype. The `func` is variadic, and its signature should be `func(*loop_vars) => (Symbol or nested List[Symbol] step_output, Symbol or nested List[Symbol] new_loop_vars)`. `max_iterations` is a scalar that defines the maximum number of iterations allowed. This function returns two lists. The first list has the length of `|step_output|`, in which the i-th element are all i-th elements of `step_output` from all steps, stacked along axis 0. The second list has the length of `|loop_vars|`, which represents final states of loop variables. .. warning:: For now, the axis 0 of all Symbols in the first list are `max_iterations`, due to lack of dynamic shape inference. .. warning:: Even if `cond` is never satisfied, while_loop returns a list of outputs with inferred dtype and shape. This is different from the Symbol version, where in this case `step_outputs` are assumed as an empty list. Parameters ---------- cond: a Python function. The loop condition. func: a Python function. The loop body. loop_vars: a Symbol or nested lists of Symbol. The initial values of the loop variables. max_iterations: a python int. Maximum number of iterations. Returns ------ outputs: a Symbol or nested lists of Symbols stacked output from each step states: a Symbol or nested lists of Symbols final state Examples -------- >>> cond = lambda i, s: i <= 5 >>> func = lambda i, s: ([i + s], [i + 1, s + i]) >>> loop_vars = (mx.sym.var('i'), mx.sym.var('s')) >>> outputs, states = mx.sym.contrib.while_loop(cond, func, loop_vars, max_iterations=10)
def while_loop(cond, func, loop_vars, max_iterations=None, name="while_loop"): """Run a while loop with user-defined computation and loop condition. This operator simulates a while loop which iterately does customized computation as long as the condition is satisfied. `loop_vars` is a Symbol or nested lists of Symbols on which the computation uses. `cond` is a user-defined function, used as the loop condition. It consumes `loop_vars`, and produces a scalar MXNet symbol, indicating the termination of the loop. The loop ends when `cond` returns false (zero). The `cond` is variadic, and its signature should be `cond(*loop_vars) => Symbol`. `func` is a user-defined function, used as the loop body. It also consumes `loop_vars`, and produces `step_output` and `new_loop_vars` at each step. In each step, `step_output` should contain the same number elements. Through all steps, the i-th element of `step_output` should have the same shape and dtype. Also, `new_loop_vars` should contain the same number of elements as `loop_vars`, and the corresponding element should have the same shape and dtype. The `func` is variadic, and its signature should be `func(*loop_vars) => (Symbol or nested List[Symbol] step_output, Symbol or nested List[Symbol] new_loop_vars)`. `max_iterations` is a scalar that defines the maximum number of iterations allowed. This function returns two lists. The first list has the length of `|step_output|`, in which the i-th element are all i-th elements of `step_output` from all steps, stacked along axis 0. The second list has the length of `|loop_vars|`, which represents final states of loop variables. .. warning:: For now, the axis 0 of all Symbols in the first list are `max_iterations`, due to lack of dynamic shape inference. .. warning:: Even if `cond` is never satisfied, while_loop returns a list of outputs with inferred dtype and shape. This is different from the Symbol version, where in this case `step_outputs` are assumed as an empty list. Parameters ---------- cond: a Python function. The loop condition. func: a Python function. The loop body. loop_vars: a Symbol or nested lists of Symbol. The initial values of the loop variables. max_iterations: a python int. Maximum number of iterations. 
Returns ------ outputs: a Symbol or nested lists of Symbols stacked output from each step states: a Symbol or nested lists of Symbols final state Examples -------- >>> cond = lambda i, s: i <= 5 >>> func = lambda i, s: ([i + s], [i + 1, s + i]) >>> loop_vars = (mx.sym.var('i'), mx.sym.var('s')) >>> outputs, states = mx.sym.contrib.while_loop(cond, func, loop_vars, max_iterations=10) """ def _to_python_scalar(inputs, type_, name): """Converts "inputs", possibly typed mxnet NDArray, a numpy ndarray, other python types, to the given type """ if hasattr(inputs, "asscalar"): inputs = inputs.asscalar() try: inputs = type_(inputs) except: raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__)) return inputs def _cond_wrapper(loop_vars): result = cond(*loop_vars) if not isinstance(result, Symbol): raise ValueError("Return of cond must be a Symbol") return [], [result], [], [] def _func_wrapper(loop_vars): """This wrapper unifies "func: loop_vars -> new_loop_vars" and "func: loop_vars -> (step_output, new_loop_vars)" into "func: loop_vars -> (list of step_outputs, tuple of new_loop_vars) """ step_output, new_loop_vars = func(*loop_vars) if step_output is None: step_output = [] if new_loop_vars is None: new_loop_vars = [] if isinstance(step_output, tuple): step_output = list(step_output) if isinstance(new_loop_vars, tuple): new_loop_vars = list(new_loop_vars) step_output, out_fmt = _flatten(step_output, "while output") new_loop_vars, var_fmt = _flatten(new_loop_vars, "while loop_vars") if len(loop_vars) != len(new_loop_vars): raise ValueError("The number of loop_vars should be consistent during the loop") return step_output, new_loop_vars, out_fmt, var_fmt def _create_subgraph(graph_vars, graph_func, subgraph_name): subgraph_name = _get_unique_subgraph_name(subgraph_name) with AttrScope(__subgraph_name__=subgraph_name): # create new variables with the same name, # them feed them to the given func graph_vars, var_fmt = _flatten(graph_vars, "while loop_vars") new_graph_vars = [symbol.var(_get_sym_uniq_name(sym)) for sym in graph_vars] new_graph_vars, _ = _regroup(new_graph_vars, var_fmt) outputs, final_state, out_fmt, var_fmt = graph_func(new_graph_vars) # first `num_out_data` elements belong to `outputs` # other elements belong to `final_state` num_out_data = len(outputs) num_outputs = len(outputs) + len(final_state) # nnvm cut-graph does not allow inputs and outputs overlap # so we calculate the name of inputs, and copy outputs once it overlaps with inputs # group all outputs of graph_func all_input_names = symbol.Group(outputs + final_state).list_inputs() in_input = lambda x: x.name in all_input_names in_graph = lambda x: x.list_attr().get("__subgraph_name__", "") == subgraph_name make_identity = lambda x: symbol.op.identity(x) if in_input(x) or not in_graph(x) \ else x graph = symbol.Group(list(map(make_identity, outputs + final_state))) return graph, num_out_data, num_outputs, out_fmt, var_fmt flatten_loop_vars, init_loop_var_fmt = _flatten(loop_vars, "while loop_vars") _check_data(flatten_loop_vars, symbol.Symbol, "loop_vars should be a symbol or a nested list of symbols") def _union_inputs(*graphs): # Given a list of graphs, each whose inputs are either from loop_vars or other variables. # 1) calculate a list `inputs`, the union of their inputs. 
# 2) for each graph, determine in which indices their inputs reside in `inputs` # 3) for each variable in the input of `graph`, find which index it is inputs = [] # List[Symbol], result of 1) locs = [] # List[Tuple(List[Int], List[Int])], a list of tuples, # where tuples are results of 2) and 3) input_id_to_loc = {} # Dict[int, int], given id(sym), input_id_to_loc maps it # to a `loc`, where inputs[loc] = sym for graph in graphs: # some loop_vars are inputs to `graph`, some are not name_to_loop_vars = {_get_sym_uniq_name(sym): sym for sym in flatten_loop_vars} # other inputs to `graph` created by cut_graph name_to_cut_g_syms = {sym.list_outputs()[0]: sym for sym in _cut_subgraph(graph)} # input_syms: all inputs to the `graph` name_to_input_syms = {sym.name: sym for sym in _get_graph_inputs(graph)} # also we collect the mapping from var's name to var's loc in loop_vars name_to_var_locs = {_get_sym_uniq_name(sym): i for i, sym in enumerate(flatten_loop_vars)} # collect arguments for each subgraph input_locs = [] # results from the second step var_locs = [-1] * len(flatten_loop_vars) # results from the third step subg_input_names = graph.list_inputs() assert len(set(subg_input_names)) == len(subg_input_names), \ "The inputs of the subgraph don't have unique names: " + str(subg_input_names) for name in subg_input_names: assert name in name_to_input_syms # it should obviously hold # name -> sym if name in name_to_loop_vars: sym = name_to_loop_vars[name] elif name in name_to_cut_g_syms: sym = name_to_cut_g_syms[name] else: sym = copy.deepcopy(name_to_input_syms[name]) # do 2), and 1) is implicitly done if id(sym) in input_id_to_loc: loc = input_id_to_loc[id(sym)] else: loc = len(input_id_to_loc) inputs.append(sym) input_id_to_loc[id(sym)] = loc input_locs.append(loc) # do 3) if name in name_to_var_locs: var_locs[name_to_var_locs[name]] = len(input_locs) - 1 locs.append((input_locs, var_locs)) return inputs, locs if max_iterations is None: raise ValueError("max_iterations should be specified") max_iterations = _to_python_scalar(max_iterations, int, "max_iteration") # It should be work as fine if loop_vars are empty I guess, # but it is semantically unnecessary to include this case. if len(loop_vars) == 0: raise ValueError("loop_vars should contain at least one element") # create graph for `cond' cond_g, num_out_data, num_outputs, _, _ = \ _create_subgraph(loop_vars, _cond_wrapper, name + "_cond") assert num_out_data == 0 assert num_outputs == 1 # create graph for `func` func_g, num_out_data, num_outputs, out_fmt, _ = \ _create_subgraph(loop_vars, _func_wrapper, name + "_func") # find symbols used in either cond_g or func_g input_syms, ((cond_input_locs, _), (func_input_locs, func_var_locs)) = \ _union_inputs(cond_g, func_g) for i_th, loc in enumerate(func_var_locs, 1): if loc == -1: raise ValueError("The %d-th loop_var doesn't involve into the computation" % i_th) result = symbol._internal._while_loop( cond_g, func_g, *input_syms, max_iterations=max_iterations, cond_input_locs=cond_input_locs, func_input_locs=func_input_locs, func_var_locs=func_var_locs, num_out_data=num_out_data, num_outputs=num_outputs ) outputs = [result[i] for i in range(num_out_data)] outputs, _ = _regroup(outputs, out_fmt) final_loop_vars = [result[i] for i in range(num_out_data, num_outputs)] final_loop_vars, _ = _regroup(final_loop_vars, init_loop_var_fmt) return outputs, final_loop_vars
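As a hedged illustration (the input values and shapes below are illustrative, not part of the API), the documented cumulative-sum example can be bound to concrete values and evaluated:

import mxnet as mx

# Sketch only: bind the documented example loop to concrete values and run it once.
cond = lambda i, s: i <= 5
func = lambda i, s: ([i + s], [i + 1, s + i])
loop_vars = (mx.sym.var('i'), mx.sym.var('s'))
outputs, states = mx.sym.contrib.while_loop(cond, func, loop_vars, max_iterations=10)
out = mx.sym.Group(list(outputs) + list(states))
exe = out.bind(mx.cpu(), {'i': mx.nd.array([1]), 's': mx.nd.array([0])})
stacked, final_i, final_s = exe.forward()
# stacked.shape == (10, 1): axis 0 is padded to max_iterations, as warned above.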
Run an if-then-else using user-defined condition and computation

This operator simulates an if-like branch which chooses to do one of the two
customized computations according to the specified condition.

`pred` is a scalar MXNet Symbol, indicating which branch of computation should be used.

`then_func` is a user-defined function, used as computation of the then branch.
It produces `outputs`, which is a list of Symbols.
The signature of `then_func` should be `then_func() => nested List[Symbol]`.

`else_func` is a user-defined function, used as computation of the else branch.
It produces `outputs`, which is a list of Symbols.
The signature of `else_func` should be `else_func() => nested List[Symbol]`.

The `outputs` produced by `then_func` and `else_func` should have the same number
of elements, all of which should have the same shape, dtype and stype.

This function returns a list of symbols, representing the computation result.

Parameters
----------
pred: a MXNet Symbol representing a scalar.
    The branch condition.
then_func: a Python function.
    The computation to be executed if `pred` is true.
else_func: a Python function.
    The computation to be executed if `pred` is false.

Returns
-------
outputs: a Symbol or nested lists of Symbols, representing the result of computation.

Examples
--------
>>> a, b = mx.sym.var('a'), mx.sym.var('b')
>>> pred = a * b < 5
>>> then_func = lambda: (a + 5) * (b + 5)
>>> else_func = lambda: (a - 5) * (b - 5)
>>> outputs = mx.sym.contrib.cond(pred, then_func, else_func)
def cond(pred, then_func, else_func, name="cond"): """Run an if-then-else using user-defined condition and computation This operator simulates a if-like branch which chooses to do one of the two customized computations according to the specified condition. `pred` is a scalar MXNet Symbol, indicating which branch of computation should be used. `then_func` is a user-defined function, used as computation of the then branch. It produces `outputs`, which is a list of Symbols. The signature of `then_func` should be `then_func() => nested List[Symbol]`. `else_func` is a user-defined function, used as computation of the else branch. It produces `outputs`, which is a list of Symbols. The signature of `else_func` should be `else_func() => nested List[Symbol]`. The `outputs` produces by `then_func` and `else_func` should have the same number of elements, all of which should be in the same shape, of the same dtype and stype. This function returns a list of symbols, representing the computation result. Parameters ---------- pred: a MXNet Symbol representing a scalar. The branch condition. then_func: a Python function. The computation to be executed if `pred` is true. else_func: a Python function. The computation to be executed if `pred` is false. Returns ------- outputs: a Symbol or nested lists of Symbols, representing the result of computation. Examples -------- >>> a, b = mx.sym.var('a'), mx.sym.var('b') >>> pred = a * b < 5 >>> then_func = lambda: (a + 5) * (b + 5) >>> else_func = lambda: (a - 5) * (b - 5) >>> outputs = mx.sym.contrib.cond(pred, then_func, else_func) """ def _create_subgraph(graph_vars, graph_func, subgraph_name): subgraph_name = _get_unique_subgraph_name(subgraph_name) with AttrScope(__subgraph_name__=subgraph_name): # create new variables with the same name, # them feed them to the given func new_graph_vars = [symbol.var(sym.name) for sym in graph_vars] outputs = graph_func(*new_graph_vars) outputs, out_fmt = _flatten(outputs, "cond outputs") num_outputs = len(outputs) # nnvm cut-graph does not allow inputs and outputs overlap # so we calculate the name of inputs, and copy outputs once it overlaps with inputs # group all outputs of graph_func all_input_names = symbol.Group(outputs).list_inputs() in_input = lambda x: x.name in all_input_names in_graph = lambda x: x.list_attr().get("__subgraph_name__", "") == subgraph_name make_identity = lambda x: symbol.op.identity(x) if in_input(x) or not in_graph(x) \ else x graph = symbol.Group(list(map(make_identity, outputs))) return graph, num_outputs, out_fmt def _union_inputs(*graphs): # Given a list of graphs, each whose inputs are either from input_vars or other variables. # 1) calculate a list `inputs`, the union of their inputs. 
# 2) for each graph, determine in which indices their inputs reside in `inputs` # 3) for each variable in the input of `graph`, find which index it is inputs = [] # List[Symbol], result of 1) locs = [] # List[Tuple(List[Int], List[Int])], a list of tuples, # where tuples are results of 2) and 3) input_id_to_loc = {} # Dict[int, int], given id(sym), input_id_to_loc maps it # to a `loc`, where inputs[loc] = sym for graph in graphs: # some input_vars are inputs to `graph`, some are not name_to_input_vars = {sym.name: sym for sym in inputs} # other inputs to `graph` created by cut_graph name_to_cut_g_syms = {sym.list_outputs()[0]: sym for sym in _cut_subgraph(graph)} # input_syms: all inputs to the `graph` name_to_input_syms = {sym.name: sym for sym in _get_graph_inputs(graph)} # collect arguments for each subgraph input_locs = [] # results from the second step for name in graph.list_inputs(): assert name in name_to_input_syms # it should obviously hold # name -> sym if name in name_to_input_vars: sym = name_to_input_vars[name] elif name in name_to_cut_g_syms: sym = name_to_cut_g_syms[name] else: sym = copy.deepcopy(name_to_input_syms[name]) # do 2), and 1) is implicitly done if id(sym) in input_id_to_loc: loc = input_id_to_loc[id(sym)] else: loc = len(input_id_to_loc) inputs.append(sym) input_id_to_loc[id(sym)] = loc input_locs.append(loc) locs.append(input_locs) return inputs, locs inputs = [] # create graph for `cond_func' cond_g, cond_num_outputs, _ = _create_subgraph(inputs, lambda: pred, name + "_pred") if cond_num_outputs != 1: raise ValueError("pred should always be a single output") # create graph for `then` then_g, then_num_outputs, then_fmt = _create_subgraph(inputs, then_func, name + "_then") # create graph for `else` else_g, else_num_outputs, _ = _create_subgraph(inputs, else_func, name + "_else") if then_num_outputs != else_num_outputs: raise ValueError("Number of outputs differs between then-branch and else-branch") # find symbols used in either cond_g or func_g input_syms, (cond_input_locs, then_input_locs, else_input_locs) = \ _union_inputs(cond_g, then_g, else_g) result = symbol._internal._cond( # [cond, then_g, else_g, *input_syms] cond_g, then_g, else_g, *input_syms, cond_input_locs=cond_input_locs, then_input_locs=then_input_locs, else_input_locs=else_input_locs, num_outputs=then_num_outputs ) outputs = [result[i] for i in range(then_num_outputs)] outputs, _ = _regroup(outputs, then_fmt) return outputs
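A hedged sketch of evaluating the documented example by binding concrete values for `a` and `b`; here `pred` is true (2 * 1 < 5), so the then-branch runs:

import mxnet as mx

a, b = mx.sym.var('a'), mx.sym.var('b')
pred = a * b < 5
out = mx.sym.contrib.cond(pred, lambda: (a + 5) * (b + 5), lambda: (a - 5) * (b - 5))
exe = out.bind(mx.cpu(), {'a': mx.nd.array([2.0]), 'b': mx.nd.array([1.0])})
print(exe.forward()[0].asnumpy())   # [42.] from the then-branch (2 + 5) * (1 + 5)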
Indexes unknown and reserved tokens.
def _index_unknown_and_reserved_tokens(self, unknown_token, reserved_tokens): """Indexes unknown and reserved tokens.""" self._unknown_token = unknown_token # Thus, constants.UNKNOWN_IDX must be 0. self._idx_to_token = [unknown_token] if reserved_tokens is None: self._reserved_tokens = None else: self._reserved_tokens = reserved_tokens[:] self._idx_to_token.extend(reserved_tokens) self._token_to_idx = {token: idx for idx, token in enumerate(self._idx_to_token)}
Indexes keys of `counter`. Indexes keys of `counter` according to frequency thresholds such as `most_freq_count` and `min_freq`.
def _index_counter_keys(self, counter, unknown_token, reserved_tokens, most_freq_count, min_freq): """Indexes keys of `counter`. Indexes keys of `counter` according to frequency thresholds such as `most_freq_count` and `min_freq`. """ assert isinstance(counter, collections.Counter), \ '`counter` must be an instance of collections.Counter.' unknown_and_reserved_tokens = set(reserved_tokens) if reserved_tokens is not None else set() unknown_and_reserved_tokens.add(unknown_token) token_freqs = sorted(counter.items(), key=lambda x: x[0]) token_freqs.sort(key=lambda x: x[1], reverse=True) token_cap = len(unknown_and_reserved_tokens) + ( len(counter) if most_freq_count is None else most_freq_count) for token, freq in token_freqs: if freq < min_freq or len(self._idx_to_token) == token_cap: break if token not in unknown_and_reserved_tokens: self._idx_to_token.append(token) self._token_to_idx[token] = len(self._idx_to_token) - 1
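The ordering used above (an alphabetical sort to break ties, followed by a stable sort by frequency) can be illustrated in plain Python; this is a standalone sketch, not part of the class:

import collections

counter = collections.Counter(['b', 'b', 'a', 'c', 'c', 'c'])
token_freqs = sorted(counter.items(), key=lambda x: x[0])   # alphabetical tie-break
token_freqs.sort(key=lambda x: x[1], reverse=True)          # stable sort by frequency
print(token_freqs)   # [('c', 3), ('b', 2), ('a', 1)]; tokens below min_freq or past the cap are skipped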
Converts tokens to indices according to the vocabulary. Parameters ---------- tokens : str or list of strs A source token or tokens to be converted. Returns ------- int or list of ints A token index or a list of token indices according to the vocabulary.
def to_indices(self, tokens): """Converts tokens to indices according to the vocabulary. Parameters ---------- tokens : str or list of strs A source token or tokens to be converted. Returns ------- int or list of ints A token index or a list of token indices according to the vocabulary. """ to_reduce = False if not isinstance(tokens, list): tokens = [tokens] to_reduce = True indices = [self.token_to_idx[token] if token in self.token_to_idx else C.UNKNOWN_IDX for token in tokens] return indices[0] if to_reduce else indices
Converts token indices to tokens according to the vocabulary. Parameters ---------- indices : int or list of ints A source token index or token indices to be converted. Returns ------- str or list of strs A token or a list of tokens according to the vocabulary.
def to_tokens(self, indices): """Converts token indices to tokens according to the vocabulary. Parameters ---------- indices : int or list of ints A source token index or token indices to be converted. Returns ------- str or list of strs A token or a list of tokens according to the vocabulary. """ to_reduce = False if not isinstance(indices, list): indices = [indices] to_reduce = True max_idx = len(self.idx_to_token) - 1 tokens = [] for idx in indices: if not isinstance(idx, int) or idx > max_idx: raise ValueError('Token index %d in the provided `indices` is invalid.' % idx) else: tokens.append(self.idx_to_token[idx]) return tokens[0] if to_reduce else tokens
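A hedged usage sketch, assuming the surrounding class is exposed as mxnet.contrib.text.vocab.Vocabulary:

import collections
from mxnet.contrib import text

vocab = text.vocab.Vocabulary(collections.Counter(['hello', 'world', 'world']))
print(vocab.to_indices(['world', 'hello', 'unseen']))   # unknown tokens map to index 0
print(vocab.to_tokens([1, 2]))                          # inverse lookup by index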
Create an io iterator by handle.
def _make_io_iterator(handle): """Create an io iterator by handle.""" name = ctypes.c_char_p() desc = ctypes.c_char_p() num_args = mx_uint() arg_names = ctypes.POINTER(ctypes.c_char_p)() arg_types = ctypes.POINTER(ctypes.c_char_p)() arg_descs = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXDataIterGetIterInfo( \ handle, ctypes.byref(name), ctypes.byref(desc), \ ctypes.byref(num_args), \ ctypes.byref(arg_names), \ ctypes.byref(arg_types), \ ctypes.byref(arg_descs))) iter_name = py_str(name.value) narg = int(num_args.value) param_str = _build_param_doc( [py_str(arg_names[i]) for i in range(narg)], [py_str(arg_types[i]) for i in range(narg)], [py_str(arg_descs[i]) for i in range(narg)]) doc_str = ('%s\n\n' + '%s\n' + 'Returns\n' + '-------\n' + 'MXDataIter\n'+ ' The result iterator.') doc_str = doc_str % (desc.value, param_str) def creator(*args, **kwargs): """Create an iterator. The parameters listed below can be passed in as keyword arguments. Parameters ---------- name : string, required. Name of the resulting data iterator. Returns ------- dataiter: Dataiter The resulting data iterator. """ param_keys = [] param_vals = [] for k, val in kwargs.items(): param_keys.append(k) param_vals.append(str(val)) # create atomic symbol param_keys = c_str_array(param_keys) param_vals = c_str_array(param_vals) iter_handle = DataIterHandle() check_call(_LIB.MXDataIterCreateIter( handle, mx_uint(len(param_keys)), param_keys, param_vals, ctypes.byref(iter_handle))) if len(args): raise TypeError('%s can only accept keyword arguments' % iter_name) return MXDataIter(iter_handle, **kwargs) creator.__name__ = iter_name creator.__doc__ = doc_str return creator
List and add all the data iterators to current module.
def _init_io_module(): """List and add all the data iterators to current module.""" plist = ctypes.POINTER(ctypes.c_void_p)() size = ctypes.c_uint() check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist))) module_obj = sys.modules[__name__] for i in range(size.value): hdl = ctypes.c_void_p(plist[i]) dataiter = _make_io_iterator(hdl) setattr(module_obj, dataiter.__name__, dataiter)
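Once registered, each backend iterator becomes a constructor on this module. A hedged sketch using CSVIter; the file name is hypothetical and assumed to contain 3 comma-separated values per row:

import mxnet as mx

it = mx.io.CSVIter(data_csv='data.csv', data_shape=(3,), batch_size=4)
for batch in it:
    print(batch.data[0].shape)   # (4, 3)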
Get DataDesc list from attribute lists.

Parameters
----------
shapes : a list of (name_, shape_) tuples
types : a list of (name_, np.dtype) tuples
def get_list(shapes, types): """Get DataDesc list from attribute lists. Parameters ---------- shapes : a tuple of (name_, shape_) types : a tuple of (name_, np.dtype) """ if types is not None: type_dict = dict(types) return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes] else: return [DataDesc(x[0], x[1]) for x in shapes]
Get next data batch from iterator. Returns ------- DataBatch The data of next batch. Raises ------ StopIteration If the end of the data is reached.
def next(self): """Get next data batch from iterator. Returns ------- DataBatch The data of next batch. Raises ------ StopIteration If the end of the data is reached. """ if self.iter_next(): return DataBatch(data=self.getdata(), label=self.getlabel(), \ pad=self.getpad(), index=self.getindex()) else: raise StopIteration
Ignores any rolled-over data and resets the iterator to the start.
def hard_reset(self): """Ignore roll over data and set to start.""" if self.shuffle: self._shuffle_data() self.cursor = -self.batch_size self._cache_data = None self._cache_label = None
Resets the iterator to the beginning of the data.
def reset(self): """Resets the iterator to the beginning of the data.""" if self.shuffle: self._shuffle_data() # the range below indicate the last batch if self.last_batch_handle == 'roll_over' and \ self.num_data - self.batch_size < self.cursor < self.num_data: # (self.cursor - self.num_data) represents the data we have for the last batch self.cursor = self.cursor - self.num_data - self.batch_size else: self.cursor = -self.batch_size
Increments the cursor by batch_size for the next batch and checks whether the current cursor exceeds the number of data points.
def iter_next(self):
    """Increments the cursor by batch_size for the next batch and checks whether
    the current cursor exceeds the number of data points."""
    self.cursor += self.batch_size
    return self.cursor < self.num_data
Returns the next batch of data.
def next(self): """Returns the next batch of data.""" if not self.iter_next(): raise StopIteration data = self.getdata() label = self.getlabel() # iter should stop when last batch is not complete if data[0].shape[0] != self.batch_size: # in this case, cache it for next epoch self._cache_data = data self._cache_label = label raise StopIteration return DataBatch(data=data, label=label, \ pad=self.getpad(), index=None)
Load data from underlying arrays.
def _getdata(self, data_source, start=None, end=None): """Load data from underlying arrays.""" assert start is not None or end is not None, 'should at least specify start or end' start = start if start is not None else 0 if end is None: end = data_source[0][1].shape[0] if data_source else 0 s = slice(start, end) return [ x[1][s] if isinstance(x[1], (np.ndarray, NDArray)) else # h5py (only supports indices in increasing order) array(x[1][sorted(self.idx[s])][[ list(self.idx[s]).index(i) for i in sorted(self.idx[s]) ]]) for x in data_source ]
Helper function to concatenate two lists of NDArrays.
def _concat(self, first_data, second_data): """Helper function to concat two NDArrays.""" assert len(first_data) == len( second_data), 'data source should contain the same size' if first_data and second_data: return [ concat( first_data[x], second_data[x], dim=0 ) for x in range(len(first_data)) ] elif (not first_data) and (not second_data): return [] else: return [ first_data[0] if first_data else second_data[0] for x in range(len(first_data)) ]
Load data from underlying arrays, internal use only.
def _batchify(self, data_source): """Load data from underlying arrays, internal use only.""" assert self.cursor < self.num_data, 'DataIter needs reset.' # first batch of next epoch with 'roll_over' if self.last_batch_handle == 'roll_over' and \ -self.batch_size < self.cursor < 0: assert self._cache_data is not None or self._cache_label is not None, \ 'next epoch should have cached data' cache_data = self._cache_data if self._cache_data is not None else self._cache_label second_data = self._getdata( data_source, end=self.cursor + self.batch_size) if self._cache_data is not None: self._cache_data = None else: self._cache_label = None return self._concat(cache_data, second_data) # last batch with 'pad' elif self.last_batch_handle == 'pad' and \ self.cursor + self.batch_size > self.num_data: pad = self.batch_size - self.num_data + self.cursor first_data = self._getdata(data_source, start=self.cursor) second_data = self._getdata(data_source, end=pad) return self._concat(first_data, second_data) # normal case else: if self.cursor + self.batch_size < self.num_data: end_idx = self.cursor + self.batch_size # get incomplete last batch else: end_idx = self.num_data return self._getdata(data_source, self.cursor, end_idx)
Get pad value of DataBatch.
def getpad(self): """Get pad value of DataBatch.""" if self.last_batch_handle == 'pad' and \ self.cursor + self.batch_size > self.num_data: return self.cursor + self.batch_size - self.num_data # check the first batch elif self.last_batch_handle == 'roll_over' and \ -self.batch_size < self.cursor < 0: return -self.cursor else: return 0
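A hedged sketch of how the pad value surfaces through NDArrayIter: 7 samples with batch_size=3 leave 2 padded rows in the last batch.

import numpy as np
import mxnet as mx

it = mx.io.NDArrayIter(np.arange(14).reshape(7, 2), batch_size=3, last_batch_handle='pad')
for batch in it:
    print(batch.data[0].shape, batch.pad)   # (3, 2) with pad 0, 0, then 2 for the final batch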
Shuffle the data.
def _shuffle_data(self): """Shuffle the data.""" # shuffle index np.random.shuffle(self.idx) # get the data by corresponding index self.data = _getdata_by_idx(self.data, self.idx) self.label = _getdata_by_idx(self.label, self.idx)
Given a quantized symbol and a dict of params that have not been quantized, generate quantized params. Currently only supports quantizing the arg_params with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols that are excluded from being quantized, their corresponding params will not be quantized, but saved together with quantized params of the symbols that have been quantized. Parameters ---------- qsym : Symbol Quantized symbol from FP32 symbol. params : dict of str->NDArray th_dict: dict of min/max pairs of layers' output
def _quantize_params(qsym, params, th_dict): """Given a quantized symbol and a dict of params that have not been quantized, generate quantized params. Currently only supports quantizing the arg_params with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols that are excluded from being quantized, their corresponding params will not be quantized, but saved together with quantized params of the symbols that have been quantized. Parameters ---------- qsym : Symbol Quantized symbol from FP32 symbol. params : dict of str->NDArray th_dict: dict of min/max pairs of layers' output """ inputs_name = qsym.list_arguments() quantized_params = {} for name in inputs_name: if name.endswith(('weight_quantize', 'bias_quantize')): original_name = name[:-len('_quantize')] param = params[original_name] val, vmin, vmax = ndarray.contrib.quantize(data=param, min_range=ndarray.min(param), max_range=ndarray.max(param), out_type='int8') quantized_params[name] = val quantized_params[name+'_min'] = vmin quantized_params[name+'_max'] = vmax elif name in params: quantized_params[name] = params[name] elif name.endswith(('_min')): output = name[: - len('_min')] if output in th_dict: quantized_params[name] = ndarray.array([th_dict[output][0]]) elif name.endswith(('_max')): output = name[: - len('_min')] if output in th_dict: quantized_params[name] = ndarray.array([th_dict[output][1]]) return quantized_params
Given a symbol object representing a neural network of data type FP32, quantize it into an INT8 network.

Parameters
----------
sym : Symbol
    FP32 neural network symbol.
excluded_symbols : list of strings
    A list of strings representing the names of the symbols that users want to exclude
    from being quantized.
offline_params : list of strs
    Names of the parameters that users want to quantize offline. It's always recommended to
    quantize parameters offline so that quantizing parameters during the inference can be avoided.
quantized_dtype : str
    The quantized destination type for input data.
def _quantize_symbol(sym, excluded_symbols=None, offline_params=None, quantized_dtype='int8'): """Given a symbol object representing a neural network of data type FP32, quantize it into a INT8 network. Parameters ---------- sym : Symbol FP32 neural network symbol. excluded_sym_names : list of strings A list of strings representing the names of the symbols that users want to excluding from being quantized. offline_params : list of strs Names of the parameters that users want to quantize offline. It's always recommended to quantize parameters offline so that quantizing parameters during the inference can be avoided. quantized_dtype: str The quantized destination type for input data. """ num_excluded_symbols = 0 if excluded_symbols is not None: assert isinstance(excluded_symbols, list) num_excluded_symbols = len(excluded_symbols) else: excluded_symbols = [] num_offline = 0 offline = [] if offline_params is not None: num_offline = len(offline_params) for k in offline_params: offline.append(c_str(k)) out = SymbolHandle() check_call(_LIB.MXQuantizeSymbol(sym.handle, ctypes.byref(out), mx_uint(num_excluded_symbols), c_str_array(excluded_symbols), mx_uint(num_offline), c_array(ctypes.c_char_p, offline), c_str(quantized_dtype), ctypes.c_bool(True))) return Symbol(out)
Given a dictionary containing the thresholds for quantizing the layers, set the thresholds into the quantized symbol as the params of requantize operators.
def _calibrate_quantized_sym(qsym, th_dict): """Given a dictionary containing the thresholds for quantizing the layers, set the thresholds into the quantized symbol as the params of requantize operators. """ if th_dict is None or len(th_dict) == 0: return qsym num_layer_outputs = len(th_dict) layer_output_names = [] min_vals = [] max_vals = [] for k, v in th_dict.items(): layer_output_names.append(k) min_vals.append(v[0]) max_vals.append(v[1]) calibrated_sym = SymbolHandle() check_call(_LIB.MXSetCalibTableToQuantizedSymbol(qsym.handle, mx_uint(num_layer_outputs), c_str_array(layer_output_names), c_array(ctypes.c_float, min_vals), c_array(ctypes.c_float, max_vals), ctypes.byref(calibrated_sym))) return Symbol(calibrated_sym)
Collect min and max values from layer outputs and save them in a dictionary mapped by layer names.
def _collect_layer_output_min_max(mod, data, include_layer=None, max_num_examples=None, logger=None): """Collect min and max values from layer outputs and save them in a dictionary mapped by layer names. """ collector = _LayerOutputMinMaxCollector(include_layer=include_layer, logger=logger) num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger) return collector.min_max_dict, num_examples
Collect layer outputs and save them in a dictionary mapped by layer names.
def _collect_layer_outputs(mod, data, include_layer=None, max_num_examples=None, logger=None): """Collect layer outputs and save them in a dictionary mapped by layer names.""" collector = _LayerOutputCollector(include_layer=include_layer, logger=logger) num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger) return collector.nd_dict, num_examples
Given a discrete distribution (which may not have been normalized to 1), smooth it by replacing zeros with eps multiplied by a scaling factor and taking the corresponding amount off the non-zero values. Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
def _smooth_distribution(p, eps=0.0001): """Given a discrete distribution (may have not been normalized to 1), smooth it by replacing zeros with eps multiplied by a scaling factor and taking the corresponding amount off the non-zero values. Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf """ is_zeros = (p == 0).astype(np.float32) is_nonzeros = (p != 0).astype(np.float32) n_zeros = is_zeros.sum() n_nonzeros = p.size - n_zeros if not n_nonzeros: raise ValueError('The discrete probability distribution is malformed. All entries are 0.') eps1 = eps * float(n_zeros) / float(n_nonzeros) assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1) hist = p.astype(np.float32) hist += eps * is_zeros + (-eps1) * is_nonzeros assert (hist <= 0).sum() == 0 return hist
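A small numeric illustration of the smoothing; the array values are arbitrary:

import numpy as np

p = np.array([0.5, 0.5, 0.0, 0.0])
# eps goes onto each zero bin and eps1 = eps * n_zeros / n_nonzeros is taken off each
# nonzero bin, so the total mass is unchanged.
print(_smooth_distribution(p, eps=0.01))   # ~[0.49, 0.49, 0.01, 0.01]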
Given a dataset, find the optimal threshold for quantizing it. The reference distribution is `q`, and the candidate distribution is `p`. `q` is a truncated version of the original distribution. Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
def _get_optimal_threshold(arr, quantized_dtype, num_bins=8001, num_quantized_bins=255): """Given a dataset, find the optimal threshold for quantizing it. The reference distribution is `q`, and the candidate distribution is `p`. `q` is a truncated version of the original distribution. Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf """ if isinstance(arr, NDArray): arr = arr.asnumpy() elif isinstance(arr, list): assert len(arr) != 0 for i, nd in enumerate(arr): if isinstance(nd, NDArray): arr[i] = nd.asnumpy() elif not isinstance(nd, np.ndarray): raise TypeError('get_optimal_threshold only supports input type of NDArray,' ' list of np.ndarrays or NDArrays, and np.ndarray,' ' while received type=%s' % (str(type(nd)))) arr = np.concatenate(arr) elif not isinstance(arr, np.ndarray): raise TypeError('get_optimal_threshold only supports input type of NDArray,' ' list of NDArrays and np.ndarray,' ' while received type=%s' % (str(type(arr)))) min_val = np.min(arr) max_val = np.max(arr) th = max(abs(min_val), abs(max_val)) if min_val >= 0 and quantized_dtype in ['auto', 'uint8']: # We need to move negative bins to positive bins to fit uint8 range. num_quantized_bins = num_quantized_bins * 2 + 1 hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th)) zero_bin_idx = num_bins // 2 num_half_quantized_bins = num_quantized_bins // 2 thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2) divergence = np.zeros_like(thresholds) quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32) # i means the number of bins on half axis excluding the zero bin. for i in range(num_quantized_bins // 2, num_bins // 2 + 1): p_bin_idx_start = zero_bin_idx - i p_bin_idx_stop = zero_bin_idx + i + 1 thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop] sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop] # generate reference distribution p p = sliced_nd_hist.copy() assert p.size % 2 == 1 assert p.size >= num_quantized_bins # put left outlier count in p[0] left_outlier_count = np.sum(hist[0:p_bin_idx_start]) p[0] += left_outlier_count # put right outlier count in p[-1] right_outlier_count = np.sum(hist[p_bin_idx_stop:]) p[-1] += right_outlier_count # is_nonzeros[k] indicates whether hist[k] is nonzero is_nonzeros = (p != 0).astype(np.int32) # calculate how many bins should be merged to generate quantized distribution q num_merged_bins = sliced_nd_hist.size // num_quantized_bins # merge hist into num_quantized_bins bins for j in range(num_quantized_bins): start = j * num_merged_bins stop = start + num_merged_bins quantized_bins[j] = sliced_nd_hist[start:stop].sum() quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum() # expand quantized_bins into p.size bins q = np.zeros(sliced_nd_hist.size, dtype=np.float32) for j in range(num_quantized_bins): start = j * num_merged_bins if j == num_quantized_bins - 1: stop = len(is_nonzeros) else: stop = start + num_merged_bins norm = is_nonzeros[start:stop].sum() if norm != 0: q[start:stop] = float(quantized_bins[j]) / float(norm) q[p == 0] = 0 p = _smooth_distribution(p) # There is a chance that q is an invalid probability distribution. 
        try:
            q = _smooth_distribution(q)
        except ValueError:
            # q could not be smoothed (all of its entries are zero); mark this candidate
            # threshold as unusable and move on to the next one.
            divergence[i - num_half_quantized_bins] = float("inf")
            continue
        divergence[i - num_half_quantized_bins] = stats.entropy(p, q)

    min_divergence_idx = np.argmin(divergence)
    min_divergence = divergence[min_divergence_idx]
    opt_th = thresholds[min_divergence_idx]
    return min_val, max_val, min_divergence, opt_th
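A hedged sketch of running the threshold search on synthetic activations (scipy is required for stats.entropy):

import numpy as np

arr = np.random.normal(size=100000).astype(np.float32)
min_val, max_val, min_divergence, opt_th = _get_optimal_threshold(arr, 'int8')
# opt_th <= max(|min_val|, |max_val|); values outside [-opt_th, opt_th] saturate after quantization.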
Given a ndarray dict, find the optimal threshold for quantizing each value of the key.
def _get_optimal_thresholds(nd_dict, quantized_dtype, num_bins=8001, num_quantized_bins=255, logger=None): """Given a ndarray dict, find the optimal threshold for quantizing each value of the key.""" if stats is None: raise ImportError('scipy.stats is required for running entropy mode of calculating' ' the optimal thresholds for quantizing FP32 ndarrays into int8.' ' Please check if the scipy python bindings are installed.') assert isinstance(nd_dict, dict) if logger is not None: logger.info('Calculating optimal thresholds for quantization using KL divergence' ' with num_bins=%d and num_quantized_bins=%d' % (num_bins, num_quantized_bins)) th_dict = {} # copy nd_dict keys since the keys() only returns a view in python3 layer_names = list(nd_dict.keys()) for name in layer_names: assert name in nd_dict min_val, max_val, min_divergence, opt_th = \ _get_optimal_threshold(nd_dict[name], quantized_dtype, num_bins=num_bins, num_quantized_bins=num_quantized_bins) del nd_dict[name] # release the memory of ndarray if min_val < 0: th_dict[name] = (-opt_th, opt_th) else: th_dict[name] = (0, opt_th) if logger is not None: logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f' % (name, min_val, max_val, min_divergence, opt_th)) return th_dict
Given a str as a path to the symbol .json file, or a Symbol, returns a Symbol object.
def _load_sym(sym, logger=logging):
    """Given a str as a path to the symbol .json file, or a Symbol, returns a Symbol object."""
    if isinstance(sym, str):  # sym is a symbol file path
        cur_path = os.path.dirname(os.path.realpath(__file__))
        symbol_file_path = os.path.join(cur_path, sym)
        logger.info('Loading symbol from file %s' % symbol_file_path)
        return sym_load(symbol_file_path)
    elif isinstance(sym, Symbol):
        return sym
    else:
        raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
                         ' while received type %s' % str(type(sym)))
Given a str as a path to the .params file or a pair of params, returns two dictionaries representing arg_params and aux_params.
def _load_params(params, logger=logging): """Given a str as a path to the .params file or a pair of params, returns two dictionaries representing arg_params and aux_params. """ if isinstance(params, str): cur_path = os.path.dirname(os.path.realpath(__file__)) param_file_path = os.path.join(cur_path, params) logger.info('Loading params from file %s' % param_file_path) save_dict = nd_load(param_file_path) arg_params = {} aux_params = {} for k, v in save_dict.items(): tp, name = k.split(':', 1) if tp == 'arg': arg_params[name] = v if tp == 'aux': aux_params[name] = v return arg_params, aux_params elif isinstance(params, (tuple, list)) and len(params) == 2: return params[0], params[1] else: raise ValueError('Unsupported params provided. Must be either a path to the param file or' ' a pair of dictionaries representing arg_params and aux_params')
User-level API for generating a quantized model from a FP32 model w/ or w/o calibration. The backend quantized operators are only enabled for Linux systems. Please do not run inference using the quantized models on Windows for now. The quantization implementation adopts the TensorFlow's approach: https://www.tensorflow.org/performance/quantization. The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf and adapts the method to MXNet. Parameters ---------- sym : str or Symbol Defines the structure of a neural network for FP32 data types. arg_params : dict Dictionary of name to `NDArray`. aux_params : dict Dictionary of name to `NDArray`. data_names : a list of strs Data names required for creating a Module object to run forward propagation on the calibration dataset. label_names : a list of strs Label names required for creating a Module object to run forward propagation on the calibration dataset. ctx : Context Defines the device that users want to run forward propagation on the calibration dataset for collecting layer output statistics. Currently, only supports single context. excluded_sym_names : list of strings A list of strings representing the names of the symbols that users want to excluding from being quantized. calib_mode : str If calib_mode='none', no calibration will be used and the thresholds for requantization after the corresponding layers will be calculated at runtime by calling min and max operators. The quantized models generated in this mode are normally 10-20% slower than those with calibrations during inference. If calib_mode='naive', the min and max values of the layer outputs from a calibration dataset will be directly taken as the thresholds for quantization. If calib_mode='entropy' (default mode), the thresholds for quantization will be derived such that the KL divergence between the distributions of FP32 layer outputs and quantized layer outputs is minimized based upon the calibration dataset. calib_data : DataIter A data iterator initialized by the calibration dataset. num_calib_examples : int or None The maximum number of examples that user would like to use for calibration. If not provided, the whole calibration dataset will be used. calib_layer : function Given a layer's output name in string, return True or False for deciding whether to calibrate this layer. If yes, the statistics of the layer's output will be collected; otherwise, no information of the layer's output will be collected. If not provided, all the layers' outputs that need requantization will be collected. quantized_dtype : str The quantized destination type for input data. Currently support 'int8' , 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result. Default value is 'int8'. logger : Object A logging object for printing information during the process of quantization. Returns ------- tuple A tuple of quantized symbol, quantized arg_params, and aux_params. -------
def quantize_model(sym, arg_params, aux_params, data_names=('data',), label_names=('softmax_label',), ctx=cpu(), excluded_sym_names=None, calib_mode='entropy', calib_data=None, num_calib_examples=None, calib_layer=None, quantized_dtype='int8', logger=logging): """User-level API for generating a quantized model from a FP32 model w/ or w/o calibration. The backend quantized operators are only enabled for Linux systems. Please do not run inference using the quantized models on Windows for now. The quantization implementation adopts the TensorFlow's approach: https://www.tensorflow.org/performance/quantization. The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf and adapts the method to MXNet. Parameters ---------- sym : str or Symbol Defines the structure of a neural network for FP32 data types. arg_params : dict Dictionary of name to `NDArray`. aux_params : dict Dictionary of name to `NDArray`. data_names : a list of strs Data names required for creating a Module object to run forward propagation on the calibration dataset. label_names : a list of strs Label names required for creating a Module object to run forward propagation on the calibration dataset. ctx : Context Defines the device that users want to run forward propagation on the calibration dataset for collecting layer output statistics. Currently, only supports single context. excluded_sym_names : list of strings A list of strings representing the names of the symbols that users want to excluding from being quantized. calib_mode : str If calib_mode='none', no calibration will be used and the thresholds for requantization after the corresponding layers will be calculated at runtime by calling min and max operators. The quantized models generated in this mode are normally 10-20% slower than those with calibrations during inference. If calib_mode='naive', the min and max values of the layer outputs from a calibration dataset will be directly taken as the thresholds for quantization. If calib_mode='entropy' (default mode), the thresholds for quantization will be derived such that the KL divergence between the distributions of FP32 layer outputs and quantized layer outputs is minimized based upon the calibration dataset. calib_data : DataIter A data iterator initialized by the calibration dataset. num_calib_examples : int or None The maximum number of examples that user would like to use for calibration. If not provided, the whole calibration dataset will be used. calib_layer : function Given a layer's output name in string, return True or False for deciding whether to calibrate this layer. If yes, the statistics of the layer's output will be collected; otherwise, no information of the layer's output will be collected. If not provided, all the layers' outputs that need requantization will be collected. quantized_dtype : str The quantized destination type for input data. Currently support 'int8' , 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result. Default value is 'int8'. logger : Object A logging object for printing information during the process of quantization. Returns ------- tuple A tuple of quantized symbol, quantized arg_params, and aux_params. 
------- """ if excluded_sym_names is None: excluded_sym_names = [] if not isinstance(excluded_sym_names, list): raise ValueError('excluded_sym_names must be a list of strings representing' ' the names of the symbols that will not be quantized,' ' while received type %s' % str(type(excluded_sym_names))) logger.info('Quantizing symbol') if quantized_dtype not in ('int8', 'uint8', 'auto'): raise ValueError('unknown quantized_dtype %s received,' ' expected `int8`, `uint8` or `auto`' % quantized_dtype) qsym = _quantize_symbol(sym, excluded_symbols=excluded_sym_names, offline_params=list(arg_params.keys()), quantized_dtype=quantized_dtype) th_dict = {} if calib_mode is not None and calib_mode != 'none': if not isinstance(ctx, Context): raise ValueError('currently only supports single ctx, while received %s' % str(ctx)) if calib_data is None: raise ValueError('calib_data must be provided when calib_mode=%s' % calib_mode) if not isinstance(calib_data, DataIter): raise ValueError('calib_data must be of DataIter type when calib_mode=%s,' ' while received type %s' % (calib_mode, str(type(calib_data)))) mod = Module(symbol=sym, data_names=data_names, label_names=label_names, context=ctx) if len(calib_data.provide_label) > 0: mod.bind(for_training=False, data_shapes=calib_data.provide_data, label_shapes=calib_data.provide_label) else: mod.bind(for_training=False, data_shapes=calib_data.provide_data) mod.set_params(arg_params, aux_params) if calib_mode == 'entropy': nd_dict, num_examples = _collect_layer_outputs(mod, calib_data, include_layer=calib_layer, max_num_examples=num_calib_examples, logger=logger) logger.info('Collected layer outputs from FP32 model using %d examples' % num_examples) logger.info('Calculating optimal thresholds for quantization') th_dict = _get_optimal_thresholds(nd_dict, quantized_dtype, logger=logger) elif calib_mode == 'naive': th_dict, num_examples = _collect_layer_output_min_max( mod, calib_data, include_layer=calib_layer, max_num_examples=num_calib_examples, logger=logger) logger.info('Collected layer output min/max values from FP32 model using %d examples' % num_examples) else: raise ValueError('unknown calibration mode %s received,' ' expected `none`, `naive`, or `entropy`' % calib_mode) logger.info('Calibrating quantized symbol') qsym = _calibrate_quantized_sym(qsym, th_dict) logger.info('Quantizing parameters') qarg_params = _quantize_params(qsym, arg_params, th_dict) return qsym, qarg_params, aux_params
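A hedged end-to-end sketch; the checkpoint prefix, record file, and excluded layer name are hypothetical:

import mxnet as mx
from mxnet.contrib.quantization import quantize_model

sym, arg_params, aux_params = mx.model.load_checkpoint('resnet', 0)        # hypothetical checkpoint
calib_data = mx.io.ImageRecordIter(path_imgrec='calib.rec',                # hypothetical record file
                                   data_shape=(3, 224, 224), batch_size=32)
qsym, qarg_params, aux_params = quantize_model(
    sym, arg_params, aux_params, ctx=mx.cpu(),
    excluded_sym_names=['softmax'],                                        # hypothetical layer name
    calib_mode='entropy', calib_data=calib_data, num_calib_examples=320)
mx.model.save_checkpoint('resnet-quantized', 0, qsym, qarg_params, aux_params)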
Callback function for collecting layer output NDArrays.
def collect(self, name, arr): """Callback function for collecting layer output NDArrays.""" name = py_str(name) if self.include_layer is not None and not self.include_layer(name): return handle = ctypes.cast(arr, NDArrayHandle) arr = NDArray(handle, writable=False).copyto(cpu()) if self.logger is not None: self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape)) if name in self.nd_dict: self.nd_dict[name].append(arr) else: self.nd_dict[name] = [arr]
Callback function for collecting min and max values from an NDArray.
def collect(self, name, arr): """Callback function for collecting min and max values from an NDArray.""" name = py_str(name) if self.include_layer is not None and not self.include_layer(name): return handle = ctypes.cast(arr, NDArrayHandle) arr = NDArray(handle, writable=False) min_range = ndarray.min(arr).asscalar() max_range = ndarray.max(arr).asscalar() if name in self.min_max_dict: cur_min_max = self.min_max_dict[name] self.min_max_dict[name] = (min(cur_min_max[0], min_range), max(cur_min_max[1], max_range)) else: self.min_max_dict[name] = (min_range, max_range) if self.logger is not None: self.logger.info("Collecting layer %s min_range=%f, max_range=%f" % (name, min_range, max_range))
The encoder is a CNN which takes a 32x32 image as input and generates the 100-dimensional shape embedding as a sample from a normal distribution using the predicted mean and variance
def encoder(nef, z_dim, batch_size, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12): '''The encoder is a CNN which takes 32x32 image as input generates the 100 dimensional shape embedding as a sample from normal distribution using predicted meand and variance ''' BatchNorm = mx.sym.BatchNorm data = mx.sym.Variable('data') e1 = mx.sym.Convolution(data, name='enc1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef, no_bias=no_bias) ebn1 = BatchNorm(e1, name='encbn1', fix_gamma=fix_gamma, eps=eps) eact1 = mx.sym.LeakyReLU(ebn1, name='encact1', act_type='leaky', slope=0.2) e2 = mx.sym.Convolution(eact1, name='enc2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*2, no_bias=no_bias) ebn2 = BatchNorm(e2, name='encbn2', fix_gamma=fix_gamma, eps=eps) eact2 = mx.sym.LeakyReLU(ebn2, name='encact2', act_type='leaky', slope=0.2) e3 = mx.sym.Convolution(eact2, name='enc3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*4, no_bias=no_bias) ebn3 = BatchNorm(e3, name='encbn3', fix_gamma=fix_gamma, eps=eps) eact3 = mx.sym.LeakyReLU(ebn3, name='encact3', act_type='leaky', slope=0.2) e4 = mx.sym.Convolution(eact3, name='enc4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*8, no_bias=no_bias) ebn4 = BatchNorm(e4, name='encbn4', fix_gamma=fix_gamma, eps=eps) eact4 = mx.sym.LeakyReLU(ebn4, name='encact4', act_type='leaky', slope=0.2) eact4 = mx.sym.Flatten(eact4) z_mu = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_mu") z_lv = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_lv") z = z_mu + mx.symbol.broadcast_mul(mx.symbol.exp(0.5*z_lv),mx.symbol.random_normal(loc=0, scale=1,shape=(batch_size,z_dim))) return z_mu, z_lv, z
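A hedged sketch that checks the encoder's output shapes for a batch of 64 single-channel 32x32 images:

import mxnet as mx

z_mu, z_lv, z = encoder(nef=64, z_dim=100, batch_size=64)
_, out_shapes, _ = mx.sym.Group([z_mu, z_lv, z]).infer_shape(data=(64, 1, 32, 32))
print(out_shapes)   # [(64, 100), (64, 100), (64, 100)]: mean, log-variance, sampled embedding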
The generator is a CNN which takes a 100-dimensional embedding as input and reconstructs the input image given to the encoder
def generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=100, activation='sigmoid'): '''The genrator is a CNN which takes 100 dimensional embedding as input and reconstructs the input image given to the encoder ''' BatchNorm = mx.sym.BatchNorm rand = mx.sym.Variable('rand') rand = mx.sym.Reshape(rand, shape=(-1, z_dim, 1, 1)) g1 = mx.sym.Deconvolution(rand, name='gen1', kernel=(5,5), stride=(2,2),target_shape=(2,2), num_filter=ngf*8, no_bias=no_bias) gbn1 = BatchNorm(g1, name='genbn1', fix_gamma=fix_gamma, eps=eps) gact1 = mx.sym.Activation(gbn1, name="genact1", act_type="relu") g2 = mx.sym.Deconvolution(gact1, name='gen2', kernel=(5,5), stride=(2,2),target_shape=(4,4), num_filter=ngf*4, no_bias=no_bias) gbn2 = BatchNorm(g2, name='genbn2', fix_gamma=fix_gamma, eps=eps) gact2 = mx.sym.Activation(gbn2, name='genact2', act_type='relu') g3 = mx.sym.Deconvolution(gact2, name='gen3', kernel=(5,5), stride=(2,2), target_shape=(8,8), num_filter=ngf*2, no_bias=no_bias) gbn3 = BatchNorm(g3, name='genbn3', fix_gamma=fix_gamma, eps=eps) gact3 = mx.sym.Activation(gbn3, name='genact3', act_type='relu') g4 = mx.sym.Deconvolution(gact3, name='gen4', kernel=(5,5), stride=(2,2), target_shape=(16,16), num_filter=ngf, no_bias=no_bias) gbn4 = BatchNorm(g4, name='genbn4', fix_gamma=fix_gamma, eps=eps) gact4 = mx.sym.Activation(gbn4, name='genact4', act_type='relu') g5 = mx.sym.Deconvolution(gact4, name='gen5', kernel=(5,5), stride=(2,2), target_shape=(32,32), num_filter=nc, no_bias=no_bias) gout = mx.sym.Activation(g5, name='genact5', act_type=activation) return gout
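A hedged sketch that checks the generator maps a 100-dimensional embedding back to an nc-channel 32x32 image:

import mxnet as mx

gout = generator(ngf=64, nc=1)
_, out_shape, _ = gout.infer_shape(rand=(8, 100))
print(out_shape)   # [(8, 1, 32, 32)]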
First part of the discriminator which takes a 32x32 image as input and outputs a convolutional feature map; this is required to calculate the layer loss
def discriminator1(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12): '''First part of the discriminator which takes a 32x32 image as input and output a convolutional feature map, this is required to calculate the layer loss''' BatchNorm = mx.sym.BatchNorm data = mx.sym.Variable('data') d1 = mx.sym.Convolution(data, name='d1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf, no_bias=no_bias) dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2) d2 = mx.sym.Convolution(dact1, name='d2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*2, no_bias=no_bias) dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps) dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2) d3 = mx.sym.Convolution(dact2, name='d3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*4, no_bias=no_bias) dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps) dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2) return dact3
Second part of the discriminator which takes a 256x4x4 feature map as input and generates the loss based on whether the input image was real or fake
def discriminator2(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    '''Second part of the discriminator which takes a 256x4x4 feature map as input
    and generates the loss based on whether the input image was real or fake'''

    BatchNorm = mx.sym.BatchNorm
    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')

    d4 = mx.sym.Convolution(data, name='d4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*8,
                            no_bias=no_bias)
    dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
    dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)

    h = mx.sym.Flatten(dact4)

    d5 = mx.sym.FullyConnected(h, num_hidden=1, name="d5")
    dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
    return dloss
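A hedged sketch that checks the feature map produced by the first discriminator half, which both the layer loss and this second half consume:

import mxnet as mx

feat = discriminator1(ndf=64)
_, feat_shape, _ = feat.infer_shape(data=(8, 1, 32, 32))
print(feat_shape)   # [(8, 256, 4, 4)]: three stride-2 convolutions take 32 -> 16 -> 8 -> 4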
Gaussian log-density calculation used for the layer-wise loss
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON=1e-6):
    '''Gaussian log-density calculation used for the layer-wise loss
    '''
    c = mx.sym.ones_like(log_var) * 2.0 * 3.1416   # approximately 2 * pi
    c = mx.symbol.log(c)                           # log(2 * pi)
    var = mx.sym.exp(log_var)
    x_mu2 = mx.symbol.square(x - mu)               # (x - mu)^2, elementwise
    x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
    log_prob = -0.5 * (c + log_var + x_mu2_over_var)
    log_prob = mx.symbol.sum(log_prob, axis=1, name=name)   # sum over the feature dimension
    return log_prob
Calculate the discriminator layer loss
def DiscriminatorLayerLoss(): '''Calculate the discriminator layer loss ''' data = mx.sym.Variable('data') label = mx.sym.Variable('label') data = mx.sym.Flatten(data) label = mx.sym.Flatten(label) label = mx.sym.BlockGrad(label) zeros = mx.sym.zeros_like(data) output = -GaussianLogDensity(label, data, zeros) dloss = mx.symbol.MakeLoss(mx.symbol.mean(output),name='lloss') return dloss
KL divergence loss
def KLDivergenceLoss(): '''KLDivergenceLoss loss ''' data = mx.sym.Variable('data') mu1, lv1 = mx.sym.split(data, num_outputs=2, axis=0) mu2 = mx.sym.zeros_like(mu1) lv2 = mx.sym.zeros_like(lv1) v1 = mx.sym.exp(lv1) v2 = mx.sym.exp(lv2) mu_diff_sq = mx.sym.square(mu1 - mu2) dimwise_kld = .5 * ( (lv2 - lv1) + mx.symbol.broadcast_div(v1, v2) + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.) KL = mx.symbol.sum(dimwise_kld, axis=1) KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL),name='KLloss') return KLloss
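With a standard normal prior (mu2 = 0, lv2 = 0), the per-dimension term above reduces to the familiar closed form; a hedged numeric check in plain NumPy:

import numpy as np

mu, log_var = 0.3, -0.5
var = np.exp(log_var)
# 0.5 * ((lv2 - lv1) + v1/v2 + (mu1 - mu2)^2 / v2 - 1) with mu2 = lv2 = 0
kl = 0.5 * (var + mu ** 2 - 1.0 - log_var)
print(kl)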
Get the dataset
def get_data(path, activation): '''Get the dataset ''' data = [] image_names = [] for filename in os.listdir(path): img = cv2.imread(os.path.join(path,filename), cv2.IMREAD_GRAYSCALE) image_names.append(filename) if img is not None: data.append(img) data = np.asarray(data) if activation == 'sigmoid': data = data.astype(np.float32)/(255.0) elif activation == 'tanh': data = data.astype(np.float32)/(255.0/2) - 1.0 data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2])) np.random.seed(1234) p = np.random.permutation(data.shape[0]) X = data[p] return X, image_names
fill the ith grid of the buffer matrix with the values from the img
buf : buffer matrix
i : serial of the image in the 2D grid
img : image data
shape : (height, width) of each image
def fill_buf(buf, i, img, shape): '''fill the ith grid of the buffer matrix with the values from the img buf : buffer matrix i : serial of the image in the 2D grid img : image data shape : ( height width depth ) of image''' # grid height is a multiple of individual image height m = buf.shape[0]/shape[0] sx = (i%m)*shape[1] sy = (i//m)*shape[0] sx = int(sx) sy = int(sy) buf[sy:sy+shape[0], sx:sx+shape[1], :] = img
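A hedged sketch that tiles 16 random 8x8 single-channel images into a 4x4 grid buffer:

import numpy as np

imgs = np.random.randint(0, 255, size=(16, 8, 8, 1)).astype(np.uint8)
n = int(np.ceil(np.sqrt(imgs.shape[0])))
buf = np.zeros((n * imgs.shape[1], n * imgs.shape[2], imgs.shape[3]), dtype=np.uint8)
for i, img in enumerate(imgs):
    fill_buf(buf, i, img, imgs.shape[1:3])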
create a grid of images and save it as a final image title : grid image name X : array of images
def visual(title, X, activation): '''create a grid of images and save it as a final image title : grid image name X : array of images ''' assert len(X.shape) == 4 X = X.transpose((0, 2, 3, 1)) if activation == 'sigmoid': X = np.clip((X)*(255.0), 0, 255).astype(np.uint8) elif activation == 'tanh': X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8) n = np.ceil(np.sqrt(X.shape[0])) buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8) for i, img in enumerate(X): fill_buf(buff, i, img, X.shape[1:3]) cv2.imwrite('%s.jpg' % (title), buff)
adversarial training of the VAE
def train(dataset, nef, ndf, ngf, nc, batch_size, Z, lr, beta1, epsilon, ctx, check_point, g_dl_weight,
          output_path, checkpoint_path, data_path, activation, num_epoch, save_after_every,
          visualize_after_every, show_after_every):
    '''Adversarial training of the VAE'''
    # encoder
    z_mu, z_lv, z = encoder(nef, Z, batch_size)
    symE = mx.sym.Group([z_mu, z_lv, z])

    # generator
    symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=Z, activation=activation)

    # discriminator
    h = discriminator1(ndf)
    dloss = discriminator2(ndf)
    symD1 = h
    symD2 = dloss

    # ==============data==============
    X_train, _ = get_data(data_path, activation)
    train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size, shuffle=True)
    rand_iter = RandIter(batch_size, Z)
    label = mx.nd.zeros((batch_size,), ctx=ctx)

    # =============module E=============
    modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
    modE.bind(data_shapes=train_iter.provide_data)
    modE.init_params(initializer=mx.init.Normal(0.02))
    modE.init_optimizer(
        optimizer='adam',
        optimizer_params={'learning_rate': lr, 'wd': 1e-6, 'beta1': beta1, 'epsilon': epsilon,
                          'rescale_grad': (1.0/batch_size)})
    mods = [modE]

    # =============module G=============
    modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
    modG.bind(data_shapes=rand_iter.provide_data, inputs_need_grad=True)
    modG.init_params(initializer=mx.init.Normal(0.02))
    modG.init_optimizer(
        optimizer='adam',
        optimizer_params={'learning_rate': lr, 'wd': 1e-6, 'beta1': beta1, 'epsilon': epsilon})
    mods.append(modG)

    # =============module D=============
    modD1 = mx.mod.Module(symD1, label_names=[], context=ctx)
    modD2 = mx.mod.Module(symD2, label_names=('label',), context=ctx)
    modD = mx.mod.SequentialModule()
    modD.add(modD1).add(modD2, take_labels=True, auto_wiring=True)
    modD.bind(data_shapes=train_iter.provide_data,
              label_shapes=[('label', (batch_size,))],
              inputs_need_grad=True)
    modD.init_params(initializer=mx.init.Normal(0.02))
    modD.init_optimizer(
        optimizer='adam',
        optimizer_params={'learning_rate': lr, 'wd': 1e-3, 'beta1': beta1, 'epsilon': epsilon,
                          'rescale_grad': (1.0/batch_size)})
    mods.append(modD)

    # =============module DL=============
    symDL = DiscriminatorLayerLoss()
    modDL = mx.mod.Module(symbol=symDL, data_names=('data',), label_names=('label',), context=ctx)
    modDL.bind(data_shapes=[('data', (batch_size, nef * 4, 4, 4))],  # fix 512 here
               label_shapes=[('label', (batch_size, nef * 4, 4, 4))],
               inputs_need_grad=True)
    modDL.init_params(initializer=mx.init.Normal(0.02))
    modDL.init_optimizer(
        optimizer='adam',
        optimizer_params={'learning_rate': lr, 'wd': 0., 'beta1': beta1, 'epsilon': epsilon,
                          'rescale_grad': (1.0/batch_size)})

    # =============module KL=============
    symKL = KLDivergenceLoss()
    modKL = mx.mod.Module(symbol=symKL, data_names=('data',), label_names=None, context=ctx)
    modKL.bind(data_shapes=[('data', (batch_size*2, Z))], inputs_need_grad=True)
    modKL.init_params(initializer=mx.init.Normal(0.02))
    modKL.init_optimizer(
        optimizer='adam',
        optimizer_params={'learning_rate': lr, 'wd': 0., 'beta1': beta1, 'epsilon': epsilon,
                          'rescale_grad': (1.0/batch_size)})
    mods.append(modKL)

    def norm_stat(d):
        return mx.nd.norm(d)/np.sqrt(d.size)
    mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
    mon = None
    if mon is not None:
        for mod in mods:
            pass

    def facc(label, pred):
        '''calculating prediction accuracy '''
        pred = pred.ravel()
        label = label.ravel()
        return ((pred > 0.5) == label).mean()

    def fentropy(label, pred):
        '''calculating binary cross-entropy loss '''
        pred = pred.ravel()
        label = label.ravel()
        return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()

    def kldivergence(label, pred):
        '''calculating KL divergence loss '''
        mean, log_var = np.split(pred, 2, axis=0)
        var = np.exp(log_var)
        KLLoss = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
        KLLoss = KLLoss / nElements
        return KLLoss

    mG = mx.metric.CustomMetric(fentropy)
    mD = mx.metric.CustomMetric(fentropy)
    mE = mx.metric.CustomMetric(kldivergence)
    mACC = mx.metric.CustomMetric(facc)

    print('Training...')
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')

    # =============train===============
    for epoch in range(num_epoch):
        train_iter.reset()
        for t, batch in enumerate(train_iter):
            rbatch = rand_iter.next()

            if mon is not None:
                mon.tic()

            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()

            # update discriminator on fake
            label[:] = 0
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            gradD11 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD12 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])

            # update discriminator on decoded
            modE.forward(batch, is_train=True)
            mu, lv, z = modE.get_outputs()
            z = z.reshape((batch_size, Z, 1, 1))
            sample = mx.io.DataBatch([z], label=None, provide_data=[('rand', (batch_size, Z, 1, 1))])
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 0
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            #modD.update()
            gradD21 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD22 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])

            # update discriminator on real
            label[:] = 1
            batch.label = [label]
            modD.forward(batch, is_train=True)
            lx = [out.copyto(out.context) for out in modD1.get_outputs()]
            modD.backward()
            for gradsr, gradsf, gradsd in zip(modD1._exec_group.grad_arrays, gradD11, gradD21):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    gradr += 0.5 * (gradf + gradd)
            for gradsr, gradsf, gradsd in zip(modD2._exec_group.grad_arrays, gradD12, gradD22):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    gradr += 0.5 * (gradf + gradd)
            modD.update()
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])

            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad[:] = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)  # in-place, so the blended gradient takes effect
            modG.update()
            mG.update([label], modD.get_outputs())

            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad[:] = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)  # in-place, so the blended gradient takes effect
            modG.update()
            mG.update([label], modD.get_outputs())

            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()

            # update generator
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            DLloss = modDL.get_outputs()
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)

            # update encoder
            nElements = batch_size
            modKL.forward(mx.io.DataBatch([mx.ndarray.concat(mu, lv, dim=0)]), is_train=True)
            KLloss = modKL.get_outputs()
            modKL.backward()
            gradKLLoss = modKL.get_input_grads()
            diffG = modG.get_input_grads()
            diffG = diffG[0].reshape((batch_size, Z))
            modE.backward(mx.ndarray.split(gradKLLoss[0], num_outputs=2, axis=0) + [diffG])
            modE.update()
            pred = mx.ndarray.concat(mu, lv, dim=0)
            mE.update([pred], [pred])

            if mon is not None:
                mon.toc_print()

            t += 1
            if t % show_after_every == 0:
                print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get(), mE.get(),
                      KLloss[0].asnumpy(), DLloss[0].asnumpy())
                mACC.reset()
                mG.reset()
                mD.reset()
                mE.reset()

        if epoch % visualize_after_every == 0:
            visual(output_path + 'gout' + str(epoch), outG[0].asnumpy(), activation)
            visual(output_path + 'data' + str(epoch), batch.data[0].asnumpy(), activation)

        if check_point and epoch % save_after_every == 0:
            print('Saving...')
            modG.save_params(checkpoint_path + '/%s_G-%04d.params' % (dataset, epoch))
            modD.save_params(checkpoint_path + '/%s_D-%04d.params' % (dataset, epoch))
            modE.save_params(checkpoint_path + '/%s_E-%04d.params' % (dataset, epoch))
Creates/Validates dir
def create_and_validate_dir(data_dir):
    '''Creates/Validates dir '''
    if data_dir != "":
        if not os.path.exists(data_dir):
            try:
                logging.info('create directory %s', data_dir)
                os.makedirs(data_dir)
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise OSError('failed to create ' + data_dir)
Parse args
def parse_args():
    '''Parse args '''
    parser = argparse.ArgumentParser(description='Train and Test an Adversarial Variational Encoder')
    parser.add_argument('--train', help='train the network', action='store_true')
    parser.add_argument('--test', help='test the network', action='store_true')
    parser.add_argument('--save_embedding', help='saves the shape embedding of each input image', action='store_true')
    parser.add_argument('--dataset', help='dataset name', default='caltech', type=str)
    parser.add_argument('--activation', help='activation i.e. sigmoid or tanh', default='sigmoid', type=str)
    parser.add_argument('--training_data_path', help='training data path', default='datasets/caltech101/data/images32x32', type=str)
    parser.add_argument('--testing_data_path', help='testing data path', default='datasets/caltech101/test_data', type=str)
    parser.add_argument('--pretrained_encoder_path', help='pretrained encoder model path', default='checkpoints32x32_sigmoid/caltech_E-0045.params', type=str)
    parser.add_argument('--pretrained_generator_path', help='pretrained generator model path', default='checkpoints32x32_sigmoid/caltech_G-0045.params', type=str)
    parser.add_argument('--output_path', help='output path for the generated images', default='outputs32x32_sigmoid', type=str)
    parser.add_argument('--embedding_path', help='output path for the generated embeddings', default='outputs32x32_sigmoid', type=str)
    parser.add_argument('--checkpoint_path', help='checkpoint saving path', default='checkpoints32x32_sigmoid', type=str)
    parser.add_argument('--nef', help='encoder filter count in the first layer', default=64, type=int)
    parser.add_argument('--ndf', help='discriminator filter count in the first layer', default=64, type=int)
    parser.add_argument('--ngf', help='generator filter count in the second last layer', default=64, type=int)
    parser.add_argument('--nc', help='generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image', default=1, type=int)
    parser.add_argument('--batch_size', help='batch size, keep it 1 during testing', default=64, type=int)
    parser.add_argument('--Z', help='embedding size', default=100, type=int)
    parser.add_argument('--lr', help='learning rate', default=0.0002, type=float)
    parser.add_argument('--beta1', help='beta1 for adam optimizer', default=0.5, type=float)
    parser.add_argument('--epsilon', help='epsilon for adam optimizer', default=1e-5, type=float)
    parser.add_argument('--g_dl_weight', help='discriminator layer loss weight', default=1e-1, type=float)
    parser.add_argument('--gpu', help='gpu index', default=0, type=int)
    parser.add_argument('--use_cpu', help='use cpu', action='store_true')
    parser.add_argument('--num_epoch', help='number of maximum epochs', default=45, type=int)
    parser.add_argument('--save_after_every', help='save checkpoint after every this number of epochs', default=5, type=int)
    parser.add_argument('--visualize_after_every', help='save output images after every this number of epochs', default=5, type=int)
    parser.add_argument('--show_after_every', help='show metrics after this number of iterations', default=10, type=int)
    args = parser.parse_args()
    return args
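A minimal sketch of how the parsed arguments could be wired to create_and_validate_dir and the adversarial train loop above; the entry point, context selection, and wiring are assumptions and not part of the original script.

# Hypothetical entry point; names and wiring are assumed, not taken from the original source.
if __name__ == '__main__':
    args = parse_args()
    ctx = mx.cpu() if args.use_cpu else mx.gpu(args.gpu)
    create_and_validate_dir(args.checkpoint_path)
    create_and_validate_dir(args.output_path)
    if args.train:
        train(args.dataset, args.nef, args.ndf, args.ngf, args.nc, args.batch_size,
              args.Z, args.lr, args.beta1, args.epsilon, ctx, True, args.g_dl_weight,
              args.output_path, args.checkpoint_path, args.training_data_path,
              args.activation, args.num_epoch, args.save_after_every,
              args.visualize_after_every, args.show_after_every)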
Gets root mse between the logarithms of the prediction and the truth.
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
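A small self-contained check of get_rmse_log; square_loss is assumed to be gluon.loss.L2Loss() (which halves the squared error, hence the factor of 2 above), and the data here is random and purely illustrative.

square_loss = gluon.loss.L2Loss()  # assumed definition; explains the 2x factor in get_rmse_log
net = gluon.nn.Dense(1)
net.initialize()
X = nd.random.uniform(low=1, high=10, shape=(8, 4))
y = nd.random.uniform(low=1, high=10, shape=(8, 1))
print(get_rmse_log(net, X, y))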
Gets a neural network. Better results are obtained with modifications.
def get_net():
    """Gets a neural network. Better results are obtained with modifications."""
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(50, activation="relu"))
        net.add(gluon.nn.Dense(1))
    net.initialize()
    return net
Trains the model.
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size):
    """Trains the model."""
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate, 'wd': weight_decay})
    net.initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
        avg_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, avg_loss))
    return avg_loss
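A hedged usage sketch of this training loop; the hyperparameter values are made up, and X_train/y_train are assumed to be NDArrays prepared elsewhere.

net = get_net()
final_rmse = train(net, X_train, y_train, epochs=100, verbose_epoch=95,
                   learning_rate=0.01, weight_decay=0.0, batch_size=64)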
Conducts k-fold cross validation for the model.
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model."""
    assert k > 1
    fold_size = X_train.shape[0] // k

    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        X_val_test = X_train[test_idx * fold_size: (test_idx + 1) * fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
                           learning_rate, weight_decay, batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k
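A typical invocation might look as follows; the hyperparameter values are illustrative only, and X_train/y_train are assumed to be the full training NDArrays.

k = 5
avg_train_loss, avg_test_loss = k_fold_cross_valid(
    k, epochs=100, verbose_epoch=95, X_train=X_train, y_train=y_train,
    learning_rate=0.01, weight_decay=0.0, batch_size=64)
print("%d-fold: avg train loss %f, avg test loss %f" % (k, avg_train_loss, avg_test_loss))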
Trains the model and predicts on the test data set.
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay, batch_size):
    """Trains the model and predicts on the test data set."""
    net = get_net()
    _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
              weight_decay, batch_size)
    # X_test (the feature matrix of `test`) is expected to be defined in the surrounding scope.
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
Create CapsNet
def capsnet(batch_size, n_class, num_routing, recon_loss_weight):
    """Create CapsNet"""
    # data.shape = [batch_size, 1, 28, 28]
    data = mx.sym.Variable('data')
    input_shape = (1, 28, 28)

    # Conv2D layer
    # net.shape = [batch_size, 256, 20, 20]
    conv1 = mx.sym.Convolution(data=data, num_filter=256, kernel=(9, 9), layout='NCHW', name='conv1')
    conv1 = mx.sym.Activation(data=conv1, act_type='relu', name='conv1_act')

    # net.shape = [batch_size, 256, 6, 6]
    primarycaps = primary_caps(data=conv1, dim_vector=8, n_channels=32, kernel=(9, 9),
                               strides=[2, 2], name='primarycaps')
    primarycaps.infer_shape(data=(batch_size, 1, 28, 28))

    # CapsuleLayer
    kernel_initializer = mx.init.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3)
    bias_initializer = mx.init.Zero()
    digitcaps = CapsuleLayer(num_capsule=10, dim_vector=16, batch_size=batch_size,
                             kernel_initializer=kernel_initializer,
                             bias_initializer=bias_initializer,
                             num_routing=num_routing)(primarycaps)

    # out_caps : (batch_size, 10)
    out_caps = mx.sym.sqrt(data=mx.sym.sum(mx.sym.square(digitcaps), 2))
    out_caps.infer_shape(data=(batch_size, 1, 28, 28))

    y = mx.sym.Variable('softmax_label', shape=(batch_size,))
    y_onehot = mx.sym.one_hot(y, n_class)
    y_reshaped = mx.sym.Reshape(data=y_onehot, shape=(batch_size, -4, n_class, -1))
    y_reshaped.infer_shape(softmax_label=(batch_size,))

    # inputs_masked : (batch_size, 16)
    inputs_masked = mx.sym.linalg_gemm2(y_reshaped, digitcaps, transpose_a=True)
    inputs_masked = mx.sym.Reshape(data=inputs_masked, shape=(-3, 0))
    x_recon = mx.sym.FullyConnected(data=inputs_masked, num_hidden=512, name='x_recon')
    x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act')
    x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=1024, name='x_recon2')
    x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act2')
    x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=np.prod(input_shape), name='x_recon3')
    x_recon = mx.sym.Activation(data=x_recon, act_type='sigmoid', name='x_recon_act3')

    data_flatten = mx.sym.flatten(data=data)
    squared_error = mx.sym.square(x_recon - data_flatten)
    recon_error = mx.sym.mean(squared_error)
    recon_error_stopped = recon_error
    recon_error_stopped = mx.sym.BlockGrad(recon_error_stopped)
    loss = mx.symbol.MakeLoss((1 - recon_loss_weight) * margin_loss(y_onehot, out_caps)
                              + recon_loss_weight * recon_error)

    out_caps_blocked = out_caps
    out_caps_blocked = mx.sym.BlockGrad(out_caps_blocked)
    return mx.sym.Group([out_caps_blocked, loss, recon_error_stopped])
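A hedged sketch of building the symbol and binding it into a Module the way do_training below expects; the hyperparameter values and data shapes are assumptions, not taken from the original script.

sym = capsnet(batch_size=100, n_class=10, num_routing=3, recon_loss_weight=0.392)  # values assumed
module = mx.mod.Module(symbol=sym, context=mx.cpu(),
                       data_names=('data',), label_names=('softmax_label',))
module.bind(data_shapes=[('data', (100, 1, 28, 28))],
            label_shapes=[('softmax_label', (100,))])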
Perform CapsNet training
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
    """Perform CapsNet training"""
    # args, module, train_iter, val_iter and loss_metric are expected to be defined
    # at module level by the surrounding script.
    summary_writer = SummaryWriter(args.tblog_dir)
    lr_scheduler = SimpleLRScheduler(learning_rate)
    optimizer_params = {'lr_scheduler': lr_scheduler}
    module.init_params()
    module.init_optimizer(kvstore=kvstore, optimizer=optimizer, optimizer_params=optimizer_params)
    n_epoch = 0

    while True:
        if n_epoch >= num_epoch:
            break

        train_iter.reset()
        val_iter.reset()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(train_iter):
            module.forward_backward(data_batch)
            module.update()
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        train_acc, train_loss, train_recon_err = loss_metric.get_name_value()

        loss_metric.reset()
        for n_batch, data_batch in enumerate(val_iter):
            module.forward(data_batch)
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        val_acc, val_loss, val_recon_err = loss_metric.get_name_value()

        summary_writer.add_scalar('train_acc', train_acc, n_epoch)
        summary_writer.add_scalar('train_loss', train_loss, n_epoch)
        summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
        summary_writer.add_scalar('val_acc', val_acc, n_epoch)
        summary_writer.add_scalar('val_loss', val_loss, n_epoch)
        summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)

        print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss, train_recon_err))
        print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))
        print('SAVE CHECKPOINT')

        module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
        n_epoch += 1
        lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)
Shuffle the data.
def _shuffle(data, idx):
    """Shuffle the data."""
    shuffle_data = []
    for idx_k, idx_v in data:
        shuffle_data.append((idx_k, mx.ndarray.array(idx_v.asnumpy()[idx], idx_v.context)))
    return shuffle_data
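Illustrative use of _shuffle with a random permutation of row indices; the (name, NDArray) pair below is made up.

idx = np.arange(6)
np.random.shuffle(idx)
data = [('data', mx.nd.arange(6).reshape((6, 1)))]
print(_shuffle(data, idx))  # rows of the NDArray reordered according to idx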
Resets the iterator (MNISTCustomIter); shuffles the data when training.
def reset(self):
    """Resets the iterator (MNISTCustomIter); shuffles the data when training."""
    # shuffle data
    if self.is_train:
        np.random.shuffle(self.idx)
        self.data = _shuffle(self.data, self.idx)
        self.label = _shuffle(self.label, self.idx)

    if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
        self.cursor = -self.batch_size + (self.cursor % self.num_data) % self.batch_size
    else:
        self.cursor = -self.batch_size
Update the accuracy, loss and reconstruction error of CapsNet
def update(self, labels, preds):
    """Update the accuracy, loss and reconstruction error of CapsNet"""
    batch_sum_metric = 0
    batch_num_inst = 0
    for label, pred_outcaps in zip(labels[0], preds[0]):
        label_np = int(label.asnumpy())
        pred_label = int(np.argmax(pred_outcaps.asnumpy()))
        batch_sum_metric += int(label_np == pred_label)
        batch_num_inst += 1
    batch_loss = preds[1].asnumpy()
    recon_loss = preds[2].asnumpy()
    self.sum_metric += batch_sum_metric
    self.num_inst += batch_num_inst
    self.loss += batch_loss
    self.recon_loss += recon_loss
    self.batch_sum_metric = batch_sum_metric
    self.batch_num_inst = batch_num_inst
    self.batch_loss = batch_loss
    self.n_batch += 1
Generate next of iterator
def next(self):
    """Generate next of iterator"""
    if self.iter_next():
        if self.is_train:
            data_raw_list = self.getdata()
            data_shifted = []
            for data_raw in data_raw_list[0]:
                data_shifted.append(random_shift(data_raw.asnumpy(), 0.1, 0.1))
            return mx.io.DataBatch(data=[mx.nd.array(data_shifted)], label=self.getlabel(),
                                   pad=self.getpad(), index=None)
        else:
            return mx.io.DataBatch(data=self.getdata(), label=self.getlabel(),
                                   pad=self.getpad(), index=None)
    else:
        raise StopIteration
Get the attribute dict given the attribute set by the symbol. Parameters ---------- attr : dict of string to string The attribute passed in by user during symbol creation. Returns ------- attr : dict of string to string Updated attributes to add other scope related attributes.
def get(self, attr):
    """
    Get the attribute dict given the attribute set by the symbol.

    Parameters
    ----------
    attr : dict of string to string
        The attribute passed in by user during symbol creation.

    Returns
    -------
    attr : dict of string to string
        Updated attributes to add other scope related attributes.
    """
    if self._attr:
        ret = self._attr.copy()
        if attr:
            ret.update(attr)
        return ret
    else:
        return attr if attr else {}
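A hedged illustration, assuming this method belongs to mxnet.AttrScope; the attribute names used below are made up.

scope = mx.AttrScope(ctx_group='stage1')
print(scope.get({'lr_mult': '0.1'}))  # {'ctx_group': 'stage1', 'lr_mult': '0.1'}
print(scope.get(None))                # {'ctx_group': 'stage1'}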
Create kvstore assuming some parameters' storage types are row_sparse. Parameters ---------- kvstore : KVStore or str The kvstore. Returns ------- kvstore : KVStore update_on_kvstore : bool. Always True.
def _create_sparse_kvstore(kvstore):
    """Create kvstore assuming some parameters' storage types are row_sparse.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.

    Returns
    -------
    kvstore : KVStore
    update_on_kvstore : bool. Always True.
    """
    # always update on kvstore
    update_on_kvstore = True
    if isinstance(kvstore, kvs.KVStore):
        kv = kvstore
    elif isinstance(kvstore, str):
        kv = kvs.create(kvstore)
    else:
        raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
                        "The type must be KVStore or str." % kvstore)
    return (kv, update_on_kvstore)
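Brief usage sketch; 'device' is one valid kvstore type string.

kv, update_on_kvstore = _create_sparse_kvstore('device')
assert update_on_kvstore  # row_sparse parameters always update on the kvstore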
Create kvstore This function select and create a proper kvstore if given the kvstore type. Parameters ---------- kvstore : KVStore or str The kvstore. num_device : int The number of devices arg_params : dict of str to `NDArray`. Model parameter, dict of name to `NDArray` of net's weights.
def _create_kvstore(kvstore, num_device, arg_params):
    """Create kvstore.
    This function selects and creates a proper kvstore given the kvstore type.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.
    num_device : int
        The number of devices
    arg_params : dict of str to `NDArray`.
        Model parameter, dict of name to `NDArray` of net's weights.
    """
    update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
    if kvstore is None:
        kv = None
    elif isinstance(kvstore, kvs.KVStore):
        kv = kvstore
    elif isinstance(kvstore, str):
        # create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
            # no need to use kv for single device and single machine
            kv = None
        else:
            kv = kvs.create(kvstore)
            if kvstore == 'local':
                # automatically select a proper local kvstore type
                max_size = max(np.prod(param.shape) for param in arg_params.values())
                if max_size > 1024 * 1024 * 16:
                    update_on_kvstore = False
    else:
        raise TypeError('kvstore must be KVStore, str or None')

    if kv is None:
        update_on_kvstore = False

    return (kv, update_on_kvstore)
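A sketch of the single-device shortcut described above; the parameter dict is hypothetical.

arg_params = {'fc_weight': mx.nd.zeros((10, 10))}
kv, update_on_kvstore = _create_kvstore('local', num_device=1, arg_params=arg_params)
assert kv is None and not update_on_kvstore  # single device + 'local' skips the kvstore entirely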
Initialize kvstore
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
    """Initialize kvstore"""
    for idx, param_on_devs in enumerate(param_arrays):
        name = param_names[idx]
        kvstore.init(name, arg_params[name])

        if update_on_kvstore:
            kvstore.pull(name, param_on_devs, priority=-idx)
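A minimal sketch of pushing initial weights for two parameters to a local kvstore; the parameter names and shapes are hypothetical.

kv = mx.kv.create('local')
arg_params = {'w0': mx.nd.ones((4, 4)), 'w1': mx.nd.zeros((4,))}
param_names = ['w0', 'w1']
param_arrays = [[arg_params[n].copyto(mx.cpu())] for n in param_names]
_initialize_kvstore(kv, param_arrays, arg_params, param_names, update_on_kvstore=True)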