code stringlengths 17 6.64M |
|---|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class FFTSHIFTOp(gof.Op):
    """Theano Op wrapping ``np.fft.fftshift``; its gradient is ifftshift."""

    __props__ = ()

    def output_type(self, inp):
        # Same dtype and rank as the input, fully non-broadcastable.
        bcast = [False] * inp.type.ndim
        return T.TensorType(inp.dtype, broadcastable=bcast)

    def make_node(self, x, axes=None):
        x = T.as_tensor_variable(x)
        if x.ndim < 2:
            raise TypeError(('%s: input must have dimension >= 2. For example,'
                             % self.__class__.__name__)
                            + '(n_batches, 2, nx, ny[, nt])')
        if axes is None:
            axes = list(range(x.ndim))
        elif isinstance(axes, int):
            axes = (axes,)
        axes = T.as_tensor_variable(axes)
        dtype = axes.dtype
        if not (dtype.startswith('int') or dtype.startswith('uint')):
            raise TypeError('%s: length of the transformed axis must be of type integer'
                            % self.__class__.__name__)
        return gof.Apply(self, [x, axes], [self.output_type(x)()])

    def perform(self, node, inputs, output_storage):
        data, shift_axes = inputs
        output_storage[0][0] = np.fft.fftshift(data, shift_axes)

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        # The shift is a permutation, so its gradient is the inverse shift;
        # the axes input is non-differentiable.
        return [ifftshift_op(gout, inputs[1]), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on x only, not on axes.
        return [[True], [False]]
|
class IFFTSHIFTOp(gof.Op):
    """Theano Op wrapping ``np.fft.ifftshift``; its gradient is fftshift."""

    __props__ = ()

    def output_type(self, inp):
        # Same dtype and rank as the input, fully non-broadcastable.
        bcast = [False] * inp.type.ndim
        return T.TensorType(inp.dtype, broadcastable=bcast)

    def make_node(self, x, axes=None):
        x = T.as_tensor_variable(x)
        if x.ndim < 2:
            raise TypeError(('%s: input must have dimension >= 2. For example'
                             % self.__class__.__name__)
                            + '(n_batches, 2, nx, ny[, nt])')
        if axes is None:
            axes = list(range(x.ndim))
        elif isinstance(axes, int):
            axes = (axes,)
        axes = T.as_tensor_variable(axes)
        dtype = axes.dtype
        if not (dtype.startswith('int') or dtype.startswith('uint')):
            raise TypeError('%s: length of the transformed axis must be of type integer'
                            % self.__class__.__name__)
        return gof.Apply(self, [x, axes], [self.output_type(x)()])

    def perform(self, node, inputs, output_storage):
        data, shift_axes = inputs
        output_storage[0][0] = np.fft.ifftshift(data, shift_axes)

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        # Inverse shift's gradient is the forward shift; axes carry no grad.
        return [fftshift_op(gout, inputs[1]), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on x only, not on axes.
        return [[True], [False]]
|
def fftshift(x, axes=None):
    """Symbolic ``np.fft.fftshift``; the gradient is implemented as ifftshift.

    Parameters
    ----------
    x : array_like
        Input array.
    axes : int or shape tuple, optional
        Axes over which to shift. Defaults to None, which shifts all axes.

    Returns
    -------
    y : ndarray
        The shifted array.
    """
    return fftshift_op(x, axes)
|
def ifftshift(x, axes=None):
    """Symbolic ``np.fft.ifftshift``; the gradient is implemented as fftshift.

    Parameters
    ----------
    x : array_like
        Input array.
    axes : int or shape tuple, optional
        Axes over which to shift. Defaults to None, which shifts all axes.

    Returns
    -------
    y : ndarray
        The shifted array.
    """
    return ifftshift_op(x, axes)
|
class CuFFTOp(Op):
    """Forward complex-to-complex FFT on the GPU via scikit-cuda/cuFFT.

    Input is a float32 GpuArray of shape (m, ..., n, 2); the trailing axis
    of length 2 holds the real/imaginary pair, so the op performs m batched
    transforms of logical size (..., n).
    """
    __props__ = ()

    def output_type(self, inp):
        # Output keeps the input's rank, dtype, and GPU context.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)

    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuFFTOp')
        # cuFFT requires contiguous data on the correct GPU context.
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Default transform shape: all axes between batch and re/im pair.
            s = inp.shape[1:(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        assert ('int' in s.dtype)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Initialize scikit-cuda inside the input's GPU context.
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # One-element lists so the closure below can cache the plan between calls.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            # NOTE(review): compares input dims against s[:-1] — presumably
            # the last entry of s is treated specially; confirm intent.
            assert (input_shape[1:(- 1)] == s[:(- 1)]).all()
            output_shape = input_shape
            z = outputs[0]
            # Reallocate the output buffer only when its shape changed.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                # Rebuild the cuFFT plan only when the input shape changes.
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=input_shape[0])
                input_pycuda.sync()
                output_pycuda.sync()
                fft.fft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the forward FFT flows through the inverse FFT op;
        # the shape input s is non-differentiable.
        return [cuifft_op(gout, s), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on the data input only, not on s.
        return [[True], [False]]
|
class CuIFFTOp(Op):
    """Inverse complex-to-complex FFT on the GPU via scikit-cuda/cuFFT.

    Input/output are float32 GpuArrays of shape (m, ..., n, 2) with the
    real/imaginary pair on the trailing axis. No normalization is applied
    here; scaling is the caller's responsibility (see ``cuifft``).
    """
    __props__ = ()

    def output_type(self, inp):
        # Output keeps the input's rank, dtype, and GPU context.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)

    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuIFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuIFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuIFFTOp')
        # cuFFT requires contiguous data on the correct GPU context.
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Default transform shape: all axes between batch and re/im pair.
            s = inp.shape[1:(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Initialize scikit-cuda inside the input's GPU context.
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # One-element lists so the closure below can cache the plan between calls.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            output_shape = input_shape
            z = outputs[0]
            # Reallocate the output buffer only when its shape changed.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                # Rebuild the cuFFT plan only when the input shape changes.
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=output_shape[0])
                input_pycuda.sync()
                output_pycuda.sync()
                fft.ifft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the inverse FFT flows through the forward FFT op;
        # the shape input s is non-differentiable.
        return [cufft_op(gout, s), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on the data input only, not on s.
        return [[True], [False]]
|
def cufft(inp, norm=None):
    """GPU forward FFT of a complex-as-pairs input.

    Parameters
    ----------
    inp
        float32 array of shape (m, ..., n, 2); m batched transforms of
        size (..., n), with real/imaginary parts on the trailing axis.
    norm : {None, 'ortho', 'no_norm'}
        Following numpy: None and 'no_norm' leave the forward transform
        unscaled; 'ortho' divides by sqrt(prod(transform sizes)).
    """
    sizes = inp.shape[1:(- 1)]
    if _unitary(norm) == 'ortho':
        scale = T.sqrt(sizes.prod().astype('float32'))
    else:
        scale = 1
    return cufft_op(inp, sizes) / scale
|
def cuifft(inp, norm=None, is_odd=False):
    """GPU inverse FFT of a complex-as-pairs input.

    Parameters
    ----------
    inp
        float32 array of shape (m, ..., n, 2); m batched inverse
        transforms with real/imaginary parts on the trailing axis.
    norm : {None, 'ortho', 'no_norm'}
        Following numpy: None scales the inverse by prod(transform sizes),
        'ortho' by its square root, 'no_norm' not at all.
    is_odd : {True, False}
        Validated only; not otherwise used in this variant.
        NOTE(review): confirm whether the odd-length adjustment was
        intentionally dropped here (cf. ``cuirfft``).

    Raises
    ------
    ValueError
        If ``is_odd`` is not a bool or ``norm`` is invalid.
    """
    if is_odd not in (True, False):
        # Bug fix: the message previously said 'id_odd' instead of the
        # actual parameter name 'is_odd'.
        raise ValueError('Invalid value %s for is_odd, must be True or False'
                         % is_odd)
    s = inp.shape[1:(- 1)]
    cond_norm = _unitary(norm)
    scaling = 1
    if cond_norm is None:
        scaling = s.prod().astype('float32')
    elif cond_norm == 'ortho':
        scaling = T.sqrt(s.prod().astype('float32'))
    return cuifft_op(inp, s) / scaling
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class CuFFT2Op(Op):
    """Forward 2-D complex-to-complex FFT on the GPU via scikit-cuda/cuFFT.

    Input is a float32 GpuArray of shape (m, ..., nx, ny, 2); the trailing
    axis holds the real/imaginary pair, and the transform is over the
    (nx, ny) axes with everything before them flattened into the batch.
    """
    __props__ = ()

    def output_type(self, inp):
        # Output keeps the input's rank, dtype, and GPU context.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)

    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuFFTOp')
        # cuFFT requires contiguous data on the correct GPU context.
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Default transform shape: the (nx, ny) axes before the re/im pair.
            s = inp.shape[(- 3):(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        assert ('int' in s.dtype)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Initialize scikit-cuda inside the input's GPU context.
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # One-element lists so the closure below can cache the plan between calls.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            assert (input_shape[(- 3):(- 1)] == s).all()
            output_shape = input_shape
            z = outputs[0]
            # Reallocate the output buffer only when its shape changed.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                # Rebuild the cuFFT plan only when the input shape changes.
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    # All leading axes are folded into the batch dimension.
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=np.prod(input_shape[:(- 3)]))
                input_pycuda.sync()
                output_pycuda.sync()
                fft.fft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient flows through the inverse 2-D FFT; s is non-differentiable.
        return [cuifft2_op(gout, s), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on the data input only, not on s.
        return [[True], [False]]
|
class CuIFFT2Op(Op):
    """Inverse 2-D complex-to-complex FFT on the GPU via scikit-cuda/cuFFT.

    Input/output are float32 GpuArrays of shape (m, ..., nx, ny, 2).
    No normalization is applied here; scaling is done in ``cuifft2``.
    """
    __props__ = ()

    def output_type(self, inp):
        # Output keeps the input's rank, dtype, and GPU context.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)

    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuIFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuIFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuIFFTOp')
        # cuFFT requires contiguous data on the correct GPU context.
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Default transform shape: the (nx, ny) axes before the re/im pair.
            s = inp.shape[(- 3):(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Initialize scikit-cuda inside the input's GPU context.
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # One-element lists so the closure below can cache the plan between calls.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            output_shape = input_shape
            z = outputs[0]
            # Reallocate the output buffer only when its shape changed.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                # Rebuild the cuFFT plan only when the input shape changes.
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    # All leading axes are folded into the batch dimension.
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=np.prod(input_shape[:(- 3)]))
                input_pycuda.sync()
                output_pycuda.sync()
                fft.ifft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient flows through the forward 2-D FFT; s is non-differentiable.
        return [cufft2_op(gout, s), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on the data input only, not on s.
        return [[True], [False]]
|
def cufft2(inp, norm=None):
    """GPU 2-D forward FFT of a complex-as-pairs input.

    Parameters
    ----------
    inp
        float32 array of shape (m, ..., nx, ny, 2); m batched 2-D
        transforms of size (..., nx, ny). Output has the same shape.
    norm : {None, 'ortho', 'no_norm'}
        None and 'no_norm' leave the forward transform unscaled;
        'ortho' divides by sqrt(nx * ny).
    """
    print('... using GPU implementation for fft2')
    sizes = inp.shape[(- 3):(- 1)]
    if _unitary(norm) == 'ortho':
        scale = T.sqrt(sizes.prod().astype('float32'))
    else:
        scale = 1
    return cufft2_op(inp, sizes) / scale
|
def cuifft2(inp, norm=None):
    """GPU 2-D inverse FFT of a complex-as-pairs input.

    Parameters
    ----------
    inp
        float32 array of shape (m, ..., nx, ny, 2); m batched 2-D inverse
        transforms of size (..., nx, ny). Output has the same shape.
    norm : {None, 'ortho', 'no_norm'}
        None scales the inverse by nx * ny, 'ortho' by sqrt(nx * ny),
        'no_norm' not at all.
    """
    print('... using GPU implementation for ifft2')
    sizes = inp.shape[(- 3):(- 1)]
    mode = _unitary(norm)
    if mode is None:
        scale = sizes.prod().astype('float32')
    elif mode == 'ortho':
        scale = T.sqrt(sizes.prod().astype('float32'))
    else:
        scale = 1
    return cuifft2_op(inp, sizes) / scale
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class CuRFFTOp(Op):
    """Real-to-complex FFT on the GPU via scikit-cuda/cuFFT.

    Input is a real float32 GpuArray of shape (m, ..., n); the output has
    one extra trailing axis of length 2 (real/imag) and its last transform
    axis shortened to n//2 + 1 (the non-redundant rFFT coefficients).
    """
    __props__ = ()

    def output_type(self, inp):
        # One extra axis for the real/imaginary pair.
        return GpuArrayType(inp.dtype, broadcastable=([False] * (inp.type.ndim + 1)), context_name=inp.type.context_name)

    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuFFTOp')
        # cuFFT requires contiguous data on the correct GPU context.
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Default transform shape: everything after the batch axis.
            s = inp.shape[1:]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        assert ('int' in s.dtype)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Initialize scikit-cuda inside the input's GPU context.
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # One-element lists so the closure below can cache the plan between calls.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            assert (input_shape[1:] == s).all()
            # Output: last transform axis becomes n//2 + 1, plus a trailing
            # axis of length 2 for the real/imaginary parts.
            output_shape = ([input_shape[0]] + list(s))
            output_shape[(- 1)] = ((output_shape[(- 1)] // 2) + 1)
            output_shape += [2]
            output_shape = tuple(output_shape)
            z = outputs[0]
            # Reallocate the output buffer only when its shape changed.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                # Rebuild the cuFFT plan only when the input shape changes.
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(s, np.float32, np.complex64, batch=input_shape[0])
                input_pycuda.sync()
                output_pycuda.sync()
                fft.fft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Halve the gradient on the interior frequencies: they appear twice
        # in the full spectrum but only once in the rFFT output.
        idx = ((([slice(None)] * (gout.ndim - 2)) + [slice(1, ((s[(- 1)] // 2) + (s[(- 1)] % 2)))]) + [slice(None)])
        gout = T.set_subtensor(gout[idx], (gout[idx] * 0.5))
        return [cuirfft_op(gout, s), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on the data input only, not on s.
        return [[True], [False]]
|
class CuIRFFTOp(Op):
    """Complex-to-real inverse FFT on the GPU via scikit-cuda/cuFFT.

    Input is a float32 GpuArray of shape (m, ..., n//2+1, 2) holding the
    non-redundant rFFT coefficients; output is a real float32 GpuArray of
    shape (m, ..., n) (one axis fewer). Unnormalized; see ``cuirfft``.
    """
    __props__ = ()

    def output_type(self, inp):
        # One axis fewer: the trailing real/imaginary pair is consumed.
        return GpuArrayType(inp.dtype, broadcastable=([False] * (inp.type.ndim - 1)), context_name=inp.type.context_name)

    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuIFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuIFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuIFFTOp')
        # cuFFT requires contiguous data on the correct GPU context.
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            s = inp.shape[1:(- 1)]
            # Recover the even full length n = (n//2+1 - 1) * 2 from the
            # stored half-spectrum length.
            s = T.set_subtensor(s[(- 1)], ((s[(- 1)] - 1) * 2))
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])

    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Initialize scikit-cuda inside the input's GPU context.
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # One-element lists so the closure below can cache the plan between calls.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            assert (input_shape[1:(- 2)] == s[:(- 1)]).all()
            # Half-spectrum length must be consistent with the requested
            # (possibly odd) full output length.
            assert ((((input_shape[(- 2)] - 1) * 2) + (s[(- 1)] % 2)) == s[(- 1)]).all()
            output_shape = ([input_shape[0]] + list(s))
            output_shape = tuple(output_shape)
            z = outputs[0]
            # Reallocate the output buffer only when its shape changed.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                # Rebuild the cuFFT plan only when the input shape changes.
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(s, np.complex64, np.float32, batch=output_shape[0])
                input_pycuda.sync()
                output_pycuda.sync()
                fft.ifft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        gf = curfft_op(gout, s)
        # Double the interior frequencies: they represent two conjugate
        # coefficients of the full spectrum (inverse of CuRFFTOp.grad).
        idx = ((([slice(None)] * (gf.ndim - 2)) + [slice(1, ((s[(- 1)] // 2) + (s[(- 1)] % 2)))]) + [slice(None)])
        gf = T.set_subtensor(gf[idx], (gf[idx] * 2))
        return [gf, DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on the data input only, not on s.
        return [[True], [False]]
|
def curfft(inp, norm=None):
    """GPU FFT of a real-valued input.

    Parameters
    ----------
    inp
        Real float32 array of shape (m, ..., n): m transforms of size
        (..., n). The result has shape (m, ..., n//2+1, 2), the trailing
        pair holding real and imaginary parts of the non-trivial
        rFFT coefficients.
    norm : {None, 'ortho', 'no_norm'}
        None and 'no_norm' leave the forward transform unscaled;
        'ortho' divides by sqrt(prod(transform sizes)).
    """
    sizes = inp.shape[1:]
    if _unitary(norm) == 'ortho':
        scale = T.sqrt(sizes.prod().astype('float32'))
    else:
        scale = 1
    return curfft_op(inp, sizes) / scale
|
def cuirfft(inp, norm=None, is_odd=False):
    """GPU inverse FFT with real-valued output.

    Parameters
    ----------
    inp
        float32 array of shape (m, ..., n//2+1, 2) containing the
        non-trivial rFFT coefficients, real/imaginary parts on the
        trailing axis.
    norm : {None, 'ortho', 'no_norm'}
        None scales the inverse by prod(output sizes), 'ortho' by its
        square root, 'no_norm' not at all.
    is_odd : {True, False}
        True reconstructs an odd last dimension of length (N-1)*2 + 1
        for an input last dimension of length N.

    Raises
    ------
    ValueError
        If ``is_odd`` is not a bool or ``norm`` is invalid.
    """
    if is_odd not in (True, False):
        # Bug fix: the message previously said 'id_odd' instead of the
        # actual parameter name 'is_odd'.
        raise ValueError('Invalid value %s for is_odd, must be True or False'
                         % is_odd)
    s = inp.shape[1:(- 1)]
    # Recover the full output length from the stored half-spectrum length.
    if is_odd:
        s = T.set_subtensor(s[(- 1)], (((s[(- 1)] - 1) * 2) + 1))
    else:
        s = T.set_subtensor(s[(- 1)], ((s[(- 1)] - 1) * 2))
    cond_norm = _unitary(norm)
    scaling = 1
    if cond_norm is None:
        scaling = s.prod().astype('float32')
    elif cond_norm == 'ortho':
        scaling = T.sqrt(s.prod().astype('float32'))
    return cuirfft_op(inp, s) / scaling
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
def tensor5(name=None, dtype=None):
    """Return a symbolic 5-D theano tensor variable.

    Parameters
    ----------
    name : str, optional
        Name for the variable.
    dtype : str, optional
        Element dtype; defaults to ``theano.config.floatX``.
    """
    if dtype is None:
        dtype = theano.config.floatX
    # Renamed local: the original shadowed the builtin `type`.
    ttype = T.TensorType(dtype, ((False,) * 5))
    return ttype(name)
|
def prep_input(im, acc=4):
    """Undersample the batch, then reformat it into what the network accepts.

    Parameters
    ----------
    im : image batch to undersample.
    acc : acceleration factor controlling the undersampling rate;
        higher means more undersampling.

    Returns
    -------
    (im_und_l, k_und_l, mask_l, im_gnd_l) in lasagne layout.
    """
    sampling_mask = cs.cartesian_mask(im.shape, acc, sample_n=8)
    im_und, k_und = cs.undersample(im, sampling_mask, centred=False, norm='ortho')
    # Convert everything to the channel layout the network expects.
    return (to_lasagne_format(im_und),
            to_lasagne_format(k_und),
            to_lasagne_format(sampling_mask, mask=True),
            to_lasagne_format(im))
|
def iterate_minibatch(data, batch_size, shuffle=True):
    """Yield successive mini-batches of ``batch_size`` items from ``data``.

    The final batch may be shorter. When ``shuffle`` is True the data is
    permuted first (``np.random.permutation`` copies, so the caller's
    sequence is left untouched).
    """
    n = len(data)
    if shuffle:
        data = np.random.permutation(data)
    # Bug fix: `xrange` is Python-2-only; `range` behaves identically here.
    for i in range(0, n, batch_size):
        yield data[i:(i + batch_size)]
|
def create_dummy_data():
    """Create a dummy dataset from one knee subject for demo purposes.

    In practice one should use a much bigger dataset, and train/test
    should have similar distributions.

    Source: http://mridata.org/
    """
    volume = loadmat(join(project_root, './data/lustig_knee_p2.mat'))['xn']
    nx, ny, nz, nc = volume.shape
    # Slice the same volume along three different axes to fake three splits.
    train = np.transpose(volume, (3, 0, 1, 2)).reshape((-1, ny, nz))
    validate = np.transpose(volume, (3, 1, 0, 2)).reshape((-1, nx, nz))
    test = np.transpose(volume, (3, 2, 0, 1)).reshape((-1, nx, ny))
    return (train, validate, test)
|
def compile_fn(network, net_config, args):
    """Create the training and validation theano functions.

    Parameters
    ----------
    network : lasagne output layer.
    net_config : dict exposing 'input', 'mask' and 'kspace_input' layers.
    args : parsed arguments; ``lr`` and ``l2`` are 1-element sequences.

    Returns
    -------
    (train_fn, val_fn) compiled theano functions.
    """
    base_lr = float(args.lr[0])
    l2 = float(args.l2[0])
    input_var = net_config['input'].input_var
    mask_var = net_config['mask'].input_var
    kspace_var = net_config['kspace_input'].input_var
    target_var = T.tensor4('targets')
    pred = lasagne.layers.get_output(network)
    loss_sq = (lasagne.objectives.squared_error(target_var, pred).mean() * 2)
    # Bug fix: `loss` was only assigned inside `if l2:`, so l2 == 0 raised
    # a NameError below. Default to the plain squared-error loss.
    loss = loss_sq
    if l2:
        l2_penalty = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)
        loss = (loss_sq + (l2_penalty * l2))
    update_rule = lasagne.updates.adam
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = update_rule(loss, params, learning_rate=base_lr)
    print(' Compiling ... ')
    t_start = time.time()
    train_fn = theano.function([input_var, mask_var, kspace_var, target_var], [loss], updates=updates, on_unused_input='ignore')
    val_fn = theano.function([input_var, mask_var, kspace_var, target_var], [loss, pred], on_unused_input='ignore')
    t_end = time.time()
    print((' ... Done, took %.4f s' % (t_end - t_start)))
    return (train_fn, val_fn)
|
def mse(x, y):
    """Mean squared error between x and y (complex-safe via abs)."""
    diff = np.abs(x - y)
    return np.mean(diff ** 2)
|
def psnr(x, y):
    """Measure the PSNR of reconstruction ``y`` w.r.t. reference ``x``.

    Images must both be integer uint8 (range 0..256) or float (range 0..1).

    :param x: [m, n] reference image
    :param y: [m, n] reconstruction
    :return: PSNR in dB
    """
    assert (x.shape == y.shape)
    # Bug fix: `np.float` was removed in numpy >= 1.24; `np.floating`
    # is the correct abstract float scalar type.
    assert ((x.dtype == y.dtype) or (np.issubdtype(x.dtype, np.floating) and np.issubdtype(y.dtype, np.floating)))
    if (x.dtype == np.uint8):
        max_intensity = 256
    else:
        max_intensity = 1
    # Bug fix: compute the difference in float — uint8 subtraction and
    # squaring wrap around modulo 256 and silently corrupt the MSE.
    diff = x.astype(np.float64) - y.astype(np.float64)
    mse = (np.sum(diff ** 2) / x.size)
    return ((20 * np.log10(max_intensity)) - (10 * np.log10(mse)))
|
def complex_psnr(x, y, peak='normalized'):
    """PSNR for complex-valued data.

    x: reference image; y: reconstructed image;
    peak: 'normalized' (peak value 1.0) or 'max' (peak taken from x).

    Note that ``abs`` squares. Be careful with the argument order, since
    the peak intensity is taken from the reference image (taking it from
    the reconstruction yields a different value).
    """
    err = np.mean(np.abs(x - y) ** 2)
    if peak == 'max':
        peak_power = np.max(np.abs(x)) ** 2
        return 10 * np.log10(peak_power / err)
    return 10 * np.log10(1.0 / err)
|
def fftc(x, axis=(- 1), norm='ortho'):
    """Centered 1-D FFT along ``axis``: ifftshift -> fft -> fftshift.

    Expects x as an m*n matrix.
    """
    decentered = ifftshift(x, axes=axis)
    return fftshift(fft(decentered, axis=axis, norm=norm), axes=axis)
|
def ifftc(x, axis=(- 1), norm='ortho'):
    """Centered 1-D inverse FFT along ``axis``: ifftshift -> ifft -> fftshift.

    Expects x as an m*n matrix.
    """
    decentered = ifftshift(x, axes=axis)
    return fftshift(ifft(decentered, axis=axis, norm=norm), axes=axis)
|
def fft2c(x):
    """Centered 2-D FFT over the last two axes (ortho-normalized).

    Note: fft2 applies the FFT to the last 2 axes by default.
    :param x: 2D onwards, e.g. 3d: (n, row, col), 4d: (n, slice, row, col)
    :return: centered 2-D FFT of x
    """
    last_two = ((- 2), (- 1))
    decentered = ifftshift(x, axes=last_two)
    return fftshift(fft2(decentered, norm='ortho'), axes=last_two)
|
def ifft2c(x):
    """Centered 2-D inverse FFT over the last two axes (ortho-normalized).

    Note: fft2 applies the FFT to the last 2 axes by default.
    :param x: 2D onwards, e.g. 3d: (n, row, col), 4d: (n, slice, row, col)
    :return: centered 2-D inverse FFT of x
    """
    last_two = ((- 2), (- 1))
    decentered = ifftshift(x, axes=last_two)
    return fftshift(ifft2(decentered, norm='ortho'), axes=last_two)
|
def fourier_matrix(rows, cols):
    """Return the unitary (rows x cols) Fourier matrix.

    Entry (r, c) is exp(-2*pi*i*r*c/cols) / sqrt(cols).
    """
    exponents = np.outer(np.arange(rows), np.arange(cols))
    scale = 1 / np.sqrt(cols)
    return np.exp(exponents * ((-2.0 * np.pi * 1j) / cols)) * scale
|
def inverse_fourier_matrix(rows, cols):
    """Return the inverse (conjugate transpose) of the unitary Fourier matrix."""
    # Modernized: np.matrix is deprecated; .conj().T on an ndarray is the
    # same Hermitian transpose that np.matrix.getH() computed.
    return np.array(fourier_matrix(rows, cols).conj().T)
|
def flip(m, axis):
    """Reverse the order of elements along ``axis`` (backport of np.flip, numpy >= 1.12).

    Returns a constant-time view of ``m`` with the entries of ``axis``
    reversed; the shape is preserved. Raises ValueError when ``axis`` is
    out of range for ``m``.

    flip(m, 0) is equivalent to flipud(m); flip(m, 1) to fliplr(m);
    flip(m, n) corresponds to ``m[..., ::-1, ...]`` with ``::-1`` at
    position n.
    """
    if not hasattr(m, 'ndim'):
        m = np.asarray(m)
    selector = [slice(None)] * m.ndim
    try:
        selector[axis] = slice(None, None, (- 1))
    except IndexError:
        raise ValueError(('axis=%i is invalid for the %i-dimensional input array' % (axis, m.ndim)))
    return m[tuple(selector)]
|
def rot90_nd(x, axes=((- 2), (- 1)), k=1):
    """Rotate x by k quarter-turns in the plane given by ``axes``."""
    x = np.asanyarray(x)
    if x.ndim < 2:
        raise ValueError('Input must >= 2-d.')
    turns = k % 4
    if turns == 0:
        return x
    if turns == 1:
        # fliplr followed by transposing the two rotation axes.
        return flip(x, axes[1]).swapaxes(*axes)
    if turns == 2:
        # 180 degrees: flip both axes.
        return flip(flip(x, axes[0]), axes[1])
    # turns == 3: transpose first, then fliplr.
    return flip(x.swapaxes(*axes), axes[1])
|
class ClevrBatcher():
    """Mini-batch server over the preprocessed CLEVR HDF5 dataset.

    Reads one split lazily from data/preprocessed/clevr.h5 and serves
    fixed-size batches, wrapping back to the start rather than yielding
    a short final batch.
    """

    def __init__(self, batchSize, split, maxSamples=None, rand=True):
        # NOTE(review): `rand` is accepted but never used — confirm intent.
        dat = h5py.File('data/preprocessed/clevr.h5', 'r')
        self.questions = dat[(split + 'Questions')]
        self.answers = dat[(split + 'Answers')]
        self.programs = dat[(split + 'Programs')]
        self.imgs = dat[(split + 'Imgs')]
        self.pMask = dat[(split + 'ProgramMask')]
        self.imgIdx = dat[(split + 'ImageIdx')]
        self.batchSize = batchSize
        if (maxSamples is not None):
            self.m = maxSamples
        else:
            # Truncate to a whole number of batches.
            self.m = ((len(self.questions) // batchSize) * batchSize)
        self.batches = (self.m // batchSize)
        self.pos = 0  # cursor into the split; wraps around in next()

    def next(self):
        batchSize = self.batchSize
        # Wrap to the start instead of serving a short final batch.
        if ((self.pos + batchSize) > self.m):
            self.pos = 0
        imgIdx = self.imgIdx[self.pos:(self.pos + batchSize)]
        # h5py fancy indexing requires sorted unique indices: fetch each
        # unique image once, then re-expand to the per-sample order.
        uniqueIdx = np.unique(imgIdx).tolist()
        mapTo = np.arange(len(uniqueIdx)).tolist()
        mapDict = dict(zip(uniqueIdx, mapTo))
        relIdx = [mapDict[x] for x in imgIdx]
        imgs = self.imgs[np.unique(imgIdx).tolist()][relIdx]
        questions = self.questions[self.pos:(self.pos + batchSize)]
        answers = self.answers[self.pos:(self.pos + batchSize)]
        programs = self.programs[self.pos:(self.pos + batchSize)]
        pMask = self.pMask[self.pos:(self.pos + batchSize)]
        self.pos += batchSize
        # (inputs, targets, masks)
        return ([questions, imgs, imgIdx], [programs, answers], [pMask])
|
def buildVocab(fName):
    """Build token->index and index->token dicts from a whitespace-separated file.

    Indices are 1-based (0 is presumably reserved, e.g. for padding — confirm).
    """
    tokens = open(fName).read().split()
    vocab = dict(zip(tokens, (1 + np.arange(len(tokens)))))
    invVocab = {idx: tok for (tok, idx) in vocab.items()}
    return (vocab, invVocab)
|
def applyVocab(line, vocab):
    """Map every token of ``line`` through ``vocab``; return an index array.

    Raises KeyError on out-of-vocabulary tokens.
    """
    return np.asarray([vocab[token] for token in line])
|
def applyInvVocab(x, vocab):
    """Map indices back through the inverted vocab and join into one string."""
    tokens = applyVocab(x, utils.invertDict(vocab))
    return ''.join(tokens)
|
def invertDict(x):
    """Return a dict with keys and values swapped."""
    return dict((value, key) for (key, value) in x.items())
|
def loadDict(fName):
    # Load a Python dict literal from a text file.
    # SECURITY: eval executes arbitrary code from the file; only use this
    # on trusted files (ast.literal_eval would be the safe alternative).
    with open(fName) as f:
        s = eval(f.read())
    return s
|
def norm(x, n=2):
    """Return the n-norm of x divided by the total number of elements."""
    total = np.sum(np.abs(x) ** n)
    return (total ** (1.0 / n)) / np.prod(x.shape)
|
class Perm():
    """Serve successive slices of a fixed random permutation of arange(n)."""

    def __init__(self, n):
        self.inds = np.random.permutation(np.arange(n))
        self.m = n      # total number of indices available
        self.pos = 0    # cursor into the permutation

    def next(self, n):
        # Refuse to read past the end of the permutation.
        assert (self.pos + n) < self.m
        chunk = self.inds[self.pos:(self.pos + n)]
        self.pos += n
        return chunk
|
class CMA():
    """Cumulative moving average."""

    def __init__(self):
        self.t = 0.0    # number of samples folded in so far
        self.cma = 0.0  # current average

    def update(self, x):
        # New average = (running sum + x) / (count + 1).
        self.cma = (self.t * self.cma + x) / (self.t + 1)
        self.t += 1.0
|
class EDA():
    """Exponentially decaying average with decay factor ``k``."""

    def __init__(self, k=0.99):
        self.k = k
        self.eda = 0.0  # current average (starts at zero, so early values are biased low)

    def update(self, x):
        decay = self.k
        self.eda = decay * self.eda + (1 - decay) * x
|
def modelSize(net):
    """Print the total parameter count of ``net`` in thousands."""
    total = 0
    for param in net.parameters():
        total += np.prod(param.size())
    total = int(total / 1000)
    print('Network has ', total, 'K params')
|
def Conv2d(fIn, fOut, k):
    """'same'-padding conv wrapper: padding (k-1)//2 preserves the spatial
    size for odd kernel sizes."""
    return nn.Conv2d(fIn, fOut, k, padding=(k - 1) // 2)
|
def list(module, *args, n=1):
    """Build an nn.ModuleList of `n` independently constructed module(*args).

    NOTE(review): this shadows the builtin `list` within this module; renaming
    would break existing callers, so only flagging it here.
    """
    instances = [module(*args) for _ in range(n)]
    return nn.ModuleList(instances)
|
def var(xNp, volatile=False, cuda=False):
    """Wrap a numpy array as a torch Variable, optionally volatile / on GPU.

    NOTE(review): `Variable(..., volatile=...)` is a pre-0.4 PyTorch API;
    newer releases removed the `volatile` flag (use torch.no_grad()).
    """
    x = Variable(t.from_numpy(xNp), volatile=volatile)
    if cuda:
        x = x.cuda()
    return x
|
def initWeights(net, scheme='orthogonal'):
    """Initialize all parameters of `net` in place with the chosen scheme.

    Schemes: 'orthogonal' (applied only to params with >= 2 dims),
    'normal' (std 0.01), 'xavier'. Any other scheme leaves params untouched.

    NOTE(review): init.orthogonal / init.normal / init.xavier_normal are the
    pre-0.4 torch names; newer torch renames them with a trailing underscore.
    """
    print('Initializing weights. Warning: may overwrite sensitive bias parameters (e.g. batchnorm)')
    for e in net.parameters():
        if (scheme == 'orthogonal'):
            # Orthogonal init is undefined for 1-D tensors (biases); skip them.
            if (len(e.size()) >= 2):
                init.orthogonal(e)
        elif (scheme == 'normal'):
            init.normal(e, std=0.01)
        elif (scheme == 'xavier'):
            init.xavier_normal(e)
|
class SaveManager():
    """Tracks per-epoch train/val loss and accuracy, checkpoints the
    best-validation-accuracy weights under `root`, and locks itself when
    NaNs appear in the network weights."""
    def __init__(self, root):
        # Per-epoch histories: train loss/acc, val loss/acc.
        (self.tl, self.ta, self.vl, self.va) = ([], [], [], [])
        self.root = root            # path prefix for all saved files
        self.stateDict = None       # last checkpointed state_dict
        self.lock = False           # set on NaN detection; blocks further updates
    def update(self, net, tl, ta, vl, va):
        """Record one epoch of stats; checkpoint when val accuracy improves."""
        # A single NaN anywhere poisons the sum, so this detects any NaN weight.
        nan = np.isnan(sum([t.sum(e) for e in net.state_dict().values()]))
        if (nan or self.lock):
            self.lock = True
            print('NaN in update. Locking. Call refresh() to reset')
            return
        # First epoch always checkpoints; later epochs only on a new best va.
        if ((self.epoch() == 1) or (va > np.max(self.va))):
            self.stateDict = net.state_dict().copy()
            t.save(net.state_dict(), (self.root + 'weights'))
        self.tl += [tl]
        self.ta += [ta]
        self.vl += [vl]
        self.va += [va]
        np.save((self.root + 'tl.npy'), self.tl)
        np.save((self.root + 'ta.npy'), self.ta)
        np.save((self.root + 'vl.npy'), self.vl)
        np.save((self.root + 'va.npy'), self.va)
    def load(self, net, raw=False):
        """Restore stat histories from `root`; also load weights into `net`
        unless `raw` is True."""
        stateDict = t.load((self.root + 'weights'))
        self.stateDict = stateDict
        if (not raw):
            net.load_state_dict(stateDict)
        self.tl = np.load((self.root + 'tl.npy')).tolist()
        self.ta = np.load((self.root + 'ta.npy')).tolist()
        self.vl = np.load((self.root + 'vl.npy')).tolist()
        self.va = np.load((self.root + 'va.npy')).tolist()
    def refresh(self, net):
        """Clear the NaN lock and roll `net` back to the last checkpoint."""
        self.lock = False
        net.load_state_dict(self.stateDict)
    def epoch(self):
        # 1-based epoch count derived from history length.
        return (len(self.tl) + 1)
|
def _sequence_mask(sequence_length, max_len=None):
if (max_len is None):
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = t.range(0, (max_len - 1)).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_range_expand = Variable(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = sequence_length.unsqueeze(1).expand_as(seq_range_expand)
return (seq_range_expand < seq_length_expand)
|
def maskedCE(logits, target, length):
    """
    Args:
        logits: A Variable containing a FloatTensor of size
            (batch, max_len, num_classes) which contains the
            unnormalized probability for each class.
        target: A Variable containing a LongTensor of size
            (batch, max_len) which contains the index of the true
            class for each corresponding step.
        length: A Variable containing a LongTensor of size (batch,)
            which contains the length of each data in a batch.

    Returns:
        loss: An average loss value masked by the length.
    """
    logits_flat = logits.view((- 1), logits.size((- 1)))
    # fix: make the softmax dimension explicit -- the implicit-dim form of
    # F.log_softmax is deprecated; classes are the last dimension here.
    log_probs_flat = F.log_softmax(logits_flat, dim=-1)
    target_flat = target.view((- 1), 1)
    # Negative log-likelihood of the true class at every step.
    losses_flat = (- t.gather(log_probs_flat, dim=1, index=target_flat))
    losses = losses_flat.view(*target.size())
    # Zero out steps beyond each sequence's length before averaging.
    mask = _sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = (losses * mask.float())
    loss = (losses.sum() / length.float().sum())
    return loss
|
def runMinibatch(net, batcher, cuda=True, volatile=False, trainable=False):
    """Pull one batch from `batcher`, wrap everything as Variables, run `net`,
    and return (predictions, targets, mask).

    Single-element input/target lists are unwrapped before the forward pass.
    """
    (x, y, mask) = batcher.next()
    x = [var(e, volatile=volatile, cuda=cuda) for e in x]
    y = [var(e, volatile=volatile, cuda=cuda) for e in y]
    if mask is not None:
        mask = var(mask, volatile=volatile, cuda=cuda)
    if len(x) == 1:
        (x,) = x
    if len(y) == 1:
        (y,) = y
    predictions = net(x, trainable)
    return (predictions, y, mask)
|
def runData(net, opt, batcher, criterion=maskedCE, trainable=False, verbose=False, cuda=True, gradClip=10.0, minContext=0, numPrints=10):
    """Run one full pass over `batcher`; returns (mean loss, mean accuracy).

    When `trainable`, takes an optimizer step per minibatch with optional L1
    gradient-norm clipping; otherwise runs inference with volatile inputs.
    `verbose` prints a '#' progress marker roughly `numPrints` times.
    """
    iters = batcher.batches
    meanAcc = CMA()
    meanLoss = CMA()
    for i in range(iters):
        if verbose:
            # fix: was a bare `except: pass` around the progress print; the
            # only expected failure is int(iters / numPrints) == 0 when
            # numPrints > iters, so catch exactly that.
            try:
                if (i % int(iters / numPrints)) == 0:
                    sys.stdout.write('#')
                    sys.stdout.flush()
            except ZeroDivisionError:
                pass
        (a, y, mask) = runMinibatch(net, batcher, trainable=trainable, cuda=cuda, volatile=(not trainable))
        (loss, acc) = stats(criterion, a, y, mask)
        if trainable:
            opt.zero_grad()
            loss.backward()
            if gradClip is not None:
                t.nn.utils.clip_grad_norm(net.parameters(), gradClip, norm_type=1)
            opt.step()
        meanLoss.update(loss.data[0])  # legacy torch scalar access (== loss.item())
        meanAcc.update(acc)
    return (meanLoss.cma, meanAcc.cma)
|
def stats(criterion, a, y, mask):
    """Compute (loss, accuracy) for predictions `a` against targets `y`.

    With a mask (sequence outputs of shape (batch, seq, classes)), the loss is
    computed over the flattened steps and accuracy only over unmasked steps;
    without one, plain classification loss/accuracy is used.
    """
    if mask is None:
        (_, preds) = t.max(a.data, 1)
        return (criterion(a, y), t.mean((y.data == preds).float()))
    (_, preds) = t.max(a.data, 2)
    (batch, sLen, c) = a.size()
    loss = criterion(a.view(-1, c), y.view(-1))
    m = t.sum(mask)
    stepMask = _sequence_mask(mask, sLen)
    correct = stepMask.data.float() * (y.data == preds).float()
    acc = t.sum(correct) / float(m.data[0])
    return (loss, acc)
|
class ExecutionEngine(nn.Module):
    """Executes predicted programs over CNN image features: builds one Program
    tree per example, runs it through the shared cell modules, and classifies
    the root output."""
    def __init__(self, numUnary, numBinary, numClasses):
        super(ExecutionEngine, self).__init__()
        # Cell index -> argument count: two arity-0 slots, then unary, then binary.
        self.arities = (((2 * [0]) + ([1] * numUnary)) + ([2] * numBinary))
        unaries = [UnaryModule() for i in range(numUnary)]
        binaries = [BinaryModule() for i in range(numBinary)]
        # Slots 0 and 1 are placeholders with no module (arity-0 cells).
        self.cells = nn.ModuleList((((2 * [None]) + unaries) + binaries))
        self.CNN = CNN()
        self.classifier = EngineClassifier(numClasses)
    def parallel(self, pInds, p, imgFeats):
        """Build all program trees and execute them batched via FasterExecutioner."""
        progs = []
        for i in range(len(pInds)):
            piInds = pInds[i]
            pi = p[i]
            feats = imgFeats[i:(i + 1)]
            prog = Program(piInds, pi, feats, self.arities)
            prog.build()
            progs += [prog]
        exeQ = FasterExecutioner(progs, self.cells)
        a = exeQ.execute()
        return a
    def sequential(self, p, imgFeats):
        """Execute programs one-by-one (slow path)."""
        progs = []
        a = []
        execs = []
        for i in range(len(p)):
            pi = p[i]
            feats = imgFeats[i:(i + 1)]
            # NOTE(review): Program.__init__ takes (prog, mul, imgFeats,
            # arities) -- this 3-argument call looks like it would raise a
            # TypeError; confirm which Program definition is in scope before
            # relying on the fast=False path.
            prog = Program(pi, feats, self.arities)
            prog.build()
            exeQ = Executioner(prog, self.cells)
            a += [exeQ.execute()]
            execs += [exeQ]
        a = t.cat(a, 0)
        return a
    def forward(self, x, fast=True):
        """x = (program indices, program multipliers, images); returns logits."""
        (pInds, p, img) = x
        a = []
        imgFeats = self.CNN(img)
        pInds = pInds.data.cpu().numpy().tolist()
        if fast:
            a = self.parallel(pInds, p, imgFeats)
        else:
            a = self.sequential(p, imgFeats)
        a = self.classifier(a)
        return a
|
class EngineClassifier(nn.Module):
    """Classifier head: 1x1 conv to 512 channels, 2x2 max-pool, then two FC
    layers. fc1 expects a 7x7 spatial map after pooling (14x14 input)."""
    def __init__(self, numClasses):
        super(EngineClassifier, self).__init__()
        self.conv1 = utils.Conv2d(128, 512, 1)
        self.fc1 = nn.Linear(512 * 7 * 7, 1024)
        self.pool = nn.MaxPool2d(2)
        self.fc2 = nn.Linear(1024, numClasses)
    def forward(self, x):
        """Map feature maps to (N, numClasses) logits."""
        hidden = self.pool(F.relu(self.conv1(x)))
        hidden = hidden.view(hidden.size(0), -1)
        hidden = F.relu(self.fc1(hidden))
        return self.fc2(hidden)
|
class CNN(nn.Module):
    """Two-layer conv stem that reduces 1024-channel backbone features to the
    128-channel maps the program cells operate on."""
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = utils.Conv2d(1024, 128, 3)
        self.conv2 = utils.Conv2d(128, 128, 3)
    def forward(self, x):
        """Apply conv1 -> ReLU -> conv2 -> ReLU."""
        return F.relu(self.conv2(F.relu(self.conv1(x))))
|
class UnaryModule(nn.Module):
    """Residual block for arity-1 program cells: two 3x3 convs with a skip
    connection from the input."""
    def __init__(self):
        super(UnaryModule, self).__init__()
        self.conv1 = utils.Conv2d(128, 128, 3)
        self.conv2 = utils.Conv2d(128, 128, 3)
    def forward(self, x):
        """ReLU(conv2(ReLU(conv1(x))) + x)."""
        residual = x
        out = self.conv2(F.relu(self.conv1(x)))
        return F.relu(out + residual)
|
class BinaryModule(nn.Module):
    """Residual block for arity-2 program cells: concatenate the two inputs
    on the channel axis, project back to 128 channels with a 1x1 conv, then
    apply two 3x3 convs with a skip from the projection."""
    def __init__(self):
        super(BinaryModule, self).__init__()
        self.conv1 = utils.Conv2d(256, 128, 1)
        self.conv2 = utils.Conv2d(128, 128, 3)
        self.conv3 = utils.Conv2d(128, 128, 3)
    def forward(self, x1, x2):
        """ReLU(conv3(ReLU(conv2(h))) + h) where h = ReLU(conv1(cat(x1, x2)))."""
        merged = F.relu(self.conv1(t.cat((x1, x2), 1)))
        out = self.conv3(F.relu(self.conv2(merged)))
        return F.relu(out + merged)
|
class Upscale(nn.Module):
    """Project a single scalar to a (1, 128, 14, 14) feature map with one
    linear layer."""
    def __init__(self):
        super(Upscale, self).__init__()
        self.fc = nn.Linear(1, 128 * 14 * 14)
    def forward(self, x):
        """x must contain exactly one element; returns a (-1, 128, 14, 14) map."""
        flat = self.fc(x.view(1, 1))
        return flat.view(-1, 128, 14, 14)
|
class Node():
    """One node of an executable program tree."""
    def __init__(self, prev):
        self.prev = prev      # parent node; None for the root
        self.inpData = []     # inputs accumulated during execution
    def build(self, cellInd, mul, arity):
        """Attach cell metadata and allocate `arity` empty child slots."""
        self.cellInd = cellInd
        self.mul = mul
        self.arity = arity
        self.next = [None] * arity
|
class Program():
    """Tree form of a prefix-serialized program over image features.

    `prog` is a preorder list of cell indices, `mul` a parallel list of
    per-cell multipliers, and `arities` maps cell index -> argument count
    (arity 0 = leaf that consumes the image features directly).
    """
    def __init__(self, prog, mul, imgFeats, arities):
        self.prog = prog
        self.mul = mul
        self.imgFeats = imgFeats
        self.arities = arities
        self.root = Node(None)
    def build(self, ind=0):
        """Materialize the tree from the prefix serialization."""
        self.buildInternal(self.root)
    def buildInternal(self, cur=None, count=0):
        """Recursively build the subtree rooted at `cur` from prog[count].

        Returns the index of the last serialized position consumed by this
        subtree, so siblings resume at (returned value + 1). Positions past
        the end of `prog` are padded with a leaf (cell 0, mul 1.0) --
        presumably cell index 0 is an arity-0 placeholder; verify against
        the cell table.
        """
        if (count >= len(self.prog)):
            arity = 0
            ind = 0
            mul = 1.0
        else:
            ind = self.prog[count]
            mul = self.mul[count]
            arity = self.arities[ind]
        cur.build(ind, mul, arity)
        if (arity == 0):
            # Leaves read the image features directly.
            cur.inpData = [self.imgFeats]
        elif (arity == 1):
            cur.next = [Node(cur)]
            count = self.buildInternal(cur.next[0], (count + 1))
        elif (arity == 2):
            cur.next = [Node(cur), Node(cur)]
            count = self.buildInternal(cur.next[0], (count + 1))
            count = self.buildInternal(cur.next[1], (count + 1))
        return count
    def flat(self):
        """Preorder list of cell indices (including padding leaves)."""
        return self.flatInternal(self.root, [])
    def flatInternal(self, cur, flattened):
        flattened += [cur.cellInd]
        for e in cur.next:
            self.flatInternal(e, flattened)
        return flattened
    def topologicalSort(self):
        """Postorder node list: every child precedes its parent."""
        return self.topInternal(self.root, [])
    def topInternal(self, cur, flattened):
        for e in cur.next:
            self.topInternal(e, flattened)
        flattened += [cur]
        return flattened
|
class HighArcESort():
    """Assigns each tree node a unique execution rank such that every node's
    rank exceeds all ranks in its subtree; returns a {rank: node} dict."""
    def __init__(self):
        self.out = {}
    def __call__(self, root):
        """One-shot: rank the tree under `root` and return the rank map."""
        assert not self.out
        self.highArcESortInternal(root, 0)
        return self.out
    def highArcESortInternal(self, cur, rank):
        """Rank `cur`'s subtree; children are visited left-to-right, each
        starting from the highest rank produced so far. Returns cur's rank + 1."""
        for child in cur.next:
            rank = max(rank, self.highArcESortInternal(child, rank))
        self.out[rank] = cur
        return rank + 1
|
class FasterExecutioner():
    """Batched program executor: groups nodes across programs by execution
    rank (HighArcESort) and by cell, so each cell module runs once per step
    on a concatenated minibatch."""
    def __init__(self, progs, cells):
        self.cells = cells
        self.progs = progs
        self.roots = [p.root for p in progs]
        self.sortProgs()
        self.maxKey = max(list(self.progs.keys()))  # highest rank over all programs
    def sortProgs(self):
        """Re-key self.progs as {rank: [node, ...]} across all programs."""
        progs = {}
        for prog in self.progs:
            prog = HighArcESort()(prog.root)
            for (rank, nodeList) in prog.items():
                # `nodeList` is a single node -- HighArcESort stores one node
                # per rank.
                progs.setdefault(rank, []).append(nodeList)
        self.progs = progs
    def execute(self):
        """Run all programs rank-by-rank; returns the concatenated root outputs."""
        for s in range((self.maxKey + 1)):
            nodes = self.progs[s]
            groupedNodes = {}
            for node in nodes:
                groupedNodes.setdefault(node.cellInd, []).append(node)
            for (cellInd, nodes) in groupedNodes.items():
                arity = nodes[0].arity
                cell = self.cells[cellInd]
                # For arity 0 this is the final output (no cell is applied);
                # otherwise it is the first-argument batch.
                outData = [node.inpData[0] for node in nodes]
                if (arity == 1):
                    arg = t.cat(outData, 0)
                    outData = cell(arg)
                    # Split the batched output back into per-node slices.
                    outData = [outData[i:(i + 1)] for i in range(outData.size()[0])]
                elif (arity == 2):
                    arg2 = t.cat(outData, 0)
                    arg1 = t.cat([node.inpData[1] for node in nodes], 0)
                    outData = cell(arg1, arg2)
                    outData = [outData[i:(i + 1)] for i in range(outData.size()[0])]
                for (node, outDat) in zip(nodes, outData):
                    # NOTE(review): this branch is a no-op (outDat = outDat);
                    # it looks like applying node.mul was intended -- verify.
                    if (type(node.mul) != float):
                        outDat = outDat
                    if (node.prev is None):
                        node.outData = outDat
                    else:
                        # Feed this node's output to its parent as an input.
                        node.prev.inpData += [outDat]
        outData = [root.outData for root in self.roots]
        return t.cat(outData, 0)
|
class FastExecutioner():
    """Batched program executor driven by each program's postorder
    (topologicalSort): at step s, the s-th postorder node of every program is
    executed, grouped by cell."""
    def __init__(self, progs, cells):
        self.cells = cells
        self.progs = progs
        self.sortProgs()
    def sortProgs(self):
        """Replace each Program with its postorder node list."""
        for i in range(len(self.progs)):
            self.progs[i] = self.progs[i].topologicalSort()
    def execute(self):
        """Run all programs step-by-step; returns concatenated root outputs."""
        maxLen = max([len(e) for e in self.progs])
        for s in range(maxLen):
            nodes = []
            for i in range(len(self.progs)):
                prog = self.progs[i]
                # Shorter programs are already finished at this step.
                if (len(prog) <= s):
                    continue
                nodes += [prog[s]]
            groupedNodes = {}
            for node in nodes:
                groupedNodes.setdefault(node.cellInd, []).append(node)
            for (cellInd, nodes) in groupedNodes.items():
                arity = nodes[0].arity
                cell = self.cells[cellInd]
                outData = [node.inpData[0] for node in nodes]
                if (arity == 1):
                    arg = t.cat(outData, 0)
                    outData = cell(arg)
                    outData = t.split(outData, 1, 0)
                elif (arity == 2):
                    arg1 = t.cat(outData, 0)
                    arg2 = t.cat([node.inpData[1] for node in nodes], 0)
                    # NOTE(review): argument order is flipped relative to
                    # FasterExecutioner (there inpData[0] is passed as arg2);
                    # confirm which order the binary cells expect.
                    outData = cell(arg1, arg2)
                    outData = t.split(outData, 1, 0)
                for (node, outDat) in zip(nodes, outData):
                    if (node.prev is None):
                        node.outData = outDat
                    else:
                        # Feed this node's output upward as a parent input.
                        node.prev.inpData += [outDat]
        # Last postorder node of each program is its root.
        outData = [prog[(- 1)].outData for prog in self.progs]
        return t.cat(outData, 0)
|
class Executioner():
    """Simple recursive executor for one Program tree: leaves return their
    stored input, internal nodes apply their cell to the children's outputs."""
    def __init__(self, prog, cells):
        self.prog = prog
        self.cells = cells
    def execute(self):
        """Evaluate the whole program; returns the root's output."""
        return self.executeInternal(self.prog.root)
    def executeInternal(self, cur):
        """Evaluate the subtree rooted at `cur`."""
        if cur.arity == 0:
            # Leaf: pass the stored input straight through.
            return cur.inpData[0]
        if cur.arity == 1:
            args = [self.executeInternal(cur.next[0])]
        elif cur.arity == 2:
            args = [self.executeInternal(cur.next[0]),
                    self.executeInternal(cur.next[1])]
        return self.cells[cur.cellInd](*args)
|
class ProgramGenerator(nn.Module):
    """Seq2seq program generator: embeds a question, encodes it with a 2-layer
    LSTM, then decodes a fixed-length (qLen) program by feeding the encoder's
    first-layer final hidden state at every decoder step."""
    def __init__(self, embedDim, hGen, qLen, qVocab, pVocab):
        super(ProgramGenerator, self).__init__()
        self.embed = nn.Embedding(qVocab, embedDim)
        self.encoder = t.nn.LSTM(embedDim, hGen, 2, batch_first=True)
        self.decoder = t.nn.LSTM(hGen, hGen, 2, batch_first=True)
        self.proj = nn.Linear(hGen, pVocab)
        self.qLen = qLen
        self.hGen = hGen
        self.pVocab = pVocab
    def forward(self, x):
        """x: (batch, seqLen) LongTensor of word indices.
        Returns (batch, qLen, pVocab) program logits."""
        embedded = self.embed(x)
        (_, state) = self.encoder(embedded)
        # state[0][0] is the final hidden state of the encoder's first layer;
        # repeat it as the decoder input at every one of the qLen steps.
        h = state[0][0]
        decoderInp = t.stack([h for _ in range(self.qLen)], 1)
        (out, _) = self.decoder(decoderInp, state)
        batch, steps = out.size(0), out.size(1)
        logits = self.proj(out.contiguous().view(-1, self.hGen))
        return logits.view(batch, steps, -1)
|
def ResNetFeatureExtractor():
    """Return a frozen-in-eval-mode feature extractor: pretrained ResNet-101
    truncated after layer3."""
    backbone = torchvision.models.resnet101(pretrained=True)
    stages = [backbone.conv1, backbone.bn1, backbone.relu, backbone.maxpool,
              backbone.layer1, backbone.layer2, backbone.layer3]
    return nn.Sequential(*stages).eval()
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1,
                     stride=stride, bias=False)
|
class BasicBlock(nn.Module):
    """Standard 2-conv ResNet basic block with optional downsampled shortcut."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample  # shortcut projection when shapes differ
        self.stride = stride
    def forward(self, x):
        """ReLU(bn2(conv2(ReLU(bn1(conv1(x))))) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.relu(out + shortcut)
        return out
|
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce, 3x3, 1x1 expand (x4), with an
    optional downsampled shortcut."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # shortcut projection when shapes differ
        self.stride = stride
    def forward(self, x):
        """ReLU(bn3(conv3(...)) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = self.relu(out + shortcut)
        return out
|
class BottleneckFinal(nn.Module):
    """Bottleneck variant used as the last feature stage: no residual add and
    no final bn/relu -- raw conv3 output is returned."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BottleneckFinal, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        """Returns conv3(ReLU(bn2(conv2(ReLU(bn1(conv1(x))))))).

        fix: removed the unused `residual = x` local -- this block
        deliberately has no skip connection.
        """
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        return out
|
class ResNet(nn.Module):
    """Truncated ResNet feature extractor ending in a BottleneckFinal stage.

    NOTE(review): __init__ downloads pretrained ResNet-101 weights from the
    PyTorch model zoo and remaps layer3.5.* weights onto featureExtract.*;
    constructing this class requires network access.
    """
    def __init__(self, num_classes=1000):
        block = Bottleneck
        # NOTE(review): torchvision's resnet101 uses [3, 4, 23]; this truncated
        # variant uses [3, 4, 5] layer3 blocks -- presumably intentional, verify.
        layers = [3, 4, 5]
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # Final stage: bottleneck without residual/bn3 (see BottleneckFinal).
        self.featureExtract = BottleneckFinal(1024, 256, 1, None)
        # He-style init for convs, unit-gamma/zero-beta for batchnorms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Load pretrained resnet101 weights, remapping the layer3.5 block onto
        # featureExtract and dropping any remote keys absent locally.
        localStates = self.state_dict()
        url = 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'
        remoteStates = model_zoo.load_url(url)
        localKeys = localStates.keys()
        remoteKeys = remoteStates.keys()
        localPrefix = 'featureExtract.'
        remotePrefix = 'layer3.5.'
        mismatch = ['conv1.weight', 'bn1.weight', 'bn1.bias', 'bn1.running_mean', 'bn1.running_var', 'conv2.weight', 'bn2.weight', 'bn2.bias', 'bn2.running_mean', 'bn2.running_var', 'conv3.weight']
        for e in mismatch:
            remoteStates[(localPrefix + e)] = remoteStates[(remotePrefix + e)]
        for k in list(remoteKeys):
            if (k not in list(localKeys)):
                remoteStates.pop(k)
        self.load_state_dict(remoteStates)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` bottlenecks; first one may downsample the shortcut."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Run the stem, three residual stages, and the final feature stage."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.featureExtract(x)
        return x
|
def resnet101(pretrained=False, **kwargs):
    'Constructs a ResNet-101 model.\n\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n    '
    # fix: the local ResNet.__init__ only accepts num_classes -- the block and
    # layer-count arguments of torchvision's resnet101 are hard-coded inside
    # ResNet, so passing them here raised a TypeError.
    model = ResNet(**kwargs)
    if pretrained:
        # NOTE(review): ResNet.__init__ already loads pretrained weights, and
        # `model_urls` is not defined in this module -- verify this branch
        # before relying on it.
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
|
class Node():
    """Node built from one CLEVR program cell dict: stores the child indices
    (reversed) and the function name, suffixed with the first value input
    when present."""
    def __init__(self, cell):
        self.nxt = cell['inputs'][::(- 1)]  # child indices, right-to-left
        funcName = cell['function']
        values = cell['value_inputs']
        if len(values) > 0:
            funcName = funcName + '_' + values[0]
        self.func = funcName
|
class BTree():
    """Program tree reconstructed from a CLEVR cell list; the last cell is
    the root and child indices are resolved recursively into Node objects."""
    def __init__(self, cells):
        self.root = Node(cells[(- 1)])
        self.addNodes(cells[:(- 1)], self.root)
    def addNodes(self, cells, cur):
        """Replace each child index in cur.nxt with its built Node, recursively."""
        for i, childIdx in enumerate(cur.nxt):
            child = Node(cells[childIdx])
            cur.nxt[i] = child
            self.addNodes(cells, child)
    def flat(self):
        """Preorder list of function names."""
        return self.flatInternal(self.root, [])
    def flatInternal(self, cur, flattened):
        flattened.append(cur.func)
        for child in cur.nxt:
            self.flatInternal(child, flattened)
        return flattened
    def print(self):
        """Print function names in preorder, one per line."""
        self.printInternal(self.root)
    def printInternal(self, cur):
        print(cur.func)
        for child in cur.nxt:
            self.printInternal(child)
|
def loadDat(fName='../data/clevr/questions/CLEVR_val_questions.json'):
    """Load the 'questions' list from a CLEVR questions JSON file.

    Generalized: `fName` is now a parameter; the default preserves the
    original hard-coded validation-split path.
    """
    with open(fName) as dataF:
        questions = json.load(dataF)['questions']
    return questions
|
def getFuncs(dat):
    """Collect the sorted set of function signatures used across all programs.

    Each signature is '<arity>_<function>' plus '_<first value input>' when
    the cell has value inputs.
    """
    signatures = set()
    for entry in dat:
        for cell in entry['program']:
            funcName = cell['function']
            values = cell['value_inputs']
            if len(values) > 0:
                funcName += '_' + values[0]
            signatures.add(str(len(cell['inputs'])) + '_' + funcName)
    return sorted(signatures)
|
def getAllWords(fName):
    """Collect question words (lowercased, final punctuation stripped, plus a
    single '?' token) and answer words from a CLEVR questions JSON file.

    Returns (question words, answer words); answers are optional per entry.
    """
    # fix: the original `open(fName).read()` + json.loads leaked the handle.
    with open(fName) as f:
        dat = json.load(f)
    dat = dat['questions']
    wordsX = []
    wordsY = []
    for e in dat:
        # [:-1] drops the trailing question mark before splitting.
        wordsX += e['question'].lower()[:(- 1)].split()
        if ('answer' in e.keys()):
            wordsY += e['answer'].lower().split()
    return ((wordsX + ['?']), wordsY)
|
def name(split):
    """Return the path to the CLEVR questions JSON for the given split."""
    return f'../data/clevr/questions/CLEVR_{split}_questions.json'
|
def plotResults():
    """Plot per-example execution time vs minibatch size for the vanilla and
    improved executors (log2/log2 axes)."""
    batch = [1, 32, 64, 320, 640, 850]
    # Vanilla curves have one fewer point (no 850-batch measurement).
    fVanilla = [0.0031771, 0.0031694, 0.0026328, 0.00238375, 0.0023333]
    fOurs = [0.003963, 0.00248858, 0.001686116, 0.000710902, 0.0005151, 0.00042235]
    cVanilla = [0.002315934, 0.00287098, 0.00249189, 0.002322, 0.002199]
    cOurs = [0.002244463, 0.00155909, 0.00117287, 0.000341281, 0.000202705, 0.000156266]
    lineWidth = 3
    ls = 26
    fs = 24
    ts = 26
    leg = 24
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # fix: the basex/basey kwargs were removed in matplotlib 3.3 (now `base`).
    ax.set_xscale('log', base=2)
    ax.set_yscale('log', base=2)
    ax.set_xlim(1, 1024)
    ax.set_ylim((2 ** (- 13)), (2 ** (- 8)))
    ax.tick_params(axis='x', labelsize=ls)
    ax.tick_params(axis='y', labelsize=ls)
    plt.xlabel('Minibatch Size', fontsize=fs)
    plt.ylabel('Execution Time (sec / example)', fontsize=fs)
    plt.title('Efficiency Gains with Improved Topological Sort', fontsize=ts)
    # fix: ax.hold() was removed in matplotlib 3.0 (overplotting is default),
    # and the kwarg is `linewidth` -- `LineWidth=` is not a valid property name.
    ax.plot(batch[:(- 1)], fVanilla, linewidth=lineWidth, label='Vanilla Forward')
    ax.plot(batch, fOurs, linewidth=lineWidth, label='Our Forward')
    ax.plot(batch[:(- 1)], cVanilla, linewidth=lineWidth, label='Vanilla Cell')
    ax.plot(batch, cOurs, linewidth=lineWidth, label='Our Cell')
    ax.legend(loc='lower left', shadow=False, prop={'size': leg})
    plt.show()
|
def data_loader(file_name='data/google.csv', seq_len=7, missing_rate=0.2):
    """Load complete data and introduce missingness.

    Args:
        - file_name: the location of file to be loaded
        - seq_len: sequence length
        - missing_rate: rate of missing data to be introduced

    Returns:
        - x: data with missing values (missing entries zero-filled)
        - m: observation indicator (m=1: observe, m=0: missing)
        - t: time information (time difference between two measurements)
        - ori_x: original data without missing values (for evaluation)
    """
    data = np.loadtxt(file_name, delimiter=',', skiprows=1)
    # File rows are reversed before windowing -- presumably newest-first on
    # disk, flipped to chronological order; verify against the data source.
    data = data[::(- 1)]
    (data, norm_parameters) = MinMaxScaler(data)
    (no, dim) = data.shape
    no = (no - seq_len)
    # Sliding windows of length seq_len.
    ori_x = [data[i:(i + seq_len)] for i in range(no)]
    m = list()
    x = list()
    t = list()
    for i in range(no):
        # Random observation mask: 1 = observed, 0 = missing.
        temp_m = (1 * (np.random.uniform(0, 1, [seq_len, dim]) > missing_rate))
        m = (m + [temp_m])
        temp_x = ori_x[i].copy()
        temp_x[np.where((temp_m == 0))] = np.nan
        x = (x + [temp_x])
        # Steps since the last observation, per feature.
        temp_t = np.ones([seq_len, dim])
        for j in range(dim):
            for k in range(1, seq_len):
                if (temp_m[(k, j)] == 0):
                    temp_t[(k, j)] = (temp_t[((k - 1), j)] + 1)
        t = (t + [temp_t])
    x = np.asarray(x)
    m = np.asarray(m)
    t = np.asarray(t)
    ori_x = np.asarray(ori_x)
    # fix: np.nan_to_num(x, 0) passed 0 as the `copy` flag, not the fill
    # value; spell the NaN replacement explicitly.
    x = np.nan_to_num(x, nan=0.0)
    return (x, m, t, ori_x)
|
def main(args):
    """MRNN main function.

    Args:
        - file_name: dataset file name
        - seq_len: sequence length of time-series data
        - missing_rate: the rate of introduced missingness
        - h_dim: hidden state dimensions
        - batch_size: the number of samples in mini batch
        - iteration: the number of iteration
        - learning_rate: learning rate of model training
        - metric_name: imputation performance metric (mse, mae, rmse)

    Returns:
        - output: dict with x, ori_x, m, t, imputed_x and the performance score
    """
    (x, m, t, ori_x) = data_loader(args.file_name, args.seq_len, args.missing_rate)
    tmp_dir = 'tmp/mrnn_imputation'
    # Clear any leftover checkpoint directory before and after training.
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    model_parameters = {'h_dim': args.h_dim,
                        'batch_size': args.batch_size,
                        'iteration': args.iteration,
                        'learning_rate': args.learning_rate}
    mrnn_model = mrnn(x, model_parameters)
    mrnn_model.fit(x, m, t)
    imputed_x = mrnn_model.transform(x, m, t)
    performance = imputation_performance(ori_x, imputed_x, m, args.metric_name)
    print(args.metric_name + ': ' + str(np.round(performance, 4)))
    output = {'x': x, 'ori_x': ori_x, 'm': m, 't': t,
              'imputed_x': imputed_x, 'performance': performance}
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    return output
|
def MinMaxScaler(data):
    """Normalization tool: Min Max Scaler.

    Args:
        - data: raw input data

    Returns:
        - normalized_data: minmax normalized data
        - norm_parameters: normalization parameters for rescaling if needed
    """
    min_val = np.min(data, axis=0)
    shifted = data - min_val
    # Small epsilon guards against division by zero for constant columns.
    max_val = np.max(shifted, axis=0) + 1e-08
    normalized_data = shifted / max_val
    return (normalized_data, {'min_val': min_val, 'max_val': max_val})
|
def imputation_performance(ori_x, imputed_x, m, metric_name):
    'Performance metrics for imputation.\n  \n  Args:\n    - ori_x: original complete data (without missing values)\n    - imputed_x: imputed data from incomplete data\n    - m: observation indicator\n    - metric_name: mae, mse, or rmse\n    \n  Returns:\n    - performance: imputation performance in terms or mae, mse, or rmse\n  '
    assert (metric_name in ['mae', 'mse', 'rmse'])
    (no, seq_len, dim) = ori_x.shape
    # Flatten (no, seq_len, dim) -> (no*seq_len, dim) so the metric treats
    # every time step as a sample.
    ori_x = np.reshape(ori_x, [(no * seq_len), dim])
    imputed_x = np.reshape(imputed_x, [(no * seq_len), dim])
    m = np.reshape(m, [(no * seq_len), dim])
    # NOTE(review): (1 - m) weights the error toward originally-missing
    # entries. If these are sklearn metrics, the third positional argument is
    # `sample_weight`, which newer sklearn requires as a keyword and expects
    # to be 1-D -- verify which implementation is imported here.
    if (metric_name == 'mae'):
        performance = mean_absolute_error(ori_x, imputed_x, (1 - m))
    elif (metric_name == 'mse'):
        performance = mean_squared_error(ori_x, imputed_x, (1 - m))
    elif (metric_name == 'rmse'):
        performance = np.sqrt(mean_squared_error(ori_x, imputed_x, (1 - m)))
    return performance
|
class _NNMFBase(object):
    """Base class for neural network matrix factorization models.

    Subclasses must implement _init_vars (latent variables + prediction `self.r`)
    and _init_ops (loss + `self.optimize_steps`).

    NOTE(review): written against the TensorFlow 0.x/1.x graph API
    (tf.placeholder, tf.sub, tf.initialize_all_variables) -- these are removed
    or renamed in modern TF.
    """
    def __init__(self, num_users, num_items, D=10, Dprime=60, hidden_units_per_layer=50, latent_normal_init_params={'mean': 0.0, 'stddev': 0.1}, model_filename='model/nnmf.ckpt'):
        self.num_users = num_users
        self.num_items = num_items
        self.D = D              # interaction latent dim
        self.Dprime = Dprime    # elementwise-product latent dim
        self.hidden_units_per_layer = hidden_units_per_layer
        self.latent_normal_init_params = latent_normal_init_params
        self.model_filename = model_filename
        self._epochs = 0
        # Minibatch inputs: user/item index pairs and their target ratings.
        self.user_index = tf.placeholder(tf.int32, [None])
        self.item_index = tf.placeholder(tf.int32, [None])
        self.r_target = tf.placeholder(tf.float32, [None])
        self._init_vars()
        self._init_ops()
        # RMSE of predictions `self.r` (defined by the subclass) vs targets.
        self.rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.r, self.r_target))))
    def _init_vars(self):
        raise NotImplementedError
    def _init_ops(self):
        raise NotImplementedError
    def init_sess(self, sess):
        """Bind a TF session and initialize all variables."""
        self.sess = sess
        init = tf.initialize_all_variables()
        self.sess.run(init)
    def _train_iteration(self, data, additional_feed=None):
        """Run every optimize step once over `data`; bumps the epoch counter."""
        user_ids = data['user_id']
        item_ids = data['item_id']
        ratings = data['rating']
        feed_dict = {self.user_index: user_ids, self.item_index: item_ids, self.r_target: ratings}
        if additional_feed:
            feed_dict.update(additional_feed)
        for step in self.optimize_steps:
            self.sess.run(step, feed_dict=feed_dict)
        self._epochs += 1
    def train_iteration(self, data):
        self._train_iteration(data)
    def eval_loss(self, data):
        raise NotImplementedError
    def eval_rmse(self, data):
        """Evaluate RMSE on a batch of (user_id, item_id, rating) columns."""
        user_ids = data['user_id']
        item_ids = data['item_id']
        ratings = data['rating']
        feed_dict = {self.user_index: user_ids, self.item_index: item_ids, self.r_target: ratings}
        return self.sess.run(self.rmse, feed_dict=feed_dict)
    def predict(self, user_id, item_id):
        """Predicted rating for a single (user, item) pair."""
        rating = self.sess.run(self.r, feed_dict={self.user_index: [user_id], self.item_index: [item_id]})
        return rating[0]
|
class NNMF(_NNMFBase):
    """Point-estimate NNMF: latent matrices U, U', V, V' feed an MLP that
    predicts ratings; trained by alternating MLP-weight and latent updates
    with L2 regularization weighted by `lam`.

    NOTE(review): uses TF 0.x APIs (tf.sub, tf.mul, concat_dim, squeeze_dims)
    that are renamed/removed in modern TF.
    """
    def __init__(self, *args, **kwargs):
        # Optional regularization strength; popped before base-class init.
        if ('lam' in kwargs):
            self.lam = float(kwargs['lam'])
            del kwargs['lam']
        else:
            self.lam = 0.01
        super(NNMF, self).__init__(*args, **kwargs)
    def _init_vars(self):
        self.U = tf.Variable(tf.truncated_normal([self.num_users, self.D], **self.latent_normal_init_params))
        self.Uprime = tf.Variable(tf.truncated_normal([self.num_users, self.Dprime], **self.latent_normal_init_params))
        self.V = tf.Variable(tf.truncated_normal([self.num_items, self.D], **self.latent_normal_init_params))
        self.Vprime = tf.Variable(tf.truncated_normal([self.num_items, self.Dprime], **self.latent_normal_init_params))
        # Per-example rows of the latent matrices.
        self.U_lu = tf.nn.embedding_lookup(self.U, self.user_index)
        self.Uprime_lu = tf.nn.embedding_lookup(self.Uprime, self.user_index)
        self.V_lu = tf.nn.embedding_lookup(self.V, self.item_index)
        self.Vprime_lu = tf.nn.embedding_lookup(self.Vprime, self.item_index)
        # MLP input: [U_u, V_i, U'_u * V'_i].
        f_input_layer = tf.concat(concat_dim=1, values=[self.U_lu, self.V_lu, tf.mul(self.Uprime_lu, self.Vprime_lu)])
        (_r, self.mlp_weights) = build_mlp(f_input_layer, hidden_units_per_layer=self.hidden_units_per_layer)
        self.r = tf.squeeze(_r, squeeze_dims=[1])
    def _init_ops(self):
        # Squared reconstruction error plus L2 penalty on all latents.
        reconstruction_loss = tf.reduce_sum(tf.square(tf.sub(self.r_target, self.r)), reduction_indices=[0])
        reg = tf.add_n([tf.reduce_sum(tf.square(self.Uprime), reduction_indices=[0, 1]), tf.reduce_sum(tf.square(self.U), reduction_indices=[0, 1]), tf.reduce_sum(tf.square(self.V), reduction_indices=[0, 1]), tf.reduce_sum(tf.square(self.Vprime), reduction_indices=[0, 1])])
        self.loss = (reconstruction_loss + (self.lam * reg))
        self.optimizer = tf.train.AdamOptimizer()
        # Alternate: first update MLP weights, then the latent matrices.
        f_train_step = self.optimizer.minimize(self.loss, var_list=self.mlp_weights.values())
        latent_train_step = self.optimizer.minimize(self.loss, var_list=[self.U, self.Uprime, self.V, self.Vprime])
        self.optimize_steps = [f_train_step, latent_train_step]
    def eval_loss(self, data):
        """Evaluate the regularized training loss on a batch."""
        user_ids = data['user_id']
        item_ids = data['item_id']
        ratings = data['rating']
        feed_dict = {self.user_index: user_ids, self.item_index: item_ids, self.r_target: ratings}
        return self.sess.run(self.loss, feed_dict=feed_dict)
|
class SVINNMF(_NNMFBase):
num_latent_samples = 1
num_data_samples = 3
def __init__(self, *args, **kwargs):
if ('r_var' in kwargs):
self.r_var = float(kwargs['r_var'])
del kwargs['r_sigma']
else:
self.r_var = 1.0
if ('U_prior_var' in kwargs):
self.U_prior_var = float(kwargs['U_prior_var'])
del kwargs['U_prior_var']
else:
self.U_prior_var = 5.0
if ('Uprime_prior_var' in kwargs):
self.Uprime_prior_var = float(kwargs['Uprime_prior_var'])
del kwargs['Uprime_prior_var']
else:
self.Uprime_prior_var = 5.0
if ('V_prior_var' in kwargs):
self.V_prior_var = float(kwargs['V_prior_var'])
del kwargs['V_prior_var']
else:
self.V_prior_var = 5.0
if ('Vprime_prior_var' in kwargs):
self.Vprime_prior_var = float(kwargs['Vprime_prior_var'])
del kwargs['Vprime_prior_var']
else:
self.Vprime_prior_var = 5.0
if ('kl_full_epoch' in kwargs):
self.kl_full_epoch = int(kwargs['kl_full_epoch'])
del kwargs['kl_full_epoch']
else:
self.kl_full_epoch = 1000
if ('anneal_kl' in kwargs):
self.anneal_kl = bool(kwargs['anneal_kl'])
else:
self.anneal_kl = True
super(SVINNMF, self).__init__(*args, **kwargs)
def _init_vars(self):
self.U_mu = tf.Variable(tf.truncated_normal([self.num_users, self.D], **self.latent_normal_init_params))
self.U_log_var = tf.Variable(tf.random_uniform([self.num_users, self.D], minval=0.0, maxval=0.5))
self.Uprime_mu = tf.Variable(tf.truncated_normal([self.num_users, self.Dprime], **self.latent_normal_init_params))
self.Uprime_log_var = tf.Variable(tf.random_uniform([self.num_users, self.Dprime], minval=0.0, maxval=0.5))
self.V_mu = tf.Variable(tf.truncated_normal([self.num_items, self.D], **self.latent_normal_init_params))
self.V_log_var = tf.Variable(tf.random_uniform([self.num_items, self.D], minval=0.0, maxval=0.5))
self.Vprime_mu = tf.Variable(tf.truncated_normal([self.num_items, self.Dprime], **self.latent_normal_init_params))
self.Vprime_log_var = tf.Variable(tf.random_uniform([self.num_items, self.Dprime], minval=0.0, maxval=0.5))
U_mu_lu = tf.nn.embedding_lookup(self.U_mu, self.user_index)
U_log_var_lu = tf.nn.embedding_lookup(self.U_log_var, self.user_index)
Uprime_mu_lu = tf.nn.embedding_lookup(self.Uprime_mu, self.user_index)
Uprime_log_var_lu = tf.nn.embedding_lookup(self.Uprime_log_var, self.user_index)
V_mu_lu = tf.nn.embedding_lookup(self.V_mu, self.item_index)
V_log_var_lu = tf.nn.embedding_lookup(self.V_log_var, self.item_index)
Vprime_mu_lu = tf.nn.embedding_lookup(self.Vprime_mu, self.item_index)
Vprime_log_var_lu = tf.nn.embedding_lookup(self.Vprime_log_var, self.item_index)
q_U = tf.contrib.distributions.MultivariateNormalDiag(mu=U_mu_lu, diag_stdev=tf.sqrt(tf.exp(U_log_var_lu)))
q_Uprime = tf.contrib.distributions.MultivariateNormalDiag(mu=Uprime_mu_lu, diag_stdev=tf.sqrt(tf.exp(Uprime_log_var_lu)))
q_V = tf.contrib.distributions.MultivariateNormalDiag(mu=V_mu_lu, diag_stdev=tf.sqrt(tf.exp(V_log_var_lu)))
q_Vprime = tf.contrib.distributions.MultivariateNormalDiag(mu=Vprime_mu_lu, diag_stdev=tf.sqrt(tf.exp(Vprime_log_var_lu)))
self.U = q_U.sample()
self.Uprime = q_Uprime.sample()
self.V = q_V.sample()
self.Vprime = q_Vprime.sample()
f_input_layer = tf.concat(concat_dim=1, values=[self.U, self.V, tf.mul(self.Uprime, self.Vprime)])
(self.r_mu, self.mlp_weights) = build_mlp(f_input_layer, hidden_units_per_layer=self.hidden_units_per_layer)
self.r = tf.squeeze(self.r_mu, squeeze_dims=[1])
self.kl_weight = (tf.placeholder(tf.float32) if self.anneal_kl else tf.constant(1.0, dtype=tf.float32))
def _init_ops(self):
    """Build the training objective (negative ELBO) and optimizer ops.

    ELBO = E_q[log p(r | latents)] - kl_weight * KL(q || p), where the
    expectation is approximated by the single reparameterized sample of the
    latents drawn during graph construction, and kl_weight allows KL
    annealing (fed at train time when enabled).
    """
    # KL divergence between each variational posterior and its prior,
    # one term per latent matrix (U, U', V, V').
    KL_U = KL(self.U_mu, self.U_log_var, prior_var=self.U_prior_var)
    KL_Uprime = KL(self.Uprime_mu, self.Uprime_log_var, prior_var=self.Uprime_prior_var)
    KL_V = KL(self.V_mu, self.V_log_var, prior_var=self.V_prior_var)
    KL_Vprime = KL(self.Vprime_mu, self.Vprime_log_var, prior_var=self.Vprime_prior_var)
    KL_all = (((KL_U + KL_Uprime) + KL_V) + KL_Vprime)
    # Gaussian log-likelihood of the observed ratings (up to an additive
    # constant) with fixed observation variance self.r_var.
    # NOTE(review): tf.sub is pre-TF-1.0 API (renamed tf.subtract in 1.0).
    log_prob = ((- (1 / (2.0 * self.r_var))) * tf.reduce_sum(tf.square(tf.sub(self.r_target, self.r)), reduction_indices=[0]))
    elbo = (log_prob - (self.kl_weight * KL_all))
    # Minimizing the loss maximizes the (annealed) ELBO.
    self.loss = (- elbo)
    self.optimizer = tf.train.AdamOptimizer()
    self.optimize_steps = [self.optimizer.minimize(self.loss)]
def train_iteration(self, data):
    """Run one training step on `data`, feeding the annealed KL weight
    when KL annealing is enabled."""
    if self.anneal_kl:
        additional_feed = {self.kl_weight: get_kl_weight(self._epochs, on_epoch=self.kl_full_epoch)}
    else:
        additional_feed = {}
    super(SVINNMF, self)._train_iteration(data, additional_feed=additional_feed)
def eval_loss(self, data):
    """Evaluate the model loss on `data` (no training step).

    `data` must be indexable by 'user_id', 'item_id' and 'rating' (e.g. a
    Pandas dataframe of ratings).
    """
    feed_dict = {
        self.user_index: data['user_id'],
        self.item_index: data['item_id'],
        self.r_target: data['rating'],
        # Feed the same annealed KL weight used during training so the
        # reported loss matches the optimized objective.
        self.kl_weight: get_kl_weight(self._epochs, on_epoch=self.kl_full_epoch),
    }
    return self.sess.run(self.loss, feed_dict=feed_dict)
|
def KL(mean, log_var, prior_var):
    """Computes KL(q || p) summed over a matrix of independent univariate
    normals (i.e. every dimension of a latent).

    Here q = N(mean, exp(log_var)) is the variational posterior and
    p = N(0, prior_var) is a zero-mean prior with scalar variance.

    NOTE(review): the closed-form KL also contains a constant -1/2 per
    dimension which is omitted here; this shifts the reported loss by a
    constant but does not affect gradients.

    Args:
        mean: tensor of posterior means, shape (rows, cols).
        log_var: tensor of posterior log-variances, same shape as `mean`.
        prior_var: scalar prior variance.

    Returns:
        Scalar tensor: the KL term summed over both tensor dimensions
        (up to the omitted additive constant).
    """
    return tf.reduce_sum((tf.log((math.sqrt(prior_var) / tf.sqrt(tf.exp(log_var)))) + ((tf.exp(log_var) + tf.square(mean)) / (2.0 * prior_var))), reduction_indices=[0, 1])
|
def _weight_init_range(n_in, n_out):
    """Calculates the range for picking initial weight values from a
    uniform distribution.

    Uses 4 * sqrt(6 / (n_in + n_out)) — the Glorot/Xavier uniform bound
    scaled by 4, the classic recommendation for sigmoid activations.

    Args:
        n_in: fan-in of the layer.
        n_out: fan-out of the layer.

    Returns:
        dict with 'minval'/'maxval' keys, suitable to splat as **kwargs
        into tf.random_uniform.
    """
    # Named `bound` (was `range`) so the builtin `range` is not shadowed.
    bound = (4.0 * math.sqrt(6.0)) / math.sqrt(n_in + n_out)
    return {'minval': -bound, 'maxval': bound}
|
def build_mlp(f_input_layer, hidden_units_per_layer):
    """Builds a feed-forward NN (MLP) with 3 sigmoid hidden layers and a
    linear scalar output.

    Args:
        f_input_layer: 2-D input tensor of shape (batch, num_features).
        hidden_units_per_layer: width of each of the three hidden layers.

    Returns:
        (out, mlp_weights): the (batch, 1) output tensor and the dict of
        weight/bias Variables keyed 'h1'/'b1' .. 'h3'/'b3', 'out'/'b_out'.
    """
    num_f_inputs = f_input_layer.get_shape().as_list()[1]
    # Fan-in of each hidden layer; h1 reads the raw features, h2/h3 read the
    # previous hidden layer.
    fan_ins = [num_f_inputs, hidden_units_per_layer, hidden_units_per_layer]
    mlp_weights = {}
    for layer_num, n_in in enumerate(fan_ins, start=1):
        mlp_weights['h%d' % layer_num] = tf.Variable(
            tf.random_uniform([n_in, hidden_units_per_layer],
                              **_weight_init_range(n_in, hidden_units_per_layer)))
        mlp_weights['b%d' % layer_num] = tf.Variable(tf.zeros([hidden_units_per_layer]))
    mlp_weights['out'] = tf.Variable(
        tf.random_uniform([hidden_units_per_layer, 1],
                          **_weight_init_range(hidden_units_per_layer, 1)))
    mlp_weights['b_out'] = tf.Variable(tf.zeros([1]))
    # Stack the three sigmoid layers, then the final linear projection.
    activation = f_input_layer
    for layer_num in (1, 2, 3):
        activation = tf.nn.sigmoid(
            tf.matmul(activation, mlp_weights['h%d' % layer_num]) + mlp_weights['b%d' % layer_num])
    out = tf.matmul(activation, mlp_weights['out']) + mlp_weights['b_out']
    return (out, mlp_weights)
|
def get_kl_weight(curr_iter, on_epoch=100):
    """Sigmoid-scheduled KL annealing weight.

    Rises from ~0 towards 1.0, crossing 0.5 at on_epoch/2 and reaching
    effectively 1.0 by `on_epoch`.

    Args:
        curr_iter: current epoch (or iteration) count.
        on_epoch: epoch at which the weight should be fully "on".

    Returns:
        float in (0, 1).
    """
    steepness = 25.0 / on_epoch
    midpoint = on_epoch / 2.0
    return 1.0 / (1 + math.exp(-steepness * (curr_iter - midpoint)))
|
def chunk_df(df, size):
    """Splits a Pandas dataframe into chunks of size `size`.

    The final chunk may be smaller than `size` when len(df) is not an exact
    multiple of `size`. Returns a lazy generator of row-slices.

    See here: https://stackoverflow.com/a/25701576/1424734
    """
    # `range` (not Python-2-only `xrange`) keeps this working on Python 3;
    # on Python 2 it only costs a small index list.
    return (df[pos:(pos + size)] for pos in range(0, len(df), size))
|
def load_data(train_filename, valid_filename, test_filename, delimiter='\t', col_names=('user_id', 'item_id', 'rating')):
    """Helper function to load in/preprocess dataframes.

    Each file is read as headerless delimited text with columns `col_names`,
    and the 1-based 'user_id'/'item_id' columns are shifted to 0-based.

    Args:
        train_filename, valid_filename, test_filename: paths to ratings files.
        delimiter: field delimiter (tab by default).
        col_names: column names to assign; must include 'user_id' and
            'item_id'. (A tuple default avoids the mutable-default pitfall.)

    Returns:
        (train_data, valid_data, test_data) tuple of DataFrames.
    """
    def _load(filename):
        # Read one ratings file and convert ids from 1-based to 0-based.
        data = pd.read_csv(filename, delimiter=delimiter, header=None, names=col_names)
        data['user_id'] = (data['user_id'] - 1)
        data['item_id'] = (data['item_id'] - 1)
        return data
    return (_load(train_filename), _load(valid_filename), _load(test_filename))
|
def train(model, sess, saver, train_data, valid_data, batch_size, max_epochs, use_early_stop, early_stop_max_epoch):
    """Train `model`, checkpointing via `saver` and optionally early-stopping
    on validation RMSE.

    Args:
        model: model exposing train_iteration/eval_loss/eval_rmse and a
            `model_filename` attribute used as the checkpoint path.
        sess: TensorFlow session passed through to `saver.save`.
        saver: tf.train.Saver used to checkpoint the model.
        train_data, valid_data: Pandas dataframes of ratings.
        batch_size: minibatch size; falsy means full-batch training.
        max_epochs: maximum number of passes over the training data.
        use_early_stop: if True, stop after `early_stop_max_epoch` epochs
            without validation improvement (checkpointing only on improvement).
        early_stop_max_epoch: patience, in epochs, for early stopping.
    """
    # Report starting losses on one sample batch before any training.
    batch = (train_data.sample(batch_size) if batch_size else train_data)
    train_error = model.eval_loss(batch)
    train_rmse = model.eval_rmse(batch)
    valid_rmse = model.eval_rmse(valid_data)
    print('[start] Train error: {:3f}, Train RMSE: {:3f}; Valid RMSE: {:3f}'.format(train_error, train_rmse, valid_rmse))
    prev_valid_rmse = float('Inf')
    early_stop_epochs = 0
    for epoch in xrange(max_epochs):
        # Reshuffle each epoch and chunk into minibatches (or one full batch).
        shuffled_df = train_data.sample(frac=1)
        batches = (chunk_df(shuffled_df, batch_size) if batch_size else [train_data])
        for (batch_iter, batch) in enumerate(batches):
            model.train_iteration(batch)
            # Per-batch progress; train stats are on the current batch only.
            train_error = model.eval_loss(batch)
            train_rmse = model.eval_rmse(batch)
            valid_rmse = model.eval_rmse(valid_data)
            print('[{:d}-{:d}] Train error: {:3f}, Train RMSE: {:3f}; Valid RMSE: {:3f}'.format(epoch, batch_iter, train_error, train_rmse, valid_rmse))
        # Early-stopping bookkeeping, once per epoch, using the validation
        # RMSE from the last batch of the epoch.
        if use_early_stop:
            early_stop_epochs += 1
            if (valid_rmse < prev_valid_rmse):
                # Improvement: reset patience and checkpoint the best model.
                prev_valid_rmse = valid_rmse
                early_stop_epochs = 0
                saver.save(sess, model.model_filename)
            elif (early_stop_epochs == early_stop_max_epoch):
                # NOTE(review): this stops training but does not restore the
                # best checkpoint into the live session; callers must reload.
                print('Early stopping ({} vs. {})...'.format(prev_valid_rmse, valid_rmse))
                break
        else:
            # No early stopping: checkpoint after every epoch.
            saver.save(sess, model.model_filename)
|
def test(model, sess, saver, test_data, train_data=None, log=True):
    """Evaluate final RMSE on `test_data` (and on `train_data` when given).

    Args:
        model: model exposing eval_rmse.
        sess, saver: accepted for interface symmetry with train(); unused.
        test_data: dataframe to compute the returned RMSE on.
        train_data: optional dataframe; its RMSE is only printed, not returned.
        log: whether to print the RMSE values.

    Returns:
        The test-set RMSE.
    """
    if (train_data is not None):
        final_train_rmse = model.eval_rmse(train_data)
        if log:
            print('Final train RMSE: {}'.format(final_train_rmse))
    final_test_rmse = model.eval_rmse(test_data)
    if log:
        print('Final test RMSE: {}'.format(final_test_rmse))
    return final_test_rmse
|
def vime_self(x_unlab, p_m, alpha, parameters):
    """Self-supervised learning part in VIME.

    Trains a one-hidden-layer encoder on corrupted copies of the unlabeled
    data with two pretext heads: mask estimation ('mask') and feature
    reconstruction ('feature').

    Args:
        x_unlab: unlabeled feature matrix of shape (n_samples, dim).
        p_m: corruption probability passed to mask_generator.
        alpha: hyper-parameter weighting the feature-reconstruction loss
            relative to the mask loss.
        parameters: dict with 'epochs' and 'batch_size'.

    Returns:
        encoder: Keras Model mapping inputs to the hidden representation.
    """
    (_, dim) = x_unlab.shape
    epochs = parameters['epochs']
    batch_size = parameters['batch_size']
    # Encoder: one dense ReLU layer of the same width as the input.
    inputs = Input(shape=(dim,))
    h = Dense(int(dim), activation='relu')(inputs)
    # Two pretext heads share the encoder output.
    output_1 = Dense(dim, activation='sigmoid', name='mask')(h)
    output_2 = Dense(dim, activation='sigmoid', name='feature')(h)
    model = Model(inputs=inputs, outputs=[output_1, output_2])
    model.compile(optimizer='rmsprop', loss={'mask': 'binary_crossentropy', 'feature': 'mean_squared_error'}, loss_weights={'mask': 1, 'feature': alpha})
    # Pretext data: corrupt x_unlab, then learn to recover both the
    # corruption mask and the original features.
    # NOTE(review): assumes mask_generator/pretext_generator follow the VIME
    # reference implementation (outputs shaped like x_unlab) -- confirm.
    m_unlab = mask_generator(p_m, x_unlab)
    (m_label, x_tilde) = pretext_generator(m_unlab, x_unlab)
    model.fit(x_tilde, {'mask': m_label, 'feature': x_unlab}, epochs=epochs, batch_size=batch_size)
    # Expose the hidden layer (layers[1]) as the learned encoder.
    layer_name = model.layers[1].name
    layer_output = model.get_layer(layer_name).output
    encoder = models.Model(inputs=model.input, outputs=layer_output)
    return encoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.