repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
torch2trt | torch2trt-master/torch2trt/converters/identity.py | from torch2trt.torch2trt import *
@tensorrt_converter('torch.Tensor.contiguous')
@tensorrt_converter('torch.nn.functional.dropout')
@tensorrt_converter('torch.nn.functional.dropout2d')
@tensorrt_converter('torch.nn.functional.dropout3d')
def convert_functional_identity(ctx):
    """Map functional no-ops (contiguous, dropout at inference) to a pass-through.

    The output tensor simply reuses the input's TRT tensor; no layer is added.
    """
    tensor = ctx.method_args[0]
    # A tensor that never entered the TRT graph has nothing to forward.
    if not hasattr(tensor, '_trt'):
        return
    trt_in = add_missing_trt_tensors(ctx.network, [tensor])[0]
    ctx.method_return._trt = trt_in
@tensorrt_converter('torch.nn.Dropout.forward')
@tensorrt_converter('torch.nn.Dropout2d.forward')
@tensorrt_converter('torch.nn.Dropout3d.forward')
def convert_identity(ctx):
    """Map Dropout module forwards (no-ops at inference) to a pass-through.

    method_args[0] is the module (self); the tensor argument is at index 1.
    """
    tensor = ctx.method_args[1]
    # A tensor that never entered the TRT graph has nothing to forward.
    if not hasattr(tensor, '_trt'):
        return
    trt_in = add_missing_trt_tensors(ctx.network, [tensor])[0]
    ctx.method_return._trt = trt_in
| 865 | 31.074074 | 64 | py |
torch2trt | torch2trt-master/torch2trt/converters/softmax.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.softmax')
@tensorrt_converter('torch.nn.functional.softmax')
def convert_softmax(ctx):
    """Convert softmax to a TensorRT softmax layer.

    Reads ``dim`` from kwargs or positional args. Previously, calling
    softmax without an explicit ``dim`` left the local unbound and raised
    NameError; we now fall back to PyTorch's legacy implicit-dim rule
    (dim 0 for 0/1/3-D inputs, dim 1 otherwise), matching
    ``torch.nn.functional._get_softmax_dim``.
    """
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    # get dim from args or kwargs
    if 'dim' in ctx.method_kwargs:
        dim = ctx.method_kwargs['dim']
    elif len(ctx.method_args) >= 2:
        dim = ctx.method_args[1]
    else:
        dim = None

    if dim is None:
        # mirror the deprecated implicit-dim behavior of F.softmax
        dim = 0 if input.dim() in (0, 1, 3) else 1

    # convert negative dims
    if dim < 0:
        dim = len(input.shape) + dim

    axes = torch_dim_to_trt_axes(dim)

    layer = ctx.network.add_softmax(input=input_trt)
    layer.axes = axes

    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_softmax_module():
    # softmax over the channel dimension
    return torch.nn.Softmax(1)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_softmax_module_dim2():
    # softmax over a spatial dimension
    return torch.nn.Softmax(2)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_softmax_module_neg1():
    # negative dim: last dimension
    return torch.nn.Softmax(-1)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_softmax_module_dim_neg2():
    # negative dim: second-to-last dimension
    return torch.nn.Softmax(-2)
| 1,426 | 27.54 | 69 | py |
torch2trt | torch2trt-master/torch2trt/converters/split.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.split')
@tensorrt_converter('torch.Tensor.split')
def convert_split(ctx):
    """Convert torch.split / Tensor.split into a chain of TRT slice layers.

    The split-size argument (position 1) is deliberately ignored: each
    output tensor already carries its own shape, which is all a slice needs.
    """
    tensor = get_arg(ctx, 'input', 0, None)
    tensor_trt = add_missing_trt_tensors(ctx.network, [tensor])[0]
    dim = get_arg(ctx, 'dim', 2, 0)

    outputs = ctx.method_return

    ndim = len(tensor.shape)
    begin = [0] * ndim
    step = [1] * ndim

    # walk the outputs, advancing the start offset along the split dim
    offset = 0
    for out in outputs:
        out_shape = list(out.shape)
        begin[dim] = offset
        slice_layer = ctx.network.add_slice(tensor_trt, start=begin, shape=out_shape, stride=step)
        out._trt = slice_layer.get_output(0)
        offset += out_shape[dim]
class TorchSplit(torch.nn.Module):
    """Test helper: applies ``torch.split`` with pre-bound arguments."""

    def __init__(self, *args, **kwargs):
        super(TorchSplit, self).__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, x):
        # forward the stored arguments to the functional form
        return torch.split(x, *self.args, **self.kwargs)
class TensorSplit(torch.nn.Module):
    """Test helper: applies ``Tensor.split`` (method form) with pre-bound arguments."""

    def __init__(self, *args, **kwargs):
        super(TensorSplit, self).__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, x):
        # method form, so the tensor itself dispatches the split
        return x.split(*self.args, **self.kwargs)
# conversion tests for various split sizes / dims (functional and method form)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_split_1_1():
    return TorchSplit(1, 1)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_split_2_1():
    return TorchSplit(2, 1)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_split_3_1():
    return TorchSplit(3, 1)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_split_3_2():
    return TorchSplit(3, 2)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_tensor_split_3_2():
    return TensorSplit(3, 2)
torch2trt | torch2trt-master/torch2trt/converters/adaptive_max_pool3d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.nn.functional.adaptive_max_pool3d")
def convert_adaptive_max_pool3d(ctx):
    """Convert adaptive_max_pool3d to a fixed-window TRT max-pool layer.

    Fixes: arguments are now read via ``get_arg`` so keyword calls
    (``adaptive_max_pool3d(x, output_size=...)``) no longer raise
    IndexError, and the input TRT tensor is obtained through
    ``add_missing_trt_tensors`` for consistency with other converters.

    NOTE(review): kernel/stride are derived by integer division, so this is
    only exactly equivalent to adaptive pooling when the input spatial dims
    are divisible by the output size — same limitation as before.
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    output_size = get_arg(ctx, 'output_size', pos=1, default=None)
    output = ctx.method_return

    if isinstance(output_size, int):
        output_size = (output_size,) * 3

    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]

    stride = (
        input_trt.shape[-3] // output_size[-3],
        input_trt.shape[-2] // output_size[-2],
        input_trt.shape[-1] // output_size[-1],
    )

    kernel_size = stride
    layer = ctx.network.add_pooling_nd(
        input=input_trt, type=trt.PoolingType.MAX, window_size=kernel_size
    )
    layer.stride_nd = stride

    output._trt = layer.get_output(0)
# conversion tests: output sizes that evenly divide the (16, 224, 224) input
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 16, 224, 224)])
def test_adaptive_max_pool3d_1x1x1():
    return torch.nn.AdaptiveMaxPool3d((1, 1, 1))


@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 16, 224, 224)])
def test_adaptive_max_pool3d_2x2x2():
    return torch.nn.AdaptiveMaxPool3d((2, 2, 2))


@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 16, 224, 224)])
def test_adaptive_max_pool3d_3x3x3():
    return torch.nn.AdaptiveMaxPool3d((3, 3, 3))
| 1,255 | 28.904762 | 77 | py |
torch2trt | torch2trt-master/torch2trt/converters/ConvTranspose2d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.nn.ConvTranspose2d.forward", enabled=trt_version() < '7.0')
def convert_ConvTranspose2d(ctx):
    """Convert ConvTranspose2d into a TRT deconvolution layer (TRT < 7 only)."""
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    # normalize scalar hyper-parameters to 2-tuples
    kernel_size = module.kernel_size
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size,) * 2

    stride = module.stride
    if not isinstance(stride, tuple):
        stride = (stride,) * 2

    padding = module.padding
    if not isinstance(padding, tuple):
        padding = (padding,) * 2

    kernel = module.weight.detach().cpu().numpy()

    # an empty Weights object of the right dtype acts as "no bias"
    bias = trt.Weights(torch_dtype_to_trt(module.weight.dtype))
    if module.bias is not None:
        bias = module.bias.detach().cpu().numpy()

    layer = ctx.network.add_deconvolution(
        input=input_trt,
        num_output_maps=module.out_channels,
        kernel_shape=kernel_size,
        kernel=kernel,
        bias=bias,
    )
    layer.stride = stride

    # if output_padding in the original pytorch layer is not 0, pre_padding and
    # post_padding must be set separately; otherwise the output dimensions of
    # pytorch and tensorrt may differ.
    output_padding = module.output_padding
    if output_padding[0] + output_padding[1] > 0:
        layer.pre_padding = padding
        layer.post_padding = trt.tensorrt.DimsHW(padding[0] - output_padding[0], padding[1] - output_padding[1])
    else:
        layer.padding = padding

    if module.groups is not None:
        layer.num_groups = module.groups

    output._trt = layer.get_output(0)
# conversion tests covering stride/padding/kernel-size combinations
@add_module_test(torch.float32, torch.device("cuda"), [(1,3,224,224)], enabled=trt_version() < '7.0')
def test_square_kernel_equal_stride_mode():
    return torch.nn.ConvTranspose2d(3,3,3,stride=2)


@add_module_test(torch.float32, torch.device("cuda"), [(1,3,224,224)], enabled=trt_version() < '7.0')
def test_square_kernel_equal_stride_mode_unequal_op_size():
    return torch.nn.ConvTranspose2d(3,6,3,stride=2)


@add_module_test(torch.float32, torch.device("cuda"), [(1,3,224,224)], enabled=trt_version() < '7.0')
def test_unequal_stride_mode():
    return torch.nn.ConvTranspose2d(3,3,3, stride=(2,1), padding=(4,2))


@add_module_test(torch.float32, torch.device("cuda"), [(1,3,112,112)], enabled=trt_version() < '7.0')
@add_module_test(torch.float32, torch.device("cuda"), [(1,3,7,7)], enabled=trt_version() < '7.0')
def test_kernelsize_4():
    return torch.nn.ConvTranspose2d(3,3,4, stride=2, padding=1)
| 2,617 | 36.942029 | 189 | py |
torch2trt | torch2trt-master/torch2trt/converters/mul.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.mul')
@tensorrt_converter('torch.Tensor.mul_')
@tensorrt_converter('torch.Tensor.__imul__')
@tensorrt_converter('torch.Tensor.__mul__')
@tensorrt_converter('torch.Tensor.__rmul__')
def convert_mul(ctx):
    """Elementwise multiply -> TRT PROD elementwise layer (with broadcasting)."""
    lhs, rhs = ctx.method_args[0], ctx.method_args[1]
    result = ctx.method_return

    # scalars/constants get constant layers; ranks are then aligned for broadcast
    lhs_trt, rhs_trt = add_missing_trt_tensors(ctx.network, [lhs, rhs])
    lhs_trt, rhs_trt = broadcast_trt_tensors(ctx.network, [lhs_trt, rhs_trt], len(result.shape))

    prod = ctx.network.add_elementwise(lhs_trt, rhs_trt, trt.ElementWiseOperation.PROD)
    result._trt = prod.get_output(0)
class Mul(torch.nn.Module):
    """Test module computing the elementwise product via the * operator."""

    def __init__(self):
        super(Mul, self).__init__()

    def forward(self, x, y):
        return x * y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_mul_basic():
    return Mul()


class IMul(torch.nn.Module):
    """In-place multiply (x *= y), exercising __imul__."""

    def __init__(self):
        super(IMul, self).__init__()

    def forward(self, x, y):
        x *= y
        return x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_mul_imul():
    return IMul()


class TorchMul(torch.nn.Module):
    """Functional form, exercising torch.mul."""

    def __init__(self):
        super(TorchMul, self).__init__()

    def forward(self, x, y):
        return torch.mul(x, y)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_mul_torchmul():
    return TorchMul()


class RMulInt(torch.nn.Module):
    """Right-multiply by an int scalar, exercising __rmul__."""

    def __init__(self):
        super(RMulInt, self).__init__()

    def forward(self, x):
        return 10 * x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_rmul_int():
    return RMulInt()


class RMulFloat(torch.nn.Module):
    """Right-multiply by a float scalar, exercising __rmul__."""

    def __init__(self):
        super(RMulFloat, self).__init__()

    def forward(self, x):
        return 10.0 * x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_rmul_float():
    return RMulFloat()


class MulConstantNoBatch(torch.nn.Module):
    """Multiply by a registered constant buffer without a batch dimension."""

    def __init__(self):
        super(MulConstantNoBatch, self).__init__()
        self.register_buffer('y', torch.ones((3, 10, 10)))

    def forward(self, x):
        return x * self.y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_mul_constant_nobatch():
    return MulConstantNoBatch()


class MulConstantBatch(torch.nn.Module):
    """Multiply by a registered constant buffer that includes a batch dimension."""

    def __init__(self):
        super(MulConstantBatch, self).__init__()
        self.register_buffer('y', torch.ones((1, 3, 10, 10)))

    def forward(self, x):
        return x * self.y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_mul_constant_batch():
    return MulConstantBatch()
| 2,891 | 25.290909 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/example_plugin.py | import torch
import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import numpy as np
import ctypes
try:
    # the compiled plugin library must be loadable for anything below to work
    ctypes.CDLL('libtorch2trt_plugins.so')

    def create_example_plugin(scale):
        """Build an 'ExamplePlugin' instance with a single float32 'scale' field."""
        registry = trt.get_plugin_registry()
        creator = registry.get_plugin_creator('ExamplePlugin', '1', '')
        fc = trt.PluginFieldCollection([
            trt.PluginField(
                'scale',
                scale * np.ones((1,)).astype(np.float32),
                trt.PluginFieldType.FLOAT32
            )
        ])
        return creator.create_plugin('', fc)

    class ExampleLayer(nn.Module):
        """Reference layer: multiplies its input by a constant scale."""

        def __init__(self, scale):
            super().__init__()
            self.scale = scale

        def forward(self, x):
            return self.scale * x

    @tensorrt_converter(ExampleLayer.forward)
    def convert_example_layer(ctx):
        """Convert ExampleLayer.forward to the ExamplePlugin TRT plugin."""
        module = get_arg(ctx, 'self', pos=0, default=None)
        input = get_arg(ctx, 'x', pos=1, default=None)
        output = ctx.method_return

        input_trt = input._trt
        plugin = create_example_plugin(module.scale)
        layer = ctx.network.add_plugin_v2([input_trt], plugin)
        output._trt = layer.get_output(0)

    @add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6)])
    def test_example_layer_scale3():
        return ExampleLayer(3.0)

    @add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6)])
    def test_example_layer_scale4():
        return ExampleLayer(4.0)

except Exception:
    # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
    # Exception still covers the expected OSError (library not found) and any
    # plugin-registry lookup failure.
    pass  # TODO: log plugin not found
torch2trt | torch2trt-master/torch2trt/converters/getitem.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
def slice_to_trt(ctx, dim_size, dim_slice):
    """Normalize a python ``slice`` over a dim of size ``dim_size``.

    Returns (start, size, stride) as IntWrapper values suitable for a TRT
    slice layer.
    """
    begin = make_int_wrapper(dim_slice.start if dim_slice.start is not None else 0)
    end = make_int_wrapper(dim_slice.stop if dim_slice.stop is not None else dim_size)
    step = make_int_wrapper(dim_slice.step if dim_slice.step is not None else 1)

    # ceil-divide the span by the step to get the element count
    count = (end - begin - 1) // step + 1

    return begin, count, step
def num_slice_types(slices):
    """Count the entries that consume an input dimension (slice or int).

    ``None`` (new axis) and ``Ellipsis`` entries do not consume a dimension
    and are not counted.
    """
    return sum(1 for s in slices if isinstance(s, (slice, int)))
@tensorrt_converter('torch.Tensor.__getitem__')
def convert_tensor_getitem(ctx):
    """Convert Tensor.__getitem__ (basic indexing: slices, ints, None,
    Ellipsis) into a TRT slice layer plus an optional shuffle.

    NOTE(review): `slices` is iterated directly, so this assumes the index
    expression arrives as a tuple — confirm single-index calls are wrapped
    upstream. Advanced (tensor/bool) indexing is not handled here.
    """
    input = ctx.method_args[0]
    slices = ctx.method_args[1]
    output = ctx.method_return

    # nothing to do for tensors outside the TRT graph
    if not hasattr(input, '_trt'):
        return

    input_trt = input._trt

    # Step 1 - Replace ellipsis with expanded slices
    num_ellipsis = len(input.shape) - num_slice_types(slices)

    new_slices = []
    for s in slices:
        if s == Ellipsis:
            # expand to as many full slices as the ellipsis stands for
            while num_ellipsis > 0:
                new_slices.append(slice(None, None, None))
                num_ellipsis -= 1
        elif isinstance(s, slice):
            new_slices.append(s)
        elif s is None:
            new_slices.append(None)
        elif isinstance(s, int) or isinstance(s, IntWrapper):
            new_slices.append(s)

    # fill missing slices at end
    while num_slice_types(new_slices) < len(input.shape):
        new_slices.append(slice(None, None, None))

    # Step 2 - Remove batch from slices (TRT from this point)
    slices = tuple(new_slices)  # remove batch

    # Step 3 - Add slice layer (will currently ignore 'None' slices)
    starts = []
    sizes = []
    strides = []

    input_dim = 0
    input_size = input.size()

    for s in slices:
        if input_dim >= len(input_trt.shape):
            break

        if isinstance(s, slice):
            start, size, stride = slice_to_trt(ctx, input_size[input_dim], s)
            starts.append(start)
            sizes.append(size)
            strides.append(stride)
            input_dim += 1
        elif isinstance(s, int) or isinstance(s, IntWrapper):
            # an int index is a size-1 slice; the dim is dropped in Step 4
            starts.append(make_int_wrapper(s))
            sizes.append(make_int_wrapper(1))
            strides.append(make_int_wrapper(1))
            input_dim += 1

    starts = make_size_wrapper(starts)
    sizes = make_size_wrapper(sizes)
    strides = make_size_wrapper(strides)

    # static dims go in the constructor; dynamic values via set_input
    layer = ctx.network.add_slice(input_trt, starts, sizes, strides)
    layer.set_input(1, starts._trt)
    layer.set_input(2, sizes._trt)
    layer.set_input(3, strides._trt)
    output_trt = layer.get_output(0)

    # Step 4 - Add shuffle layer to insert dimensions for 'None' slices and
    # remove dimensions for 'int' slices
    num_non_slice = len([s for s in slices if not isinstance(s, slice)])

    if num_non_slice > 0:
        final_shape = []
        i = 0
        for s in slices:
            if isinstance(s, slice):
                # copy slice dim
                final_shape.append(sizes[i])
                i += 1
            elif isinstance(s, int) or isinstance(s, IntWrapper):
                # remove int dim
                i += 1
            else:
                # insert None dim
                final_shape.append(make_int_wrapper(1))
        final_shape = make_size_wrapper(final_shape)

        layer = ctx.network.add_shuffle(output_trt)
        layer.reshape_dims = tuple(output.shape)  # exclude batch
        layer.set_input(1, final_shape._trt)
        output_trt = layer.get_output(0)

    output._trt = output_trt
class LambdaModule(torch.nn.Module):
    """Wraps an arbitrary single-argument callable as an nn.Module.

    Used by the tests below to express indexing expressions as modules.
    """

    def __init__(self, fn):
        super(LambdaModule, self).__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x)
# conversion tests: int indices, strided slices, None insertion, ellipsis
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_tensor_getitem_1d_int():
    return LambdaModule(lambda x: x[:, 0])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_int():
    return LambdaModule(lambda x: x[:, 0])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_strided():
    return LambdaModule(lambda x: x[:, ::2])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_strided_offset():
    return LambdaModule(lambda x: x[:, 1::2])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_strided_range():
    return LambdaModule(lambda x: x[:, 1:3:2])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_insert_dim():
    return LambdaModule(lambda x: x[:, None])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_insert_dim_ellipsis():
    return LambdaModule(lambda x: x[:, None, ...])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_append_dim():
    return LambdaModule(lambda x: x[:, ..., None])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_append_2dim():
    return LambdaModule(lambda x: x[:, ..., None, None])


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 4, 3)])
def test_tensor_getitem_2d_weird_combo():
    return LambdaModule(lambda x: x[:, 0:3:4, None, None, 1, ...])
| 5,696 | 28.671875 | 110 | py |
torch2trt | torch2trt-master/torch2trt/converters/activation.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .unary import UnaryModule
# | RELU : Rectified Linear activation (impl in relu.py)
# | SIGMOID : Sigmoid activation (impl in sigmoid.py)
# | TANH : Hyperbolic Tangent activation (impl in tanh.py)
# | LEAKY_RELU : Leaky Relu activation: f(x) = x if x >= 0, f(x) = alpha * x if x < 0
@tensorrt_converter('torch.nn.functional.leaky_relu')
@tensorrt_converter('torch.nn.functional.leaky_relu_')
def convert_leaky_relu(ctx):
    """LeakyReLU -> TRT LEAKY_RELU activation; alpha carries negative_slope."""
    x = get_arg(ctx, 'input', pos=0, default=None)
    slope = get_arg(ctx, 'negative_slope', pos=1, default=0.01)
    out = ctx.method_return

    x_trt = add_missing_trt_tensors(ctx.network, [x])[0]
    act = ctx.network.add_activation(x_trt, trt.ActivationType.LEAKY_RELU)
    act.alpha = slope

    out._trt = act.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_leaky_relu():
    return UnaryModule(lambda x: torch.nn.functional.leaky_relu(x))


# | ELU : Elu activation: f(x) = x if x >= 0, f(x) = alpha * (exp(x) - 1) if x < 0
@tensorrt_converter('torch.nn.functional.elu')
@tensorrt_converter('torch.nn.functional.elu_')
def convert_elu(ctx):
    """ELU -> TRT ELU activation; the alpha parameter is passed through."""
    input = get_arg(ctx, 'input', pos=0, default=None)
    alpha = get_arg(ctx, 'alpha', pos=1, default=1.0)
    output = ctx.method_return

    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    layer = ctx.network.add_activation(input_trt, trt.ActivationType.ELU)
    layer.alpha = alpha

    output._trt = layer.get_output(0)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_elu():
    return UnaryModule(lambda x: torch.nn.functional.elu(x))
# | SELU : Selu activation: f(x) = beta * x if x > 0, f(x) = beta * (alpha * exp(x) - alpha) if x <= 0
@tensorrt_converter('torch.selu')
@tensorrt_converter('torch.selu_')
@tensorrt_converter('torch.nn.functional.selu')
@tensorrt_converter('torch.nn.functional.selu_')
def convert_selu(ctx):
    """SELU -> TRT SELU activation.

    SELU has no tunable parameters in PyTorch; the alpha/beta constants
    below are the fixed values used by torch.nn.functional.selu. (The
    previous version read an 'alpha' argument that torch.selu does not
    accept and never used it — that unused local has been removed.)
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    output = ctx.method_return

    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    layer = ctx.network.add_activation(input_trt, trt.ActivationType.SELU)
    layer.alpha = 1.6732632423543772848170429916717
    layer.beta = 1.0507009873554804934193349852946

    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_selu():
    return UnaryModule(lambda x: torch.nn.functional.selu(x))


# | SOFTSIGN : Softsign activation: f(x) = x / (1 + \|x\|)
@tensorrt_converter('torch.nn.functional.softsign')
def convert_softsign(ctx):
    """Softsign -> TRT SOFTSIGN activation (no parameters)."""
    input = get_arg(ctx, 'input', pos=0, default=None)
    output = ctx.method_return

    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    layer = ctx.network.add_activation(input_trt, trt.ActivationType.SOFTSIGN)

    output._trt = layer.get_output(0)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_softsign():
    return UnaryModule(lambda x: torch.nn.functional.softsign(x))


# | SOFTPLUS : Softplus activation: f(x) = alpha * log(exp(beta * x) + 1)
@tensorrt_converter('torch.nn.functional.softplus')
def convert_softplus(ctx):
    """Softplus -> TRT SOFTPLUS activation.

    NOTE(review): PyTorch's beta/threshold arguments are not read here, so
    only the default call form is faithfully converted — confirm if
    non-default beta is ever used.
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    output = ctx.method_return

    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    layer = ctx.network.add_activation(input_trt, trt.ActivationType.SOFTPLUS)

    output._trt = layer.get_output(0)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_softplus():
    return UnaryModule(lambda x: torch.nn.functional.softplus(x))
# | CLIP : Clip activation: f(x) = max(alpha, min(beta, x)) (impl in clamp.py)
# | HARD_SIGMOID : Hard sigmoid activation: f(x) = max(0, min(1, alpha * x + beta)) (not sure if there is this in Pytorch?)
# | SCALED_TANH : Scaled Tanh activation: f(x) = alpha * tanh(beta * x) (not sure if there is this in Pytorch?)
# | THRESHOLDED_RELU : Thresholded Relu activation: f(x) = x if x > alpha, f(x) = 0 if x <= alpha (not sure if there is this in Pytorch?) | 4,328 | 34.77686 | 141 | py |
torch2trt | torch2trt-master/torch2trt/converters/roll.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.roll')
@tensorrt_converter('torch.Tensor.roll')
def convert_roll(ctx):
    """Convert torch.roll using a WRAP-mode TRT slice layer."""
    input = get_arg(ctx, 'input', 0, None)
    shifts = get_arg(ctx, 'shifts', 1, None)
    dims = get_arg(ctx, 'dims', 2, None)
    output = ctx.method_return

    assert dims is not None, "roll converter only supports roll when dims is specified"

    ndim = input.ndim

    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]

    # normalize scalar shifts/dims to 1-tuples
    try:
        iter(shifts)
    except:
        shifts = (shifts,)
        dims = (dims,)

    start = [0] * ndim
    shape = tuple([int(d) for d in input.shape])
    stride = [1] * ndim

    # a roll by s along dim d is a wrapping slice starting at (-s) mod size
    for s, d in zip(shifts, dims):
        start[d] = (-s) % shape[d]

    start = tuple(start)
    shape = tuple(shape)
    stride = tuple(stride)

    # feed the runtime shape so dynamic dimensions keep working
    shape_dynamic = ctx.network.add_shape(input._trt).get_output(0)

    layer = ctx.network.add_slice(
        input_trt,
        start,
        shape,
        stride
    )
    layer.set_input(2, shape_dynamic)
    layer.mode = trt.SliceMode.WRAP

    output._trt = layer.get_output(0)
class Roll(torch.nn.Module):
    """Test helper: applies ``torch.roll`` with pre-bound arguments."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, x):
        # delegate to the functional form with the stored arguments
        return torch.roll(x, *self.args, **self.kwargs)
# conversion tests: scalar and tuple shifts, positive and negative dims
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5)], max_batch_size=2)
def test_roll_int():
    return Roll(1, 1)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5)], max_batch_size=2)
def test_roll_int_dim():
    return Roll(1, -2)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5)], max_batch_size=2)
def test_roll_tuple():
    return Roll((2, 3), (1, 3))
torch2trt | torch2trt-master/torch2trt/converters/BatchNorm2d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.nn.BatchNorm2d.forward", enabled=trt_version() < '7.0')
def convert_BatchNorm2d(ctx):
    """Fold eval-mode BatchNorm2d into a per-channel TRT scale layer.

    Computes y = scale * x + bias with
        scale = gamma / sqrt(running_var + eps)
        bias  = beta - running_mean * scale
    Fixes: modules built with affine=False (weight/bias are None) no longer
    crash — gamma defaults to 1 and beta to 0.
    """
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    running_var = module.running_var.detach().cpu().numpy()
    running_mean = module.running_mean.detach().cpu().numpy()

    scale = 1.0 / np.sqrt(running_var + module.eps)
    if module.weight is not None:
        scale *= module.weight.detach().cpu().numpy()

    bias = -running_mean * scale
    if module.bias is not None:
        bias += module.bias.detach().cpu().numpy()

    power = np.ones_like(scale)

    layer = ctx.network.add_scale(input_trt, trt.ScaleMode.CHANNEL, bias, scale, power)
    output._trt = layer.get_output(0)
| 771 | 31.166667 | 87 | py |
torch2trt | torch2trt-master/torch2trt/converters/instance_norm.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
def _add_scale_1d2d3d(network, x_trt, mode, offset, scale, power):
    """Apply a TRT scale layer to tensors of any supported rank.

    TensorRT's scale layer expects 4-D NCHW input, so non-4-D tensors are
    reshaped to 4-D, scaled, then reshaped back to their original shape.

    Bug fix: the reshape-back step previously shuffled the output of the
    *first* shuffle layer (the pre-scale tensor, via a stale ``layer``
    reference), which silently dropped the scale op for non-4-D inputs.
    It now reshapes the scaled tensor.
    """
    ndim = len(x_trt.shape)
    y_trt = x_trt

    # flatten trailing dims so the tensor is 4-D
    if ndim != 4:
        layer = network.add_shuffle(y_trt)
        layer.reshape_dims = (x_trt.shape[0], x_trt.shape[1], x_trt.shape[2], -1)  # e.g. NCH -> NCHW
        y_trt = layer.get_output(0)

    y_trt = network.add_scale(y_trt, mode, offset, scale, power).get_output(0)

    # restore the original shape (from the *scaled* tensor)
    if ndim != 4:
        layer = network.add_shuffle(y_trt)
        layer.reshape_dims = tuple(x_trt.shape)
        y_trt = layer.get_output(0)

    return y_trt
@tensorrt_converter('torch.instance_norm')
@tensorrt_converter('torch.nn.functional.instance_norm')
def convert_instance_norm(ctx):
    """Convert instance_norm.

    CASE 1 (use_input_stats=False): fold the running statistics into a
    per-channel scale layer, exactly like batch norm.
    CASE 2 (use_input_stats=True): compute per-instance mean/variance over
    the spatial dims with reduce layers.

    Bug fix in CASE 1: the offset was previously computed *before* the
    affine weight was multiplied into the scale, so the result was
    ``w*x/sqrt(v+eps) - mean/sqrt(v+eps) + b`` instead of the correct
    ``w*(x - mean)/sqrt(v+eps) + b``. The weight is now folded into the
    scale first. The bias is also applied independently of the weight.
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    running_mean = get_arg(ctx, 'running_mean', pos=1, default=None)
    running_var = get_arg(ctx, 'running_var', pos=2, default=None)
    weight = get_arg(ctx, 'weight', pos=3, default=None)
    bias = get_arg(ctx, 'bias', pos=4, default=None)
    use_input_stats = get_arg(ctx, 'use_input_stats', pos=5, default=True)
    momentum = get_arg(ctx, 'momentum', pos=6, default=0.1)
    eps = get_arg(ctx, 'eps', pos=7, default=1e-05)
    output = ctx.method_return

    # CASE 1 - USING RUNNING STATISTICS
    if not use_input_stats:
        # equivalent to batch norm: y = scale * x + offset
        scale = 1.0 / np.sqrt(running_var.detach().cpu().numpy() + eps)
        if weight is not None:
            scale *= weight.detach().cpu().numpy()

        offset = -running_mean.detach().cpu().numpy() * scale
        if bias is not None:
            offset += bias.detach().cpu().numpy()

        power = np.ones_like(scale)

        result_trt = _add_scale_1d2d3d(ctx.network, input._trt, trt.ScaleMode.CHANNEL, offset, scale, power)

        output._trt = result_trt

    # CASE 2 - USING INPUT STATS
    else:
        eps_np = np.array([eps], dtype=np.float32)
        keep_dims = True
        reduce_axes = torch_dim_to_trt_axes(tuple(range(2, len(input.shape))))

        # compute mean over spatial
        mean_trt = ctx.network.add_reduce(input._trt, trt.ReduceOperation.AVG, reduce_axes, keep_dims).get_output(0)

        # compute variance over spatial (include eps, to reduce layer count)
        delta_trt = ctx.network.add_elementwise(input._trt, mean_trt, trt.ElementWiseOperation.SUB).get_output(0)
        var_trt = ctx.network.add_scale(delta_trt, trt.ScaleMode.UNIFORM, np.zeros_like(eps_np), np.ones_like(eps_np), 2 * np.ones_like(eps_np)).get_output(0)
        var_trt = ctx.network.add_reduce(var_trt, trt.ReduceOperation.AVG, reduce_axes, keep_dims).get_output(0)

        # compute sqrt(var + eps)
        var_trt = ctx.network.add_scale(var_trt, trt.ScaleMode.UNIFORM, eps_np, np.ones_like(eps_np), 0.5 * np.ones_like(eps_np)).get_output(0)

        # compute final result: (x - mean) / sqrt(var + eps)
        result_trt = ctx.network.add_elementwise(delta_trt, var_trt, trt.ElementWiseOperation.DIV).get_output(0)

        # compute affine (if applicable)
        if weight is not None:
            weight_np = weight.detach().cpu().numpy()
            bias_np = bias.detach().cpu().numpy()
            result_trt = _add_scale_1d2d3d(ctx.network, result_trt, trt.ScaleMode.CHANNEL, bias_np, weight_np, np.ones_like(bias_np))

        output._trt = result_trt
# STATIC
# static = track_running_stats=True (eval mode uses stored stats, CASE 1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3)])
def test_instance_norm_1d_static():
    return torch.nn.InstanceNorm1d(10, track_running_stats=True)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3)])
def test_instance_norm_2d_static():
    return torch.nn.InstanceNorm2d(10, track_running_stats=True)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3, 3)])
def test_instance_norm_3d_static():
    return torch.nn.InstanceNorm3d(10, track_running_stats=True)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3)])
def test_instance_norm_1d_static_affine():
    return torch.nn.InstanceNorm1d(10, affine=True, track_running_stats=True)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3)])
def test_instance_norm_2d_static_affine():
    return torch.nn.InstanceNorm2d(10, affine=True, track_running_stats=True)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3, 3)])
def test_instance_norm_3d_static_affine():
    return torch.nn.InstanceNorm3d(10, affine=True, track_running_stats=True)


# DYNAMIC
# dynamic = track_running_stats=False (per-instance statistics, CASE 2)

# @TODO(jwelsh): 1D dynamic test failing
# @add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3)])
# def test_instance_norm_1d_dynamic():
#     return torch.nn.InstanceNorm1d(10, track_running_stats=False)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3)])
def test_instance_norm_2d_dynamic():
    return torch.nn.InstanceNorm2d(10, track_running_stats=False)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3, 3)])
def test_instance_norm_3d_dynamic():
    return torch.nn.InstanceNorm3d(10, track_running_stats=False)


# @TODO(jwelsh): 1D dynamic test failing
# @add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3)])
# def test_instance_norm_1d_dynamic_affine():
#     return torch.nn.InstanceNorm1d(10, affine=True, track_running_stats=False)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3)])
def test_instance_norm_2d_dynamic_affine():
    return torch.nn.InstanceNorm2d(10, affine=True, track_running_stats=False)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3, 3)])
def test_instance_norm_3d_dynamic_affine():
    return torch.nn.InstanceNorm3d(10, affine=True, track_running_stats=False)
| 5,909 | 38.139073 | 158 | py |
torch2trt | torch2trt-master/torch2trt/converters/mean.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.mean')
@tensorrt_converter('torch.Tensor.mean')
def convert_mean(ctx):
    """Convert mean to a TRT AVG reduce layer.

    Fixes: calling ``torch.mean(x)`` / ``x.mean()`` with no ``dim`` left the
    local unbound and raised NameError; a full reduction over every axis is
    now used instead, matching PyTorch and the sibling min/max converters.

    NOTE(review): negative dims are not normalized here (pre-existing);
    confirm callers only pass non-negative dims.
    """
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    # get dims from args or kwargs; default = reduce over all dims
    if 'dim' in ctx.method_kwargs:
        dim = ctx.method_kwargs['dim']
    elif len(ctx.method_args) >= 2:
        dim = ctx.method_args[1]
    else:
        dim = tuple(range(len(input.shape)))

    # convert list to tuple
    if isinstance(dim, list):
        dim = tuple(dim)
    if not isinstance(dim, tuple):
        dim = (dim, )

    # create axes bitmask for reduce layer
    axes = torch_dim_to_trt_axes(dim)

    # get whether to keep dimensions
    if 'keepdim' in ctx.method_kwargs:
        keep_dims = ctx.method_kwargs['keepdim']
    elif len(ctx.method_args) == 3:
        keep_dims = ctx.method_args[2]
    else:
        keep_dims = False

    layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.AVG, axes, keep_dims)
    output._trt = layer.get_output(0)
class Mean(torch.nn.Module):
    """Test helper: reduces with ``Tensor.mean`` over a fixed dim/keepdim."""

    def __init__(self, dim, keepdim):
        super(Mean, self).__init__()
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, x):
        return x.mean(self.dim, self.keepdim)
# conversion tests: single-dim, tuple-dim, and keepdim reductions
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_channel():
    return Mean(1, False)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_tuple():
    return Mean((1, 2), False)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_mean_keepdim():
    return Mean(1, True)
torch2trt | torch2trt-master/torch2trt/converters/min.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .unary import UnaryModule
def __convert_min_elementwise(ctx):
    """Element-wise ``torch.min(a, b)`` -> TensorRT MIN elementwise layer."""
    lhs = ctx.method_args[0]
    rhs = ctx.method_args[1]
    result = ctx.method_return
    lhs_trt, rhs_trt = add_missing_trt_tensors(ctx.network, [lhs, rhs])
    lhs_trt, rhs_trt = broadcast_trt_tensors(ctx.network, [lhs_trt, rhs_trt], len(result.shape))
    min_layer = ctx.network.add_elementwise(lhs_trt, rhs_trt, trt.ElementWiseOperation.MIN)
    result._trt = min_layer.get_output(0)
def __convert_min_reduce(ctx):
    """``torch.min(input[, dim[, keepdim]])`` -> TensorRT MIN reduce layer.

    Only the values output is converted.  When torch.min returns a
    ``(values, indices)`` tuple, the indices tensor gets no TRT binding
    because TensorRT's reduce layer does not produce arg-min indices.
    (Fix: the previous version bound the indices to an unused local,
    which only obscured this limitation.)
    """
    input = ctx.method_args[0]
    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(0, len(input.shape))))
    keepdim = get_arg(ctx, 'keepdim', pos=2, default=False)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    if isinstance(ctx.method_return, torch.Tensor):
        output_val = ctx.method_return
    else:
        # (values, indices) overload -- indices intentionally not converted
        output_val = ctx.method_return[0]
    layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.MIN, torch_dim_to_trt_axes(dim), keepdim)
    output_val._trt = layer.get_output(0)
@tensorrt_converter('torch.min')
@tensorrt_converter('torch.Tensor.min')
def convert_min(ctx):
    """Dispatch torch.min to the elementwise or the reduce converter.

    A tensor second argument means the binary elementwise overload;
    anything else (no arg, or an int dim) is a reduction.
    """
    second_is_tensor = len(ctx.method_args) > 1 and isinstance(ctx.method_args[1], torch.Tensor)
    if second_is_tensor:
        __convert_min_elementwise(ctx)
    else:
        __convert_min_reduce(ctx)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(3, 3, 3)])
def test_min():
    """Global min; unsqueeze because a 0-d tensor cannot leave the network."""
    reduce_all = lambda x: torch.min(x).unsqueeze(0)
    return UnaryModule(reduce_all)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_min_reduce_dim1():
    """Min over dim 1, values only."""
    reduce_dim1 = lambda x: torch.min(x, 1)[0]
    return UnaryModule(reduce_dim1)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_min_reduce_dim22():
    """Min over dim 2, values only."""
    reduce_dim2 = lambda x: torch.min(x, 2)[0]
    return UnaryModule(reduce_dim2)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_min_reduce_dim1_keepdim():
    """Min over dim 1 with keepdim=True, values only."""
    reduce_keep = lambda x: torch.min(x, 1, keepdim=True)[0]
    return UnaryModule(reduce_keep)


class MinElementwise(torch.nn.Module):
    """Element-wise minimum of two broadcastable tensors."""

    def forward(self, x, y):
        return torch.min(x, y)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3), (1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3), (1,)])  # broadcast
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3), (1, 3, 3)])  # broadcast
def test_min_elementwise():
    """Element-wise min, including broadcast shapes."""
    module = MinElementwise()
    return module
| 2,813 | 37.027027 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/floordiv.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.__floordiv__')
@tensorrt_converter('torch.Tensor.__ifloordiv__')
@tensorrt_converter('torch.floor_divide')
def convert_floordiv(ctx):
    """Convert floor division to TensorRT elementwise layers.

    Torch's floor_divide (as targeted here) truncates negative results
    toward zero, while TensorRT's FLOOR_DIV rounds toward -Inf, so
    FLOOR_DIV cannot be used directly.  Instead the result is built as
    ``sign(a*b) * (|a| // |b|)``, which matches truncation-toward-zero.

    NOTE(review): when a or b is 0, the sign term evaluates 0/0 -> NaN;
    presumably callers never floor-divide by/into zero -- verify if that
    matters for your model.
    """
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    # we can not use ElementWiseOperation.FLOOR_DIV directly because Torch truncate negative result toward 0
    # but TensorRT FLOOR_DIV op toward -Inf
    # sign = ab / |ab|  (+1 or -1: the sign of the true quotient)
    # floordiv result: sign * (|a| // |b|)
    ab_layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.PROD)
    abs_ab_layer = ctx.network.add_unary(ab_layer.get_output(0), trt.UnaryOperation.ABS)
    sign_layer = ctx.network.add_elementwise(ab_layer.get_output(0), abs_ab_layer.get_output(0),
                                             trt.ElementWiseOperation.DIV)
    # |a| // |b| is non-negative, so FLOOR_DIV and truncation agree here
    abs_a_layer = ctx.network.add_unary(input_a_trt, trt.UnaryOperation.ABS)
    abs_b_layer = ctx.network.add_unary(input_b_trt, trt.UnaryOperation.ABS)
    abs_floor_layer = ctx.network.add_elementwise(abs_a_layer.get_output(0), abs_b_layer.get_output(0),
                                                  trt.ElementWiseOperation.FLOOR_DIV)
    # recombine: sign * (|a| // |b|)
    out_layer = ctx.network.add_elementwise(sign_layer.get_output(0), abs_floor_layer.get_output(0),
                                            trt.ElementWiseOperation.PROD)
    output._trt = out_layer.get_output(0)
class FloorDiv(torch.nn.Module):
    """``x // y`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return x // y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 1, 20)])
def test_floordiv_op():
    """tensor // tensor with broadcasting."""
    module = FloorDiv()
    return module


class FloorDivAssign (torch.nn.Module):
    """In-place ``x //= y`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        x //= y
        return x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 1, 20)])
def test_floordiv_op_assign():
    """In-place floor division (``__ifloordiv__``)."""
    module = FloorDivAssign()
    return module


class FloorDivConst(torch.nn.Module):
    """``x // 2.`` with a scalar divisor."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x // 2.


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20)])
def test_floordiv_op_const():
    """tensor // scalar."""
    module = FloorDivConst()
    return module


class TorchFloorDiv(torch.nn.Module):
    """Functional ``torch.floor_divide`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return torch.floor_divide(x, y)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 1, 20)])
def test_floordiv_func():
    """torch.floor_divide functional form."""
    module = TorchFloorDiv()
    return module
| 2,918 | 34.597561 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/add.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.add')
@tensorrt_converter('torch.Tensor.__iadd__')
@tensorrt_converter('torch.Tensor.__add__')
@tensorrt_converter('torch.Tensor.__radd__')
def convert_add(ctx):
    """Convert addition (tensor/tensor or tensor/scalar) to a SUM layer."""
    lhs = ctx.method_args[0]
    rhs = ctx.method_args[1]
    result = ctx.method_return
    lhs_trt, rhs_trt = add_missing_trt_tensors(ctx.network, [lhs, rhs])
    lhs_trt, rhs_trt = broadcast_trt_tensors(ctx.network, [lhs_trt, rhs_trt], len(result.shape))
    sum_layer = ctx.network.add_elementwise(lhs_trt, rhs_trt, trt.ElementWiseOperation.SUM)
    result._trt = sum_layer.get_output(0)
class Add(torch.nn.Module):
    """``x + y`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return x + y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_add_basic():
    """tensor + tensor."""
    module = Add()
    return module


class IAdd(torch.nn.Module):
    """In-place ``x += y`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        x += y
        return x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_add_iadd():
    """In-place add (``__iadd__``)."""
    module = IAdd()
    return module


class TorchAdd(torch.nn.Module):
    """Functional ``torch.add`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return torch.add(x, y)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_add_torchadd():
    """torch.add functional form."""
    module = TorchAdd()
    return module


class RAddInt(torch.nn.Module):
    """``1 + x`` with an int scalar (``__radd__``)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 1 + x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_add_radd_int():
    """int + tensor."""
    module = RAddInt()
    return module


class RAddFloat(torch.nn.Module):
    """``1.0 + x`` with a float scalar (``__radd__``)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 1.0 + x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_add_radd_float():
    """float + tensor."""
    module = RAddFloat()
    return module


class AddConstantNoBatch(torch.nn.Module):
    """Adds a buffered constant without a batch dim (broadcast over batch)."""

    def __init__(self):
        super().__init__()
        self.register_buffer('y', torch.ones((3, 10, 10)))

    def forward(self, x):
        return x + self.y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_add_constant_nobatch():
    """tensor + unbatched constant buffer."""
    module = AddConstantNoBatch()
    return module


class AddConstantBatch(torch.nn.Module):
    """Adds a buffered constant that includes a batch dim."""

    def __init__(self):
        super().__init__()
        self.register_buffer('y', torch.ones((1, 3, 10, 10)))

    def forward(self, x):
        return x + self.y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_add_constant_batch():
    """tensor + batched constant buffer."""
    module = AddConstantBatch()
    return module
| 2,864 | 25.045455 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/mod.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.fmod')
def convert_mod(ctx):
    """Convert ``torch.fmod`` (C-style remainder; result sign follows the dividend).

    Built as ``a - trunc(a / b) * b`` where the truncating division is
    emulated by ``sign(a*b) * (|a| // |b|)``, because TensorRT's
    FLOOR_DIV rounds toward -Inf rather than toward zero.

    NOTE(review): when a or b is 0 the sign term evaluates 0/0 -> NaN.
    """
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    # we can not use ElementWiseOperation.FLOOR_DIV directly because Torch truncate negative result toward 0
    # but TensorRT FLOOR_DIV op toward -Inf
    # sign = ab / |ab|
    # floordiv result: sign * (|a| // |b|)
    ab_layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.PROD)
    abs_ab_layer = ctx.network.add_unary(ab_layer.get_output(0), trt.UnaryOperation.ABS)
    sign_layer = ctx.network.add_elementwise(ab_layer.get_output(0), abs_ab_layer.get_output(0),
                                             trt.ElementWiseOperation.DIV)
    abs_a_layer = ctx.network.add_unary(input_a_trt, trt.UnaryOperation.ABS)
    abs_b_layer = ctx.network.add_unary(input_b_trt, trt.UnaryOperation.ABS)
    abs_floor_layer = ctx.network.add_elementwise(abs_a_layer.get_output(0), abs_b_layer.get_output(0),
                                                  trt.ElementWiseOperation.FLOOR_DIV)
    # a % b = a - (a//b) * b   (with the truncating // built above)
    floordiv_layer = ctx.network.add_elementwise(sign_layer.get_output(0), abs_floor_layer.get_output(0),
                                                 trt.ElementWiseOperation.PROD)
    prod_layer = ctx.network.add_elementwise(floordiv_layer.get_output(0), input_b_trt, trt.ElementWiseOperation.PROD)
    sub_layer = ctx.network.add_elementwise(input_a_trt, prod_layer.get_output(0), trt.ElementWiseOperation.SUB)
    output._trt = sub_layer.get_output(0)
@tensorrt_converter('torch.Tensor.__imod__')
@tensorrt_converter('torch.Tensor.__mod__')
# we need separate converter for operator because for some reason Torch use truncation toward -Inf for this op.
# bug is filed: https://github.com/pytorch/pytorch/issues/52425
# but for now we have to convert model exactly
def convert_mod(ctx):
    """Convert the ``%`` operator (floor-modulo; result sign follows the divisor).

    TensorRT's FLOOR_DIV also rounds toward -Inf, so it can be used
    directly here (unlike in the torch.fmod converter above).

    Note: this def deliberately reuses the name ``convert_mod`` -- the
    decorator registers each converter at definition time, so shadowing
    the fmod converter in the module namespace is harmless.
    """
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    # a % b = a - (a//b) * b
    floordiv_layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.FLOOR_DIV)
    prod_layer = ctx.network.add_elementwise(floordiv_layer.get_output(0), input_b_trt, trt.ElementWiseOperation.PROD)
    mod_layer = ctx.network.add_elementwise(input_a_trt, prod_layer.get_output(0), trt.ElementWiseOperation.SUB)
    output._trt = mod_layer.get_output(0)
class Mod(torch.nn.Module):
    """``x % y`` (floor-modulo) as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return x % y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 1, 20)])
def test_mod_op():
    """tensor % tensor with broadcasting."""
    module = Mod()
    return module


class ModAssign(torch.nn.Module):
    """In-place ``x %= y`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        x %= y
        return x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 1, 20)])
def test_mod_op_assign():
    """In-place modulo (``__imod__``)."""
    module = ModAssign()
    return module


class ModConst(torch.nn.Module):
    """``x % 2.`` with a scalar divisor."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x % 2.


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20)])
def test_mod_op_const():
    """tensor % scalar."""
    module = ModConst()
    return module


class TorchMod(torch.nn.Module):
    """Functional ``torch.fmod`` (C-style remainder) as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return torch.fmod(x, y)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 40, 20)])
def test_mod_func():
    """torch.fmod functional form."""
    module = TorchMod()
    return module
| 4,033 | 38.940594 | 118 | py |
torch2trt | torch2trt-master/torch2trt/converters/AdaptiveAvgPool3d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter(
    "torch.nn.AdaptiveAvgPool3d.forward", enabled=trt_version() >= "7.0"
)
def convert_AdaptiveAvgPool3d(ctx):
    """Lower AdaptiveAvgPool3d to a fixed-window AVERAGE pooling layer.

    NOTE(review): exact only when each of the last three input extents is
    divisible by the corresponding output extent (the window is computed
    with floor division).
    """
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    output = ctx.method_return
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]

    target_size = module.output_size
    if not isinstance(target_size, tuple):
        target_size = (target_size,) * 3

    # Window == stride so the pooling tiles the volume into the target grid.
    window = (
        input_trt.shape[-3] // target_size[-3],
        input_trt.shape[-2] // target_size[-2],
        input_trt.shape[-1] // target_size[-1],
    )

    pool_layer = ctx.network.add_pooling_nd(
        input=input_trt,
        type=trt.PoolingType.AVERAGE,
        window_size=window,
    )
    pool_layer.stride_nd = window
    output._trt = pool_layer.get_output(0)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 16, 224, 224)])
def test_AdaptiveAvgPool3d_1x1x1():
    """Global 3D average pooling."""
    module = torch.nn.AdaptiveAvgPool3d((1, 1, 1))
    return module


@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 16, 224, 224)])
def test_AdaptiveAvgPool3d_2x2x2():
    """2x2x2 output grid."""
    module = torch.nn.AdaptiveAvgPool3d((2, 2, 2))
    return module


@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 16, 224, 224)])
def test_AdaptiveAvgPool3d_3x3x3():
    """3x3x3 output grid."""
    module = torch.nn.AdaptiveAvgPool3d((3, 3, 3))
    return module
| 1,397 | 27.530612 | 77 | py |
torch2trt | torch2trt-master/torch2trt/converters/gelu.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import math
@tensorrt_converter('torch.nn.functional.gelu')
def convert_gelu_v1(ctx):
    """Convert F.gelu using the tanh approximation:

        GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))

    built as a chain of TensorRT elementwise/activation layers, with the
    scalar constants materialized as broadcastable TRT tensors.
    """
    # approximate equation 1 from paper
    input = get_arg(ctx, 'input', 0, None)
    output = ctx.method_return

    # constants: 0.5, 1.0, sqrt(2/pi), 0.044715, 3.0
    x, c05, c1, cs2pi, c044, c3 = add_missing_trt_tensors(
        ctx.network,
        [input, 0.5, 1.0, math.sqrt(2.0 / math.pi), 0.044715, 3.0]
    )
    x, c05, c1, cs2pi, c044, c3 = broadcast_trt_tensors(
        ctx.network,
        [x, c05, c1, cs2pi, c044, c3],
        len(output.shape)
    )

    y = ctx.network.add_elementwise(x, c3, trt.ElementWiseOperation.POW).get_output(0)    # x^3
    y = ctx.network.add_elementwise(y, c044, trt.ElementWiseOperation.PROD).get_output(0) # 0.044715 * x^3
    y = ctx.network.add_elementwise(x, y, trt.ElementWiseOperation.SUM).get_output(0)     # x + 0.044715 * x^3
    y = ctx.network.add_elementwise(y, cs2pi, trt.ElementWiseOperation.PROD).get_output(0)  # * sqrt(2/pi)
    y = ctx.network.add_activation(y, trt.ActivationType.TANH).get_output(0)              # tanh(...)
    y = ctx.network.add_elementwise(y, c1, trt.ElementWiseOperation.SUM).get_output(0)    # 1 + tanh(...)
    y = ctx.network.add_elementwise(x, y, trt.ElementWiseOperation.PROD).get_output(0)    # x * (...)
    y = ctx.network.add_elementwise(y, c05, trt.ElementWiseOperation.PROD).get_output(0)  # * 0.5

    output._trt = y
# @tensorrt_converter('torch.nn.functional.gelu')
# def convert_gelu_v2(ctx):
# # approximate equation 1 from paper
# input = get_arg(ctx, 'input', 0, None)
# output = ctx.method_return
# x, c1702 = add_missing_trt_tensors(
# ctx.network,
# [input, 1.702]
# )
# x, c1702 = broadcast_trt_tensors(
# ctx.network,
# [x, c1702],
# len(output.shape) - 1
# )
# y = ctx.network.add_elementwise(x, c1702, trt.ElementWiseOperation.PROD).get_output(0)
# y = ctx.network.add_activation(y, trt.ActivationType.SIGMOID).get_output(0)
# y = ctx.network.add_elementwise(x, y, trt.ElementWiseOperation.PROD).get_output(0)
# output._trt = y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3, 3)])
def test_silu():
    # NOTE(review): name looks copy-pasted from the silu converter tests --
    # this actually exercises GELU.  Kept as-is for interface stability.
    module = torch.nn.GELU()
    return module
torch2trt | torch2trt-master/torch2trt/converters/pow.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.pow')
@tensorrt_converter('torch.Tensor.__ipow__')
@tensorrt_converter('torch.Tensor.__pow__')
def convert_pow(ctx):
    """``x ** y`` -> TensorRT POW elementwise layer (with broadcasting)."""
    base = ctx.method_args[0]
    exponent = ctx.method_args[1]
    result = ctx.method_return
    base_trt, exp_trt = add_missing_trt_tensors(ctx.network, [base, exponent])
    base_trt, exp_trt = broadcast_trt_tensors(ctx.network, [base_trt, exp_trt], len(result.shape))
    pow_layer = ctx.network.add_elementwise(base_trt, exp_trt, trt.ElementWiseOperation.POW)
    result._trt = pow_layer.get_output(0)
@tensorrt_converter('torch.Tensor.__rpow__')
def convert_pow(ctx):
    """Convert ``scalar ** tensor`` (``__rpow__``).

    Argument order is swapped because __rpow__ receives (tensor, base).
    This def deliberately reuses the name ``convert_pow``: the decorator
    registers each converter at definition time, so shadowing the
    __pow__ converter above in the module namespace is benign.
    """
    input_a = ctx.method_args[1]
    input_b = ctx.method_args[0]  # flipped for rpow
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.POW)
    output._trt = layer.get_output(0)
class Pow(torch.nn.Module):
    """``x ** y`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return x ** y


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_pow_basic():
    """tensor ** tensor."""
    module = Pow()
    return module


# __ipow__ not yet impl in torch
# class IPow(torch.nn.Module):
#     def __init__(self):
#         super(IPow, self).__init__()
#     def forward(self, x, y):
#         x **= y
#         return x
# @add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
# def test_pow_ipow():
#     return IPow()


class TorchPow(torch.nn.Module):
    """Functional ``torch.pow`` as a module."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        return torch.pow(x, y)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_torch_pow():
    """torch.pow functional form."""
    module = TorchPow()
    return module


class RpowInt(torch.nn.Module):
    """``2 ** x`` with an int base (``__rpow__``)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 2 ** x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_rpow_int():
    """int ** tensor."""
    module = RpowInt()
    return module


class RpowFloat(torch.nn.Module):
    """``2.0 ** x`` with a float base (``__rpow__``)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 2.0 ** x


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_rpow_float():
    """float ** tensor."""
    module = RpowFloat()
    return module
| 2,643 | 27.430108 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/getitem_test.py | import pytest
import torch
import torch.nn as nn
from torch2trt import torch2trt, trt
class YOLOXFocusTestModule(nn.Module):
    """YOLOX 'focus' layer: space-to-depth via strided slicing.

    Splits each 2x2 spatial patch into four feature maps and stacks them
    on the channel dim, halving H and W and quadrupling C.
    """

    def forward(self, x):
        top_left = x[..., ::2, ::2]
        top_right = x[..., ::2, 1::2]
        bot_left = x[..., 1::2, ::2]
        bot_right = x[..., 1::2, 1::2]
        return torch.cat((top_left, bot_left, top_right, bot_right), dim=1)
def _check_trt_consistency(module):
    """Convert ``module`` with torch2trt and assert the TRT engine matches
    PyTorch at the build batch size (1) and the max batch size (4).

    Extracted from four near-identical test bodies to remove duplication.
    """
    module = module.cuda().eval()
    data = torch.randn(1, 3, 112, 112).cuda()
    module_trt = torch2trt(module, [data], max_batch_size=4, log_level=trt.Logger.VERBOSE)
    for batch_size in (1, 4):
        data = torch.randn(batch_size, 3, 112, 112).cuda()
        assert(torch.allclose(module_trt(data), module(data), atol=1e-4, rtol=1e-4))


def test_getitem_dynamic_yolox_layer():
    """Strided ::2 slicing on both spatial dims (YOLOX 'focus' layer)."""
    class YOLOXFocusTestModule(nn.Module):
        def forward(self, x):
            patch_top_left = x[..., ::2, ::2]
            patch_top_right = x[..., ::2, 1::2]
            patch_bot_left = x[..., 1::2, ::2]
            patch_bot_right = x[..., 1::2, 1::2]
            return torch.cat(
                (
                    patch_top_left,
                    patch_bot_left,
                    patch_top_right,
                    patch_bot_right,
                ),
                dim=1,
            )

    _check_trt_consistency(YOLOXFocusTestModule())


def test_getitem_dynamic_add_dim():
    """Ellipsis + None appends a new trailing dim."""
    class AddDimModule(nn.Module):
        def forward(self, x):
            y = x[..., None]
            return torch.cat((y, y, y, y), dim=1)

    _check_trt_consistency(AddDimModule())


def test_getitem_dynamic_remove_dim():
    """Integer index under Ellipsis removes the trailing dim."""
    class RemoveDimModule(nn.Module):
        def forward(self, x):
            y = x[..., 0]
            return torch.cat((y, y, y, y), dim=1)

    _check_trt_consistency(RemoveDimModule())


def test_getitem_dynamic_remove_add_dim():
    """Integer index immediately followed by None: remove then re-add a dim."""
    class RemoveAddDimModule(nn.Module):
        def forward(self, x):
            y = x[..., 0, None]
            return torch.cat((y, y, y, y), dim=1)

    _check_trt_consistency(RemoveAddDimModule())
if __name__ == '__main__':
    # Run every test in this module when executed directly.  (Fix: this
    # previously called the undefined name `test_getitem_dynamic`,
    # raising NameError.)
    test_getitem_dynamic_yolox_layer()
    test_getitem_dynamic_add_dim()
    test_getitem_dynamic_remove_dim()
    test_getitem_dynamic_remove_add_dim()
torch2trt | torch2trt-master/torch2trt/converters/Linear.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.linear')
def convert_Linear(ctx):
    """Convert ``F.linear(input, weight, bias)`` to a TensorRT fully-connected layer.

    TRT's fully-connected layer expects a ...xCx1x1 tensor, so the input
    is reshaped to append two unit dims, passed through the FC layer, and
    reshaped back to the rank of the torch output.  (reshape_dims value 0
    means "keep the corresponding input dim unchanged".)
    """
    input = ctx.method_args[0]
    weight = get_arg(ctx, 'weight', 1, None)
    bias = get_arg(ctx, 'bias', 2, None)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    # reshape to ...xNx1x1
    layer = ctx.network.add_shuffle(input_trt)
    layer.reshape_dims = tuple([0]*input.ndim) + (1, 1)

    # empty Weights object acts as "no bias" when bias is None
    bias_trt = trt.Weights(torch_dtype_to_trt(weight.dtype))
    if bias is not None:
        bias_trt = bias.detach().cpu().numpy()

    # add fully connected
    layer = ctx.network.add_fully_connected(
        input=layer.get_output(0),
        num_outputs=int(weight.shape[0]),
        kernel=weight.detach().cpu().numpy(),
        bias=bias_trt)

    # reshape back to N
    layer = ctx.network.add_shuffle(layer.get_output(0))
    layer.reshape_dims = tuple([0] * output.ndim)

    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 10)])
def test_Linear_basic():
    """Linear layer with bias over inputs of varying rank."""
    module = torch.nn.Linear(10, 5)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 10)], max_batch_size=2)
def test_Linear_no_bias():
    """Bias-free linear layer, including a batched case."""
    module = torch.nn.Linear(10, 5, bias=False)
    return module
| 1,674 | 33.895833 | 88 | py |
torch2trt | torch2trt-master/torch2trt/converters/chunk.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .split import convert_split
@tensorrt_converter('torch.chunk')
@tensorrt_converter('torch.Tensor.chunk')
def convert_chunk(ctx):
    """chunk() shares split() semantics; delegate to the split converter."""
    convert_split(ctx)
class TorchChunk(torch.nn.Module):
    """Applies ``torch.chunk`` with constructor-captured arguments."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, x):
        return torch.chunk(x, *self.args, **self.kwargs)
class TensorChunk(torch.nn.Module):
    """Applies ``Tensor.chunk`` with constructor-captured arguments."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.args = args
        self.kwargs = kwargs

    def forward(self, x):
        return x.chunk(*self.args, **self.kwargs)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_chunk_1_1():
    """Single chunk along dim 1 (identity split)."""
    module = TorchChunk(1, 1)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_chunk_2_1():
    """Two chunks along dim 1."""
    module = TorchChunk(2, 1)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_chunk_3_1():
    """Three chunks along dim 1 (uneven split of size-3 dim)."""
    module = TorchChunk(3, 1)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_torch_chunk_3_2():
    """Three chunks along dim 2."""
    module = TorchChunk(3, 2)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 3, 3)], max_batch_size=2)
def test_tensor_chunk_3_2():
    """Tensor.chunk method form, three chunks along dim 2."""
    module = TensorChunk(3, 2)
    return module
torch2trt | torch2trt-master/torch2trt/converters/BatchNorm1d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.BatchNorm1d.forward')
def convert_BatchNorm1d(ctx):
    """Convert BatchNorm1d (eval mode) to a TensorRT per-channel scale layer.

    BN is folded into ``y = scale * x + bias`` using the running stats.
    Fix: supports ``affine=False`` modules, whose ``weight``/``bias`` are
    None and previously crashed with AttributeError.

    NOTE(review): assumes running stats exist (track_running_stats=True);
    a module built with track_running_stats=False has running_var=None and
    is still unsupported.
    """
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    running_mean = module.running_mean.detach().cpu().numpy()
    running_var = module.running_var.detach().cpu().numpy()

    # fold the normalization (and optional affine params) into scale/bias
    scale = 1.0 / np.sqrt(running_var + module.eps)
    if module.weight is not None:  # affine=False leaves weight/bias as None
        scale = module.weight.detach().cpu().numpy() * scale
    bias = -running_mean * scale
    if module.bias is not None:
        bias = module.bias.detach().cpu().numpy() + bias
    power = np.ones_like(scale)

    # TRT's scale layer needs a 4D tensor: reshape (N, C[, L]) -> (N, C, L, 1)
    layer = ctx.network.add_shuffle(input_trt)
    if len(input.shape) == 2:
        layer.reshape_dims = (0, 0, 1, 1)
    else:
        layer.reshape_dims = (0, 0, 0, 1)

    layer = ctx.network.add_scale(layer.get_output(0), trt.ScaleMode.CHANNEL, bias, scale, power)

    # restore the original rank
    layer = ctx.network.add_shuffle(layer.get_output(0))
    if len(input.shape) == 2:
        layer.reshape_dims = (0, 0)
    else:
        layer.reshape_dims = (0, 0, 0)

    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 10, 3)], max_batch_size=2)
def test_BatchNorm1d_basic():
    """BatchNorm1d over (N, C) and (N, C, L) inputs."""
    module = torch.nn.BatchNorm1d(10)
    return module
torch2trt | torch2trt-master/torch2trt/converters/max_pool1d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.max_pool1d')
def convert_max_pool1d(ctx):
    """Convert ``F.max_pool1d`` via a 2D pooling workaround.

    NOTE(review): ``dilation`` and ``return_indices`` are read but silently
    ignored -- a call with dilation > 1 or return_indices=True converts
    without error yet differs from PyTorch.  Confirm callers never rely
    on them.
    """
    # At the time of this implementation, TensorRT 8.x does not yet support max pooling in 1D using `add_pooling_nd(...)`.
    # As such, we use a workaround here, by unsqueezing another dimension into the input (thus transforming it from
    # (N, C, L) to (N, C, L, 1)) so that we can use 2D max pooling across the last three dimensions.
    input = get_arg(ctx, 'input', pos=0, default=None)
    input_trt = trt_(ctx.network, input)
    output = ctx.method_return

    kernel_size = get_arg(ctx, 'kernel_size', pos=1, default=None)
    stride = get_arg(ctx, 'stride', pos=2, default=None)
    padding = get_arg(ctx, 'padding', pos=3, default=0)
    dilation = get_arg(ctx, 'dilation', pos=4, default=1) # Unused.
    return_indices = get_arg(ctx, 'return_indices', pos=5, default=False) # Unused.
    ceil_mode = get_arg(ctx, 'ceil_mode', pos=6, default=False)

    # Convert inputs to be 2d compatible as inputs will always be 1d.
    # A falsy stride (None) defaults to the kernel size, matching PyTorch.
    kernel_size = (kernel_size, 1)
    stride = kernel_size if not stride else (stride, 1)
    padding = (padding, 0)

    # Shuffle layer to unsqueeze another dimension for 2D max pooling.
    unsqueeze_layer = ctx.network.add_shuffle(input_trt)
    set_layer_precision(ctx, unsqueeze_layer)
    unsqueeze_layer.reshape_dims = tuple([0]*input.ndim) + (1,)
    unsqueeze_trt = unsqueeze_layer.get_output(0)

    # Use 2D max pooling here to fake 1D max pooling.
    pooling_layer = ctx.network.add_pooling_nd(
        input=unsqueeze_trt, type=trt.PoolingType.MAX, window_size=kernel_size
    )
    set_layer_precision(ctx, pooling_layer)
    pooling_layer.stride = stride
    pooling_layer.padding = padding

    if ceil_mode:
        # Round the output length up, as torch's ceil_mode does.
        pooling_layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP

    pooling_trt = pooling_layer.get_output(0)

    # Shuffle layer to squeeze out dimension that was just added for 2D max pooling so return is still in 1D.
    squeeze_layer = ctx.network.add_shuffle(pooling_trt)
    set_layer_precision(ctx, squeeze_layer)
    squeeze_layer.reshape_dims = tuple([0] * input.ndim)
    output._trt = squeeze_layer.get_output(0)
class MaxPool1D(torch.nn.Module):
    """Thin module wrapper around ``F.max_pool1d`` for conversion tests."""

    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.ceil_mode = ceil_mode

    def forward(self, x):
        return torch.nn.functional.max_pool1d(
            x,
            self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            ceil_mode=self.ceil_mode,
        )
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 32)])
def test_max_pool1d_basic():
    """Default stride (== kernel size)."""
    module = MaxPool1D(2)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 32)], fp16_mode=True)
def test_max_pool1d_fp16_mode():
    """Same as basic, built with fp16 enabled."""
    module = MaxPool1D(2)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 32)], int8_mode=True)
def test_max_pool1d_int8_mode():
    """Same as basic, built with int8 enabled."""
    module = MaxPool1D(2)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 32)])
def test_max_pool1d_stride():
    """Explicit stride different from the kernel size."""
    module = MaxPool1D(2, stride=3)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 32)])
def test_max_pool1d_max_padding():
    """Non-zero padding."""
    module = MaxPool1D(2, padding=1)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 32)])
def test_max_pool1d_ceil_mode():
    """ceil_mode rounding of the output length."""
    module = MaxPool1D(2, ceil_mode=True)
    return module


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 32)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 32)], max_batch_size=2)
def test_max_pool1d_all():
    """All options combined, including a batched case."""
    module = MaxPool1D(4, stride=3, padding=2, ceil_mode=True)
    return module
| 3,777 | 36.405941 | 134 | py |
torch2trt | torch2trt-master/torch2trt/converters/Conv2d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.nn.Conv2d.forward", enabled=trt_version() < '7.0')
def convert_Conv2d(ctx):
    """Legacy (TRT < 7) Conv2d converter using ``add_convolution``."""
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    def _pair(value):
        # Conv2d hyper-parameters may be int or 2-tuple; normalize to tuples.
        return value if isinstance(value, tuple) else (value,) * 2

    kernel_size = _pair(module.kernel_size)
    stride = _pair(module.stride)
    padding = _pair(module.padding)
    dilation = _pair(module.dilation)

    kernel = module.weight.detach().cpu().numpy()

    # empty Weights object acts as "no bias" when module.bias is None
    bias = trt.Weights(torch_dtype_to_trt(module.weight.dtype))
    if module.bias is not None:
        bias = module.bias.detach().cpu().numpy()

    layer = ctx.network.add_convolution(
        input=input_trt,
        num_output_maps=module.out_channels,
        kernel_shape=kernel_size,
        kernel=kernel,
        bias=bias,
    )
    layer.stride = stride
    layer.padding = padding
    layer.dilation = dilation

    if module.groups is not None:
        layer.num_groups = module.groups

    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 10, 224, 224)], enabled=trt_version() < '7.0')
def test_Conv2d_basic():
    """1x1 convolution, stride 1."""
    module = torch.nn.Conv2d(10, 5, kernel_size=1, stride=1, padding=0)
    return module


@add_module_test(torch.float32, torch.device("cuda"), [(1, 10, 224, 224)], enabled=trt_version() < '7.0')
def test_Conv2d_stride2():
    """1x1 convolution with stride 2."""
    module = torch.nn.Conv2d(10, 5, kernel_size=1, stride=2, padding=0)
    return module


@add_module_test(torch.float32, torch.device("cuda"), [(1, 10, 224, 224)], enabled=trt_version() < '7.0')
def test_Conv2d_kernel3():
    """3x3 convolution with stride 2 and padding 1."""
    module = torch.nn.Conv2d(10, 5, kernel_size=3, stride=2, padding=1)
    return module


@add_module_test(torch.float32, torch.device("cuda"), [(1, 10, 224, 224)], enabled=trt_version() < '7.0')
def test_Conv2d_dilation2():
    """3x3 convolution with dilation 2."""
    module = torch.nn.Conv2d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
    return module
| 2,199 | 30.884058 | 105 | py |
torch2trt | torch2trt-master/torch2trt/converters/sum.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .unary import UnaryModule
from torch import nn
@tensorrt_converter('torch.sum')
@tensorrt_converter('torch.Tensor.sum')
def convert_sum(ctx):
    """Convert ``torch.sum`` / ``Tensor.sum`` to a TensorRT SUM reduce layer."""
    input = ctx.method_args[0]
    # Default reduces dims 1..ndim-1 (dim 0 excluded).  NOTE(review): the
    # sibling max.py converter defaults to range(0, ndim) — confirm which
    # convention this build of torch2trt expects for full reductions.
    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(1, len(input.shape))))
    keepdim = get_arg(ctx, 'keepdim', pos=2, default=False)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.SUM, torch_dim_to_trt_axes(dim), keepdim)
    output._trt = layer.get_output(0)
# Round-trip fixtures for the sum converter.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_sum_reduce_all():
    return UnaryModule(lambda x: torch.sum(x))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_sum_reduce_dim1():
    return UnaryModule(lambda x: torch.sum(x, 1))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_sum_reduce_dim22():  # name "dim22" looks like a typo for "dim2"; kept for stability
    return UnaryModule(lambda x: torch.sum(x, 2))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_sum_reduce_dim1_keepdim():
    return UnaryModule(lambda x: torch.sum(x, 1, keepdim=True))
class DisparityRegression(nn.Module):
    """Weights the input by a per-channel disparity index buffer.

    Despite the name, this module only multiplies by the index buffer — it
    does not reduce.
    """
    def __init__(self, maxdisp):
        super(DisparityRegression, self).__init__()
        # (maxdisp, 1, 1) buffer of indices 0..maxdisp-1, broadcast over H, W.
        self.register_buffer('disp', torch.arange(maxdisp, dtype=torch.float32).view(maxdisp, 1, 1))
    def forward(self, x):
        # NOTE(review): the trailing ", 1)" leftover in the original comment
        # suggests a torch.sum(x * self.disp, 1) was removed at some point.
        return x * self.disp
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 23, 23)])
def test_disparity_reg():
    return DisparityRegression(10)
| 1,970 | 36.188679 | 108 | py |
torch2trt | torch2trt-master/torch2trt/converters/clamp.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
def _add_clamp_val(network, trt_input, val, op):
    """Clamp ``trt_input`` against a scalar bound via an elementwise op.

    Used for both the MIN and MAX sides of a clamp; ``op`` selects which.
    Returns the new elementwise layer.
    """
    # Materialize the scalar as a rank-matched (1, ..., 1) constant so
    # TensorRT broadcasts it over every dimension of the input.
    val_shape = (1, ) * len(trt_input.shape)  # broadcast all dimensions
    val_tensor = val * torch.ones(val_shape, dtype=torch_dtype_from_trt(trt_input.dtype)).cpu().numpy()
    val_trt = network.add_constant(val_shape, val_tensor)
    layer = network.add_elementwise(trt_input, val_trt.get_output(0), op)
    return layer
def _add_clamp_tensor(network, trt_input, tensor, op):
    """Clamp ``trt_input`` against a ``torch.Tensor`` bound.

    The bound tensor is added to the network and both operands are
    broadcast to the larger rank before the elementwise MIN/MAX.
    Returns the new elementwise layer.
    """
    # Use add_missing_trt_tensors for consistency with the other converters
    # in this package (trt_ is the legacy spelling of the same helper).
    tensor_trt = add_missing_trt_tensors(network, [tensor])[0]
    trt_input, tensor_trt = broadcast_trt_tensors(
        network, [trt_input, tensor_trt],
        max(len(trt_input.shape), len(tensor_trt.shape)))
    layer = network.add_elementwise(trt_input, tensor_trt, op)
    return layer
def __add_clamp(network, trt_input, val, op):
    """Dispatch to the tensor- or scalar-bound clamp helper."""
    if isinstance(val, torch.Tensor):
        return _add_clamp_tensor(network, trt_input, val, op)
    return _add_clamp_val(network, trt_input, val, op)
# CLAMP_MIN
@tensorrt_converter('torch.clamp_min')
@tensorrt_converter('torch.Tensor.clamp_min')
def convert_clamp_min(ctx):
    """Convert clamp_min to an elementwise MAX against the bound."""
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    val = ctx.method_args[1]
    output = ctx.method_return
    # clamp_min(x, v) == max(x, v)
    layer = __add_clamp(ctx.network, input_trt, val, trt.ElementWiseOperation.MAX)
    output._trt = layer.get_output(0)
# Round-trip fixtures for clamp_min.
class TorchClampMin(torch.nn.Module):
    def forward(self, x):
        return torch.clamp_min(x, -0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_min():
    return TorchClampMin()
class TensorClampMin(torch.nn.Module):
    def forward(self, x):
        return x.clamp_min(-0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_tensor_clamp_min():
    return TensorClampMin()
# CLAMP_MAX
@tensorrt_converter('torch.clamp_max')
@tensorrt_converter('torch.Tensor.clamp_max')
def convert_clamp_max(ctx):
    """Convert clamp_max to an elementwise MIN against the bound."""
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    val = ctx.method_args[1]
    output = ctx.method_return
    # clamp_max(x, v) == min(x, v)
    layer = __add_clamp(ctx.network, input_trt, val, trt.ElementWiseOperation.MIN)
    output._trt = layer.get_output(0)
# Round-trip fixtures for clamp_max.
class TorchClampMax(torch.nn.Module):
    def forward(self, x):
        return torch.clamp_max(x, 0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_max():
    return TorchClampMax()
class TensorClampMax(torch.nn.Module):
    def forward(self, x):
        return x.clamp_max(0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_tensor_clamp_max():
    return TensorClampMax()
# CLAMP
@tensorrt_converter('torch.clamp')
@tensorrt_converter('torch.Tensor.clamp')
def convert_clamp(ctx):
    """Convert ``torch.clamp`` / ``Tensor.clamp`` to elementwise MAX/MIN layers.

    Bounds may be given positionally or by keyword, and either may be
    omitted (but not both).  Using ``get_arg`` fixes the IndexError the
    old positional branch raised for ``torch.clamp(x, min_val)`` with a
    single positional bound.
    """
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    # Bounds: positional order is (input, min, max), matching torch.clamp.
    min_val = get_arg(ctx, 'min', pos=1, default=None)
    max_val = get_arg(ctx, 'max', pos=2, default=None)

    # Apply the lower bound first (MAX), then the upper bound (MIN), as
    # the original implementation did.  torch.clamp requires at least one
    # bound, so at least one branch runs.
    if min_val is not None:
        layer = __add_clamp(ctx.network, input_trt, min_val, trt.ElementWiseOperation.MAX)
        input_trt = layer.get_output(0)
    if max_val is not None:
        layer = __add_clamp(ctx.network, input_trt, max_val, trt.ElementWiseOperation.MIN)

    output._trt = layer.get_output(0)
# Round-trip fixtures: clamp against tensor bounds (full-shape and broadcast).
class TorchClampTensor(torch.nn.Module):
    def __init__(self, min_=None, max_=None):
        super().__init__()
        self.min = min_
        self.max = max_
    def forward(self, x):
        return torch.clamp(x, min=self.min, max=self.max)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_tensor_min():
    return TorchClampTensor(min_=torch.ones(1, 3, 224, 224).cuda() * -0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_tensor_min_broadcasted():
    return TorchClampTensor(min_=torch.tensor((-0.1,)).cuda())
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_tensor_max():
    return TorchClampTensor(max_=torch.ones(1, 3, 224, 224).cuda() * 0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_tensor_max_broadcasted():
    return TorchClampTensor(max_=torch.tensor((0.1,)).cuda())
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_tensor_min_max():
    return TorchClampTensor(min_=torch.ones(1, 3, 224, 224).cuda() * -0.1, max_=torch.ones(1, 3, 224, 224).cuda() * 0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_tensor_min_max_broadcasted():
    return TorchClampTensor(min_=torch.tensor((-0.1,)).cuda(), max_=torch.tensor((0.1,)).cuda())
# Round-trip fixtures: clamp with scalar bounds, positional and keyword forms.
class TorchClamp(torch.nn.Module):
    def forward(self, x):
        return torch.clamp(x, -0.1, 0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp():
    return TorchClamp()
class TensorClamp(torch.nn.Module):
    def forward(self, x):
        return x.clamp(-0.1, 0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_tensor_clamp():
    return TensorClamp()
class TorchClampOptionMax(torch.nn.Module):
    def forward(self, x):
        return torch.clamp(x, max=0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_option_max():
    return TorchClampOptionMax()
class TorchClampOptionMin(torch.nn.Module):
    def forward(self, x):
        return torch.clamp(x, min=-0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_option_min():
    return TorchClampOptionMin()
class TorchClampOptionMaxMin(torch.nn.Module):
    def forward(self, x):
        return torch.clamp(x, min=-0.1, max=0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_torch_clamp_option_max_min():
    return TorchClampOptionMaxMin()
class TensorClampOptionMax(torch.nn.Module):
    def forward(self, x):
        return x.clamp(max=0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_tensor_clamp_option_max():
    return TensorClampOptionMax()
class TensorClampOptionMin(torch.nn.Module):
    def forward(self, x):
        return x.clamp(min=-0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_tensor_clamp_option_min():
    return TensorClampOptionMin()
class TensorClampOptionMaxMin(torch.nn.Module):
    def forward(self, x):
        return x.clamp(min=-0.1, max=0.1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_tensor_clamp_option_max_min():
    return TensorClampOptionMaxMin()
| 7,705 | 30.453061 | 133 | py |
torch2trt | torch2trt-master/torch2trt/converters/expand.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.expand')
def convert_expand(ctx):
    """Convert ``Tensor.expand`` to a TensorRT slice layer.

    Expansion is expressed as a slice whose stride is 0 along broadcast
    dimensions (re-reading the same input element) and 1 along dimensions
    whose sizes already match.
    """
    input = ctx.method_args[0]
    if not hasattr(input, '_trt'):
        # Input was never traced into the network; nothing to convert.
        return
    output = ctx.method_return
    inshape = tuple(input.shape)
    # output.shape already has the requested sizes resolved (including -1).
    shape = tuple(output.shape)
    ndim = len(shape)
    start = tuple([0] * ndim)
    # stride == 1 if dimensions match, 0 otherwise (broadcast).
    stride = tuple([int(i == o) for i, o in zip(inshape, shape)])
    # (The raw size arguments in ctx.method_args[1:] were read but unused
    # in the original implementation; dropped here.)
    layer = ctx.network.add_slice(input._trt, start, shape, stride)
    output._trt = layer.get_output(0)
class ExpandModule(torch.nn.Module):
    """Test fixture: expands its input to the sizes given at construction."""
    def __init__(self, *sizes):
        super(ExpandModule, self).__init__()
        self.sizes = sizes
    def forward(self, x):
        return x.expand(*self.sizes)
@add_module_test(torch.float32, torch.device('cuda'), [(1,1,3,3)])
def test_tensor_expand_singledim():
    return ExpandModule(1, 3, 3, 3)
@add_module_test(torch.float32, torch.device('cuda'), [(1,1,1,3)])
def test_tensor_expand_multidim():
    return ExpandModule(1, 3, 3, 3)
@add_module_test(torch.float32, torch.device('cuda'), [(1,1,1,3)])
def test_tensor_expand_inferdim():
    return ExpandModule(1, 3, -1, -1)
@add_module_test(torch.float32, torch.device('cuda'), [(2,1,1,3)], max_batch_size=2)
def test_tensor_expand_inferdim_bs2():
return ExpandModule(2, 3, -1, -1) | 1,691 | 31.538462 | 113 | py |
torch2trt | torch2trt-master/torch2trt/converters/cat.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.cat')
def convert_cat(ctx):
    """Map ``torch.cat`` onto a TensorRT concatenation layer."""
    tensors = get_arg(ctx, 'input', pos=0, default=None)
    axis = get_arg(ctx, 'dim', pos=1, default=0)

    # Resolve a negative concat dimension against the first input's rank.
    if axis < 0:
        axis = len(tensors[0].shape) - abs(axis)

    output = ctx.method_return

    trt_tensors = add_missing_trt_tensors(ctx.network, tensors)
    trt_tensors = broadcast_trt_tensors(ctx.network, trt_tensors, len(output.shape))

    concat = ctx.network.add_concatenation(inputs=trt_tensors)
    concat.axis = axis
    output._trt = concat.get_output(0)
class Cat(torch.nn.Module):
    """Test fixture: concatenates all positional inputs along ``dim``."""
    def __init__(self, dim):
        super(Cat, self).__init__()
        self.dim = dim
    def forward(self, *x):
        return torch.cat(x, dim=self.dim)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 3, 4), (1, 17, 4)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 4, 4), (2, 3, 4), (2, 17, 4)], max_batch_size=2)
def test_Cat_basic():
    return Cat(1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 4, 4), (1, 4, 4)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 4, 4), (2, 4, 4), (2, 4, 4)], max_batch_size=2)
def test_Cat_neg1_dim():
    return Cat(-1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 4, 4), (1, 4, 4)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 4, 4), (2, 4, 4), (2, 4, 4)], max_batch_size=2)
def test_Cat_neg2_dim():
    return Cat(-2)
| 1,547 | 31.25 | 107 | py |
torch2trt | torch2trt-master/torch2trt/converters/dummy_converters.py | from torch2trt.torch2trt import *
def is_private(method):
    """Return True for single-underscore ("private") attribute names.

    ``method`` is a dotted path like ``'torch.Tensor.add'``; only the final
    component is inspected.  Dunder names (e.g. ``__init__``) are NOT
    considered private, matching the original intent.
    """
    name = method.split('.')[-1]  # remove the namespace prefix
    # startswith avoids the IndexError the old name[1] lookup raised on a
    # bare '_' attribute name.
    return name.startswith('_') and not name.startswith('__')
def is_function_type(method):
    """True if the dotted name ``method`` evaluates to a plain function,
    builtin, or unbound method descriptor."""
    kind = eval(method + '.__class__.__name__')
    return kind in ('function', 'builtin_function_or_method', 'method_descriptor')
def get_methods(namespace):
    """Collect fully-qualified names of public callables under ``namespace``.

    ``namespace`` is a dotted module/class path (e.g. ``'torch.Tensor'``);
    private names are skipped, then only function-like attributes are kept.
    """
    qualified = [namespace + '.' + attr for attr in dir(eval(namespace))]
    # Same short-circuit order as before: privacy first, then the eval-based
    # function-type check.
    return [name for name in qualified
            if not is_private(name) and is_function_type(name)]
# Enumerate every public torch / Tensor / functional method and register a
# catch-all "unsupported" converter for each, so unconverted calls warn
# instead of failing silently during tracing.
TORCH_METHODS = []
TORCH_METHODS += get_methods('torch')
TORCH_METHODS += get_methods('torch.Tensor')
TORCH_METHODS += get_methods('torch.nn.functional')
for method in TORCH_METHODS:
    # tensorrt_converter registers at decoration time with the current
    # `method` string, so the repeated redefinition of warn_method is fine.
    @tensorrt_converter(method, is_real=False)
    def warn_method(ctx):
        print('Warning: Encountered known unsupported method %s' % ctx.method_str)
# Shape-introspection calls are harmless during tracing; silence them.
@tensorrt_converter('torch.Tensor.dim', is_real=False)
@tensorrt_converter('torch.Tensor.size', is_real=False)
def dont_warn(ctx):
    pass
| 1,114 | 28.342105 | 106 | py |
torch2trt | torch2trt-master/torch2trt/converters/AdaptiveAvgPool2d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.AdaptiveAvgPool2d.forward')
def convert_AdaptiveAvgPool2d(ctx):
    """Convert AdaptiveAvgPool2d to a fixed-window TensorRT average pool.

    NOTE(review): stride/kernel are derived by integer division, so this is
    only exact when the input H/W are divisible by the output size (and the
    output is not larger than the input) — confirm callers satisfy that.
    """
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    output = ctx.method_return
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output_size = module.output_size
    if not isinstance(output_size, tuple):
        output_size = (output_size, ) * 2
    # Emulate adaptive pooling with a regular pool: window == stride.
    stride = (input_trt.shape[-2] // output_size[-2], input_trt.shape[-1] // output_size[-1])
    kernel_size = stride
    layer = ctx.network.add_pooling(
        input=input_trt, type=trt.PoolingType.AVERAGE, window_size=kernel_size)
    layer.stride = stride
    output._trt = layer.get_output(0)
# Round-trip fixtures; 224 is divisible by 1, 2 (and 3 only approximately —
# 224 // 3 leaves a remainder, exercising the floor-division behavior).
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_AdaptiveAvgPool2d_1x1():
    return torch.nn.AdaptiveAvgPool2d((1, 1))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_AdaptiveAvgPool2d_2x2():
    return torch.nn.AdaptiveAvgPool2d((2, 2))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_AdaptiveAvgPool2d_3x3():
    return torch.nn.AdaptiveAvgPool2d((3, 3))
| 1,238 | 29.975 | 93 | py |
torch2trt | torch2trt-master/torch2trt/converters/compare.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
def convert_elementwise(ctx, op):
    """Shared helper: lower a binary comparison to a TRT elementwise layer."""
    lhs = ctx.method_args[0]
    rhs = ctx.method_args[1]
    output = ctx.method_return

    lhs_trt, rhs_trt = add_missing_trt_tensors(ctx.network, [lhs, rhs])
    target_ndim = max(len(lhs_trt.shape), len(rhs_trt.shape))
    lhs_trt, rhs_trt = broadcast_trt_tensors(ctx.network, [lhs_trt, rhs_trt], target_ndim)

    layer = ctx.network.add_elementwise(lhs_trt, rhs_trt, op)
    output._trt = layer.get_output(0)
@tensorrt_converter('torch.gt', enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.Tensor.__gt__', enabled=trt_version() >= '7.0')
def convert_gt(ctx):
    """Elementwise greater-than (TRT >= 7 only)."""
    return convert_elementwise(ctx, trt.ElementWiseOperation.GREATER)
@tensorrt_converter('torch.lt', enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.Tensor.__lt__', enabled=trt_version() >= '7.0')
def convert_lt(ctx):
    """Elementwise less-than (TRT >= 7 only).

    Renamed from the copy-pasted ``convert_gt`` so the three comparison
    converters no longer shadow one another at module level; registration
    happens at decoration time, so converter lookup is unaffected.
    """
    return convert_elementwise(ctx, trt.ElementWiseOperation.LESS)
@tensorrt_converter('torch.eq', enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.Tensor.__eq__', enabled=trt_version() >= '7.0')
def convert_eq(ctx):
    """Elementwise equality (TRT >= 7 only).

    Renamed from the copy-pasted ``convert_gt`` so the three comparison
    converters no longer shadow one another at module level; registration
    happens at decoration time, so converter lookup is unaffected.
    """
    return convert_elementwise(ctx, trt.ElementWiseOperation.EQUAL)
# Fixture modules wrapping the three comparison operators.
class GT(torch.nn.Module):
    def __init__(self):
        super(GT, self).__init__()
    def forward(self, x, y):
        return x > y
class LT(torch.nn.Module):
    def __init__(self):
        super(LT, self).__init__()
    def forward(self, x, y):
        return x < y
class EQ(torch.nn.Module):
    def __init__(self):
        super(EQ, self).__init__()
    def forward(self, x, y):
        return x == y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6), (1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_gt_basic():
    return GT()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6), (1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_lt_basic():
    # Renamed from a duplicated `test_gt_basic`, which shadowed its siblings.
    return LT()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6), (1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_eq_basic():
    # Renamed from a duplicated `test_gt_basic`, which shadowed its siblings.
    return EQ()
# Fixtures for comparisons between a tensor and a Python scalar (both orders).
class TensorGTScalar(torch.nn.Module):
    def __init__(self, scalar):
        super().__init__()
        self.scalar = scalar
    def forward(self, tensor):
        return tensor > self.scalar
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_tensor_gt_scalar():
    return TensorGTScalar(0.1)
class ScalarGTTensor(torch.nn.Module):
    def __init__(self, scalar):
        super().__init__()
        self.scalar = scalar
    def forward(self, tensor):
        return self.scalar > tensor
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_scalar_gt_scalar():  # name says "scalar_gt_scalar" but exercises scalar > tensor
    return ScalarGTTensor(0.1)
class TensorLTScalar(torch.nn.Module):
    def __init__(self, scalar):
        super().__init__()
        self.scalar = scalar
    def forward(self, tensor):
        return tensor < self.scalar
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_tensor_lt_scalar():
    return TensorLTScalar(0.1)
# Fixtures for the remaining scalar/tensor comparison orders.
class ScalarLTTensor(torch.nn.Module):
    def __init__(self, scalar):
        super().__init__()
        self.scalar = scalar
    def forward(self, tensor):
        return self.scalar < tensor
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_scalar_lt_tensor():
    return ScalarLTTensor(0.1)
class TensorEQScalar(torch.nn.Module):
    def __init__(self, scalar):
        super().__init__()
        self.scalar = scalar
    def forward(self, tensor):
        return tensor == self.scalar
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_tensor_eq_scalar():
    return TensorEQScalar(0.1)
class ScalarEQTensor(torch.nn.Module):
    def __init__(self, scalar):
        super().__init__()
        self.scalar = scalar
    def forward(self, tensor):
        return self.scalar == tensor
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 6, 6)], enabled=trt_version() >= '7.0')
def test_scalar_eq_tensor():
    return ScalarEQTensor(0.1)
| 4,293 | 28.613793 | 146 | py |
torch2trt | torch2trt-master/torch2trt/converters/reflection_pad_2d.py | import torch
import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import numpy as np
import ctypes
try:
ctypes.CDLL('libtorch2trt_plugins.so')
    def create_reflection_pad_2d_plugin(paddingLeft, paddingRight, paddingTop, paddingBottom):
        """Instantiate the custom ReflectionPad2dPlugin from libtorch2trt_plugins.

        The four padding amounts are passed as INT32 plugin fields, matching
        the creator's expected field names.
        """
        registry = trt.get_plugin_registry()
        creator = registry.get_plugin_creator('ReflectionPad2dPlugin', '1', '')
        fc = trt.PluginFieldCollection([
            trt.PluginField(
                'paddingLeft',
                np.array([paddingLeft]).astype(np.int32),
                trt.PluginFieldType.INT32
            ),
            trt.PluginField(
                'paddingRight',
                np.array([paddingRight]).astype(np.int32),
                trt.PluginFieldType.INT32
            ),
            trt.PluginField(
                'paddingTop',
                np.array([paddingTop]).astype(np.int32),
                trt.PluginFieldType.INT32
            ),
            trt.PluginField(
                'paddingBottom',
                np.array([paddingBottom]).astype(np.int32),
                trt.PluginFieldType.INT32
            )
        ])
        return creator.create_plugin('', fc)
    @tensorrt_converter(nn.ReflectionPad2d.forward)
    def convert_reflection_pad(ctx):
        """Convert nn.ReflectionPad2d via the custom TensorRT plugin."""
        module = get_arg(ctx, 'self', pos=0, default=None)
        input = get_arg(ctx, 'x', pos=1, default=None)
        output = ctx.method_return
        input_trt = input._trt
        # module.padding is (left, right, top, bottom), matching the
        # plugin's field order.
        plugin = create_reflection_pad_2d_plugin(
            module.padding[0],
            module.padding[1],
            module.padding[2],
            module.padding[3]
        )
        layer = ctx.network.add_plugin_v2([input_trt], plugin)
        output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
def test_reflection_pad_2d_simple():
return nn.ReflectionPad2d(1)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
def test_reflection_pad_2d_simple():
return nn.ReflectionPad2d(2)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
def test_reflection_pad_2d_simple():
return nn.ReflectionPad2d((1, 0, 1, 0))
except:
pass | 2,494 | 32.716216 | 94 | py |
torch2trt | torch2trt-master/torch2trt/converters/flatten.py | import tensorrt as trt
import numpy as np
from torch2trt.torch2trt import tensorrt_converter, get_arg, torch_dim_resolve_negative, add_missing_trt_tensors, torch_dim_to_trt_axes
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.flatten')
@tensorrt_converter('torch.Tensor.flatten')
def convert_flatten(ctx):
    """Convert ``torch.flatten`` to a dynamic-shape TensorRT shuffle.

    Builds the target shape tensor at runtime: the dims before
    ``start_dim`` are kept, the span [start_dim, end_dim] collapses into a
    single inferred (-1) dim, and the dims after ``end_dim`` are kept.
    """
    input = ctx.method_args[0]
    start_dim = get_arg(ctx, 'start_dim', pos=1, default=0)
    end_dim = get_arg(ctx, 'end_dim', pos=2, default=-1)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    start_dim = torch_dim_resolve_negative(start_dim, input.ndim)[0]
    end_dim = torch_dim_resolve_negative(end_dim, input.ndim)[0]
    input_shape_trt = ctx.network.add_shape(input_trt).get_output(0)
    new_shape_trt = []
    # Dims before the flattened span are carried over one by one.
    for i in range(start_dim):
        dim_trt = ctx.network.add_slice(input_shape_trt, [i], [1], [1]).get_output(0)
        new_shape_trt.append(dim_trt)
    # The flattened span becomes a single inferred dimension.  This must be
    # appended even when start_dim == end_dim: the old guard
    # `if start_dim != end_dim` dropped that dimension entirely, producing a
    # shape with too few elements.
    new_shape_trt.append(
        ctx.network.add_constant([1], np.array([-1], dtype=np.int32)).get_output(0)
    )
    # Dims after the flattened span are carried over one by one.
    for i in range(end_dim + 1, input.ndim):
        dim_trt = ctx.network.add_slice(input_shape_trt, [i], [1], [1]).get_output(0)
        new_shape_trt.append(dim_trt)
    new_shape_trt = ctx.network.add_concatenation(new_shape_trt).get_output(0)
    # Perform the reshape with a shuffle layer driven by the shape tensor.
    layer = ctx.network.add_shuffle(input_trt)
    layer.set_input(1, new_shape_trt)
output._trt = layer.get_output(0) | 1,630 | 36.068182 | 135 | py |
torch2trt | torch2trt-master/torch2trt/converters/transpose.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.Tensor.transpose", enabled=trt_version() < '7.0')
@tensorrt_converter("torch.transpose", enabled=trt_version() < '7.0')
def convert_transpose(ctx):
input = ctx.method_args[0]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
# permutation -1 because TRT does not include batch dim
permutation = list(range(len(input.shape)))
dim0 = torch_dim_resolve_negative(ctx.method_args[1], input.ndim)[0]
dim1 = torch_dim_resolve_negative(ctx.method_args[2], input.ndim)[0]
permutation[dim0] = dim1
permutation[dim1] = dim0
layer = ctx.network.add_shuffle(input_trt)
layer.second_transpose = tuple(permutation)
output._trt = layer.get_output(0)
@tensorrt_converter("torch.Tensor.transpose", enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.transpose', enabled=trt_version() >= '7.0')
def convert_transpose_trt7(ctx):
input = ctx.method_args[0]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
# permutation -1 because TRT does not include batch dim
permutation = list(range(len(input.shape)))
dim0 = torch_dim_resolve_negative(ctx.method_args[1], input.ndim)[0]
dim1 = torch_dim_resolve_negative(ctx.method_args[2], input.ndim)[0]
permutation[dim0] = dim1
permutation[dim1] = dim0
layer = ctx.network.add_shuffle(input_trt)
layer.second_transpose = tuple(permutation)
output._trt = layer.get_output(0)
class Transpose(torch.nn.Module):
    """Test fixture for the free-function torch.transpose form."""
    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1
    def forward(self, x):
        return torch.transpose(x, self.dim0, self.dim1).contiguous()
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 3, 3)])
def test_transpose_12():
    return Transpose(1, 2)
class TensorTranspose(torch.nn.Module):
    """Test fixture for the Tensor.transpose method form."""
    def __init__(self, dim0, dim1):
        super(TensorTranspose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1
    def forward(self, x):
        return x.transpose(self.dim0, self.dim1)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 3, 3)])
def test_tensor_transpose_12():
    return TensorTranspose(1, 2)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 3, 3)])
def test_tensor_transpose_12_neg():
    return TensorTranspose(-2, -1)
| 2,707 | 34.631579 | 77 | py |
torch2trt | torch2trt-master/torch2trt/converters/max_pool2d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.max_pool2d')
def convert_max_pool2d(ctx):
    """Convert ``F.max_pool2d`` to a TensorRT MAX pooling layer."""
    # parse args
    input = get_arg(ctx, 'input', pos=0, default=None)
    kernel_size = get_arg(ctx, 'kernel_size', pos=1, default=None)
    stride = get_arg(ctx, 'stride', pos=2, default=None)
    padding = get_arg(ctx, 'padding', pos=3, default=0)
    dilation = get_arg(ctx, 'dilation', pos=4, default=1)
    ceil_mode = get_arg(ctx, 'ceil_mode', pos=5, default=False)

    # get input trt tensor (or create constant if it doesn't exist)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return

    # get kernel size
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size, ) * 2

    # PyTorch semantics: stride defaults to kernel_size when omitted.
    # Previously a None stride became (None, None) and broke the layer.
    if stride is None:
        stride = kernel_size
    if not isinstance(stride, tuple):
        stride = (stride, ) * 2

    # get padding
    if not isinstance(padding, tuple):
        padding = (padding, ) * 2

    # NOTE(review): `dilation` is parsed but never applied; add_pooling has
    # no dilation support, so values != 1 are silently ignored.

    layer = ctx.network.add_pooling(
        input=input_trt, type=trt.PoolingType.MAX, window_size=kernel_size)
    layer.stride = stride
    layer.padding = padding

    if ceil_mode:
        layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP

    output._trt = layer.get_output(0)
# Round-trip fixtures covering both ceil_mode settings.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 5, 7)])
def test_MaxPool2d_without_ceil_mode():
    return torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 5, 7)])
def test_MaxPool2d_with_ceil_mode():
return torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) | 1,836 | 33.660377 | 82 | py |
torch2trt | torch2trt-master/torch2trt/converters/max.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .unary import UnaryModule
def __convert_max_elementwise(ctx):
    """Handle the binary form torch.max(a, b) as an elementwise MAX."""
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    # Broadcast both operands up to the output rank before the MAX.
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.MAX)
    output._trt = layer.get_output(0)
def __convert_max_reduce(ctx):
    """Handle the reduction form torch.max(x[, dim]) as a MAX reduce.

    Only the values output gets a TRT tensor; the indices output of the
    (values, indices) tuple is not converted.
    """
    input = ctx.method_args[0]
    # NOTE(review): the full-reduce default includes dim 0 here, while
    # sum.py's converter defaults to range(1, ndim) — confirm which
    # convention this build of torch2trt expects.
    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(0, len(input.shape))))
    keepdim = get_arg(ctx, 'keepdim', pos=2, default=False)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    # torch.max(x) returns a tensor; torch.max(x, dim) returns (values, indices).
    if isinstance(ctx.method_return, torch.Tensor):
        output_val = ctx.method_return
    else:
        output_val = ctx.method_return[0]
        output_idx = ctx.method_return[1]
    layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.MAX, torch_dim_to_trt_axes(dim), keepdim)
    output_val._trt = layer.get_output(0)
@tensorrt_converter('torch.max')
@tensorrt_converter('torch.Tensor.max')
def convert_max(ctx):
    """Dispatch torch.max to the elementwise or reduce converter.

    A tensor second argument means the binary elementwise form; anything
    else (no arg, or an int dim) means the reduction form.
    """
    args = ctx.method_args
    is_elementwise = len(args) > 1 and isinstance(args[1], torch.Tensor)
    if is_elementwise:
        __convert_max_elementwise(ctx)
    else:
        __convert_max_reduce(ctx)
# Round-trip fixtures for both forms of torch.max.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(3, 3, 3)])
def test_max():
    # Can't exit the network with a 0D tensor so we unsqueeze a dim.
    return UnaryModule(lambda x: torch.max(x).unsqueeze(0))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_max_reduce_dim1():
    return UnaryModule(lambda x: torch.max(x, 1)[0])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_max_reduce_dim22():  # name "dim22" looks like a typo for "dim2"; kept for stability
    return UnaryModule(lambda x: torch.max(x, 2)[0])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_max_reduce_dim1_keepdim():
    return UnaryModule(lambda x: torch.max(x, 1, keepdim=True)[0])
class MaxElementwise(torch.nn.Module):
    def forward(self, x, y):
        return torch.max(x, y)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3), (1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3), (1,)]) # broadcast
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3), (1, 3, 3)]) # broadcast
def test_max_elementwise():
    return MaxElementwise()
| 2,813 | 37.027027 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/BatchNorm3d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.nn.BatchNorm3d.forward", enabled=trt_version() < "7.0")
def convert_BatchNorm3d(ctx):
module = ctx.method_args[0]
input = ctx.method_args[1]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
scale = module.weight.detach().cpu().numpy() / np.sqrt(
module.running_var.detach().cpu().numpy() + module.eps
)
bias = (
module.bias.detach().cpu().numpy()
- module.running_mean.detach().cpu().numpy() * scale
)
power = np.ones_like(scale)
layer = ctx.network.add_scale(input_trt, trt.ScaleMode.CHANNEL, bias, scale, power)
output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 16, 16, 16)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 16, 16, 16)], max_batch_size=2)
def test_BatchNorm3d_basic():
return torch.nn.BatchNorm3d(3) | 1,007 | 33.758621 | 93 | py |
torch2trt | torch2trt-master/torch2trt/converters/clone.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.clone')
@tensorrt_converter('torch.Tensor.clone')
def convert_clone(ctx):
    """Convert clone() to a TensorRT identity layer.

    TRT tensors are immutable within the graph, so a clone is simply an
    identity op; precision flags are propagated to the new layer.
    """
    input = ctx.method_args[0]
    # add_missing_trt_tensors is the current spelling of the legacy trt_()
    # helper used here; kept consistent with the other converters.
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]

    layer = ctx.network.add_identity(input_trt)
    set_layer_precision(ctx, layer)

    output = ctx.method_return
    output._trt = layer.get_output(0)
class Clone(torch.nn.Module):
    """Test fixture for the Tensor.clone method form."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x.clone()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 64, 64)])
def test_clone_basic():
    return Clone()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 64, 64)], fp16_mode=True)
def test_clone_fp16_mode():
    return Clone()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 64, 64)], int8_mode=True)
def test_clone_int8_mode():
    return Clone()
class TorchClone(torch.nn.Module):
    """Test fixture for the free-function torch.clone form."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return torch.clone(x)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 64, 64)])
def test_torch_clone_basic():
    return TorchClone()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 64, 64)], fp16_mode=True)
def test_torch_clone_fp16_mode():
    return TorchClone()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 64, 64)], int8_mode=True)
def test_torch_clone_int8_mode():
    return TorchClone()
| 1,529 | 23.285714 | 84 | py |
torch2trt | torch2trt-master/torch2trt/converters/view.py | from torch2trt.torch2trt import *
# from torch2trt.shape_conversion import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.view')
@tensorrt_converter('torch.Tensor.reshape')
def convert_view(ctx):
    """Map ``Tensor.view`` / ``Tensor.reshape`` onto a TensorRT shuffle layer.

    The target shape may be given either as one iterable (``x.view((a, b))``)
    or as separate positional ints (``x.view(a, b)``).
    """
    input = ctx.method_args[0]
    # Tensors with no TRT mirror (e.g. constants created outside the traced
    # graph) cannot be converted here; leave them untouched.
    if not hasattr(input, '_trt'):
        return
    try:
        # Iterable form: view((a, b, ...)).
        iter(ctx.method_args[1])
        size = make_size_wrapper(ctx.method_args[1])
    except TypeError:
        # Varargs form: view(a, b, ...).  Was a bare ``except:`` which also
        # swallowed KeyboardInterrupt/SystemExit; iter() raises TypeError
        # for non-iterables, so catch exactly that.
        size = make_size_wrapper(ctx.method_args[1:])
    output = ctx.method_return
    layer = ctx.network.add_shuffle(input._trt)
    # Feed the (possibly dynamic) shape tensor as the shuffle's reshape input.
    layer.set_input(1, size._trt)
    output._trt = layer.get_output(0)
class View(torch.nn.Module):
    """Reshape input with ``Tensor.view`` using dims fixed at construction."""

    def __init__(self, *dims):
        super().__init__()
        self.dims = dims

    def forward(self, x):
        return x.view(*self.dims)
# Reshape tests covering 2-, 3-, and 4-target-dim views, batch size 1 and 2.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)], max_batch_size=2)
def test_view_1d():
    return View(1, -1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)], max_batch_size=2)
def test_view_2d():
    return View(1, 1, -1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3, 3, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3, 3, 6)], max_batch_size=2)
def test_view_3d():
    return View(1, 3, 3, -1)
| 1,791 | 29.896552 | 93 | py |
torch2trt | torch2trt-master/torch2trt/converters/relu6.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.relu6')
def convert_functional_relu6(ctx):
    """Convert the functional form by delegating to the module converter."""
    # Prepend a ReLU6 module so method_args matches ReLU6.forward(self, input).
    ctx.method_args = (torch.nn.ReLU6(),) + ctx.method_args
    convert_relu6(ctx)
@tensorrt_converter('torch.nn.ReLU6.forward')
def convert_relu6(ctx):
    """Convert ReLU6 as min(relu(x), 6): RELU activation + elementwise MIN."""
    input = ctx.method_args[1]
    output = ctx.method_return
    # The constant 6 becomes a TRT tensor broadcast to the output rank.
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input, 6])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer = ctx.network.add_activation(
        input=input_a_trt, type=trt.ActivationType.RELU)
    layer = ctx.network.add_elementwise(
        layer.get_output(0), input_b_trt, trt.ElementWiseOperation.MIN)
    output._trt = layer.get_output(0)
# Module form of ReLU6.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
def test_relu6_basic():
    return torch.nn.ReLU6()
class FunctionalRelu6(torch.nn.Module):
    """Module wrapper so the functional relu6 converter path is exercised."""

    def forward(self, x):
        out = torch.nn.functional.relu6(x)
        return out
# Functional form of ReLU6.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
def test_functional_relu6_basic():
    return FunctionalRelu6()
| 1,212 | 28.585366 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/conv_functional.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.conv2d', enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.nn.functional.conv3d', enabled=trt_version() >= '7.0')
def convert_Conv_trt7_functional(ctx):
    """Convert functional conv2d/conv3d to a TensorRT N-d convolution.

    Weight and bias are baked into the engine as constants, so they must be
    fixed at conversion time.
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    weight = get_arg(ctx, 'weight', pos=1, default=None)
    bias = get_arg(ctx, 'bias', pos=2, default=None)
    stride = get_arg(ctx, 'stride', pos=3, default=1)
    padding = get_arg(ctx, 'padding', pos=4, default=0)
    dilation = get_arg(ctx, 'dilation', pos=5, default=1)
    groups = get_arg(ctx, 'groups', pos=6, default=1)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    # Number of spatial dims: 2 for conv2d, 3 for conv3d.
    input_dim = input.dim() - 2
    # Output channels and kernel size come from the weight tensor layout
    # (out_channels, in_channels/groups, *kernel_size).
    out_channels = int(weight.shape[0])
    kernel_size = tuple(weight.shape[2:])
    # Normalize scalar hyper-parameters to per-spatial-dim tuples.
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size, ) * input_dim
    if not isinstance(stride, tuple):
        stride = (stride, ) * input_dim
    if not isinstance(padding, tuple):
        padding = (padding, ) * input_dim
    if not isinstance(dilation, tuple):
        dilation = (dilation, ) * input_dim
    kernel = weight.detach().cpu().numpy()
    if bias is not None:
        bias = bias.detach().cpu().numpy()
    layer = ctx.network.add_convolution_nd(
        input=input_trt,
        num_output_maps=out_channels,
        kernel_shape=kernel_size,
        kernel=kernel,
        bias=bias)
    layer.stride_nd = stride
    layer.padding_nd = padding
    layer.dilation_nd = dilation
    if groups is not None:
        layer.num_groups = groups
    output._trt = layer.get_output(0)
class FunctionalConv2d(torch.nn.Module):
    """Call ``F.conv2d`` with parameters taken from an owned ``nn.Conv2d``.

    The module owns the weight/bias; forward bypasses it and invokes the
    functional API so the functional converter is exercised.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.conv = torch.nn.Conv2d(*args, **kwargs)

    def forward(self, x):
        c = self.conv
        return torch.nn.functional.conv2d(
            x, c.weight, c.bias, c.stride, c.padding, c.dilation, c.groups
        )
class FunctionalConv3d(torch.nn.Module):
    """Call ``F.conv3d`` with parameters taken from an owned ``nn.Conv3d``.

    Mirrors FunctionalConv2d for the 3-D case.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.conv = torch.nn.Conv3d(*args, **kwargs)

    def forward(self, x):
        c = self.conv
        return torch.nn.functional.conv3d(
            x, c.weight, c.bias, c.stride, c.padding, c.dilation, c.groups
        )
# Functional conv tests: basic, stride-2, kernel-3, and dilation-2 variants
# for both conv2d and conv3d (TRT >= 7 only).
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_basic_trt7_functional():
    return FunctionalConv2d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_stride2_trt7_functional():
    return FunctionalConv2d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_kernel3_trt7_functional():
    return FunctionalConv2d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_dilation2_trt7_functional():
    return FunctionalConv2d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_basic_trt7_functional():
    return FunctionalConv3d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_stride2_trt7_functional():
    return FunctionalConv3d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_kernel3_trt7_functional():
    return FunctionalConv3d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_dilation2_trt7_functional():
    return FunctionalConv3d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
| 4,453 | 33.796875 | 108 | py |
torch2trt | torch2trt-master/torch2trt/converters/matmul.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.matmul")
@tensorrt_converter("torch.Tensor.__matmul__")
def convert_matmul(ctx):
x = ctx.method_args[0]
y = ctx.method_args[1]
z = ctx.method_return
x_trt, y_trt = add_missing_trt_tensors(ctx.network, [x, y])
layer = ctx.network.add_matrix_multiply(
x_trt,
trt.MatrixOperation.NONE,
y_trt,
trt.MatrixOperation.NONE
)
z._trt = layer.get_output(0)
class Matmul(torch.nn.Module):
    """Matrix-multiply two inputs via the ``@`` operator."""

    def forward(self, x, y):
        product = x @ y
        return product
# 2-D matrix product with compatible inner dims.
@add_module_test(torch.float32, torch.device('cuda'), [(3, 4), (4, 3)])
def test_matmul_basic():
    return Matmul()
torch2trt | torch2trt-master/torch2trt/converters/prod.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .unary import UnaryModule
@tensorrt_converter('torch.prod')
@tensorrt_converter('torch.Tensor.prod')
def convert_prod(ctx):
    """Convert ``prod`` to a TensorRT PROD reduction.

    When ``dim`` is omitted, reduces over dims 1..rank-1 (dim 0 is treated
    as the batch dimension and left intact).
    """
    input = ctx.method_args[0]
    dim = get_arg(ctx, 'dim', pos=1, default=tuple(range(1, len(input.shape))))
    keepdim = get_arg(ctx, 'keepdim', pos=2, default=False)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    layer = ctx.network.add_reduce(input_trt, trt.ReduceOperation.PROD, torch_dim_to_trt_axes(dim), keepdim)
    output._trt = layer.get_output(0)
# Product reductions: full reduce, single-dim reduce, and keepdim variant.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_prod_reduce_all():
    return UnaryModule(lambda x: torch.prod(x))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_prod_reduce_dim1():
    return UnaryModule(lambda x: torch.prod(x, 1))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_prod_reduce_dim22():
    return UnaryModule(lambda x: torch.prod(x, 2))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
def test_prod_reduce_dim1_keepdim():
    return UnaryModule(lambda x: torch.prod(x, 1, keepdim=True))
| 1,462 | 36.512821 | 109 | py |
torch2trt | torch2trt-master/torch2trt/converters/adaptive_max_pool2d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.adaptive_max_pool2d')
def convert_adaptive_max_pool2d(ctx):
    """Convert adaptive max pooling via a fixed-stride MAX pooling layer.

    Stride is derived by floor division, so this is exact only when the
    input spatial size is an integer multiple of the requested output size.
    """
    input = ctx.method_args[0]
    output = ctx.method_return
    output_size = ctx.method_args[1]
    if isinstance(output_size, int):
        output_size = (output_size, ) * 2
    # NOTE(review): reads input._trt directly; assumes the input already has a
    # TRT mirror (no add_missing_trt_tensors fallback as in other converters).
    stride = (input._trt.shape[-2] // output_size[-2], input._trt.shape[-1] // output_size[-1])
    # Window equals stride, tiling the input into output_size regions.
    kernel_size = stride
    layer = ctx.network.add_pooling(
        input=input._trt, type=trt.PoolingType.MAX, window_size=kernel_size)
    layer.stride = stride
    output._trt = layer.get_output(0)
# Output sizes that divide 224 evenly (1, 2) plus a non-divisible case (3).
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_1x1():
    return torch.nn.AdaptiveMaxPool2d((1, 1))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_2x2():
    return torch.nn.AdaptiveMaxPool2d((2, 2))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_3x3():
    return torch.nn.AdaptiveMaxPool2d((3, 3))
| 1,146 | 30 | 95 | py |
torch2trt | torch2trt-master/torch2trt/converters/permute.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.permute')
def convert_permute(ctx):
    """Convert ``Tensor.permute`` to a TensorRT shuffle (transpose)."""
    input = ctx.method_args[0]
    # No TRT mirror means the tensor never entered the traced graph.
    if not hasattr(input, '_trt'):
        return
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    # Accept both calling conventions: permute(a, b, c) and permute([a, b, c]).
    if isinstance(ctx.method_args[1], int):
        permutation = tuple(ctx.method_args[1:])  # handle permute(a, b, c)
    else:
        permutation = tuple(ctx.method_args[1])  # handle permute([a, b, c])
    # assert(permutation[0] == 0) # cannot move batch dim
    layer = ctx.network.add_shuffle(input_trt)
    layer.second_transpose = tuple(permutation)
    output._trt = layer.get_output(0)
class Permute(torch.nn.Module):
    """Apply ``Tensor.permute`` with the axis order fixed at construction."""

    def __init__(self, *args):
        super().__init__()
        self.args = args

    def forward(self, x):
        permuted = x.permute(*self.args)
        # contiguous() forces a materialized layout after the transpose.
        return permuted.contiguous()
# Permute tests: identity and shuffled orders for 4-D and 5-D inputs, plus
# the list and tuple argument forms; each at batch size 1 and 2.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5)], max_batch_size=2)
def test_permute_2d_0123():
    return Permute(0, 1, 2, 3)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5)], max_batch_size=2)
def test_permute_2d_0312():
    return Permute(0, 3, 1, 2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5, 6)], max_batch_size=2)
def test_permute_3d_01234():
    return Permute(0, 1, 2, 3, 4)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5, 6)], max_batch_size=2)
def test_permute_3d_04132():
    return Permute(0, 4, 1, 3, 2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5, 6)], max_batch_size=2)
def test_permute_list():
    return Permute([0, 4, 1, 3, 2])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 3, 4, 5, 6)], max_batch_size=2)
def test_permute_tuple():
    return Permute((0, 4, 1, 3, 2))
torch2trt | torch2trt-master/torch2trt/converters/silu.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.silu')
def convert_silu(ctx):
    """Convert SiLU (x * sigmoid(x)) via SIGMOID activation + elementwise PROD."""
    input = get_arg(ctx, 'input', pos=0, default=None)
    output = ctx.method_return
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    layer = ctx.network.add_activation(input_trt, trt.ActivationType.SIGMOID)
    layer = ctx.network.add_elementwise(input_trt, layer.get_output(0), trt.ElementWiseOperation.PROD)
    output._trt = layer.get_output(0)
# SiLU over 2-D, 3-D, and 4-D inputs.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3, 3)])
def test_silu():
    return torch.nn.SiLU()
torch2trt | torch2trt-master/torch2trt/converters/div.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.div')
@tensorrt_converter('torch.Tensor.__div__') # py2
@tensorrt_converter('torch.Tensor.__idiv__') # py2
@tensorrt_converter('torch.Tensor.__truediv__') # py3
@tensorrt_converter('torch.Tensor.__itruediv__') # py3
def convert_div(ctx):
    """Convert division (including in-place forms) to elementwise DIV."""
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    output = ctx.method_return
    # Promote python scalars/constants to TRT tensors, then align ranks.
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.DIV)
    output._trt = layer.get_output(0)
@tensorrt_converter('torch.Tensor.__rdiv__') # py2
@tensorrt_converter('torch.Tensor.__rtruediv__') # py3
def convert_rdiv(ctx):
    """Convert reflected division (``other / tensor``): operand order is swapped."""
    input_a = ctx.method_args[1]  # inputs switched for rdiv
    input_b = ctx.method_args[0]
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.DIV)
    output._trt = layer.get_output(0)
# Test modules covering each registered division entry point: operator,
# in-place, torch.div, reflected scalar division, and constant-buffer
# divisors with and without a leading batch dim.
class Div(torch.nn.Module):
    def __init__(self):
        super(Div, self).__init__()
    def forward(self, x, y):
        return x / y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_div_basic():
    return Div()
class IDiv(torch.nn.Module):
    def __init__(self):
        super(IDiv, self).__init__()
    def forward(self, x, y):
        x /= y
        return x
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_div_idiv():
    return IDiv()
class TorchDiv(torch.nn.Module):
    def __init__(self):
        super(TorchDiv, self).__init__()
    def forward(self, x, y):
        return torch.div(x, y)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_div_torchdiv():
    return TorchDiv()
class RDivInt(torch.nn.Module):
    def __init__(self):
        super(RDivInt, self).__init__()
    def forward(self, x):
        return 100 / x
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_rdiv_int():
    return RDivInt()
class RDivFloat(torch.nn.Module):
    def __init__(self):
        super(RDivFloat, self).__init__()
    def forward(self, x):
        return 100.0 / x
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_rdiv_float():
    return RDivFloat()
class DivConstantNoBatch(torch.nn.Module):
    def __init__(self):
        super(DivConstantNoBatch, self).__init__()
        self.register_buffer('y', torch.ones((3, 10, 10)))
    def forward(self, x):
        return x / self.y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_div_constant_nobatch():
    return DivConstantNoBatch()
class DivConstantBatch(torch.nn.Module):
    def __init__(self):
        super(DivConstantBatch, self).__init__()
        self.register_buffer('y', torch.ones((1, 3, 10, 10)))
    def forward(self, x):
        return x / self.y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_div_constant_batch():
    return DivConstantBatch()
| 3,524 | 27.427419 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/sub.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.sub')
@tensorrt_converter('torch.Tensor.__isub__')
@tensorrt_converter('torch.Tensor.__sub__')
def convert_sub(ctx):
    """Convert subtraction (including in-place form) to elementwise SUB."""
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    output = ctx.method_return
    # Promote python scalars/constants to TRT tensors, then align ranks.
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.SUB)
    output._trt = layer.get_output(0)
@tensorrt_converter('torch.Tensor.__rsub__')
def convert_rsub(ctx):
    """Convert reflected subtraction (``other - tensor``).

    Renamed from a second ``convert_sub`` definition that shadowed the
    forward-subtraction converter's module-level binding; naming now mirrors
    ``convert_rdiv`` in div.py.  The registered behavior is unchanged.
    """
    input_a = ctx.method_args[1]
    input_b = ctx.method_args[0]  # flipped for rsub
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.SUB)
    output._trt = layer.get_output(0)
# Test modules covering each registered subtraction entry point: operator,
# in-place, torch.sub, reflected scalar subtraction, and constant-buffer
# subtrahends with and without a leading batch dim.
class Sub(torch.nn.Module):
    def __init__(self):
        super(Sub, self).__init__()
    def forward(self, x, y):
        return x - y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_sub_basic():
    return Sub()
class ISub(torch.nn.Module):
    def __init__(self):
        super(ISub, self).__init__()
    def forward(self, x, y):
        x -= y
        return x
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_sub_isub():
    return ISub()
class TorchSub(torch.nn.Module):
    def __init__(self):
        super(TorchSub, self).__init__()
    def forward(self, x, y):
        return torch.sub(x, y)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224), (1, 3, 224, 224)])
def test_torch_sub():
    return TorchSub()
class RSubInt(torch.nn.Module):
    def __init__(self):
        super(RSubInt, self).__init__()
    def forward(self, x):
        return 1 - x
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_rsub_int():
    return RSubInt()
class RSubFloat(torch.nn.Module):
    def __init__(self):
        super(RSubFloat, self).__init__()
    def forward(self, x):
        return 1.0 - x
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_rsub_float():
    return RSubFloat()
class SubConstantNoBatch(torch.nn.Module):
    def __init__(self):
        super(SubConstantNoBatch, self).__init__()
        self.register_buffer('y', torch.ones((3, 10, 10)))
    def forward(self, x):
        return x - self.y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_sub_constant_nobatch():
    return SubConstantNoBatch()
class SubConstantBatch(torch.nn.Module):
    def __init__(self):
        super(SubConstantBatch, self).__init__()
        self.register_buffer('y', torch.ones((1, 3, 10, 10)))
    def forward(self, x):
        return x - self.y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 10, 10)])
def test_sub_constant_batch():
    return SubConstantBatch()
| 3,332 | 27.008403 | 112 | py |
torch2trt | torch2trt-master/torch2trt/converters/unary.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
def __convert_unary(ctx, op):
    """Shared helper: wire a single-input TensorRT unary layer of type ``op``."""
    input = get_arg(ctx, 'input', pos=0, default=None)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    layer = ctx.network.add_unary(input_trt, op)
    output._trt = layer.get_output(0)
class UnaryModule(torch.nn.Module):
    """Apply an arbitrary one-argument callable ``fn`` in ``forward``."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x)
# Each converter below registers the functional, in-place, and Tensor-method
# variants of one unary op and forwards to __convert_unary with the matching
# trt.UnaryOperation; the paired test wraps the op in UnaryModule.
# EXP : Exponentiation
@tensorrt_converter('torch.exp')
@tensorrt_converter('torch.exp_')
@tensorrt_converter('torch.Tensor.exp')
@tensorrt_converter('torch.Tensor.exp_')
def convert_exp(ctx):
    __convert_unary(ctx, trt.UnaryOperation.EXP)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_exp():
    return UnaryModule(lambda x: torch.exp(x))
# LOG : Log (base e)
@tensorrt_converter('torch.log')
@tensorrt_converter('torch.log_')
@tensorrt_converter('torch.Tensor.log')
@tensorrt_converter('torch.Tensor.log_')
def convert_log(ctx):
    __convert_unary(ctx, trt.UnaryOperation.LOG)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_log():
    return UnaryModule(lambda x: torch.log(x))
# SQRT : Square root
@tensorrt_converter('torch.sqrt')
@tensorrt_converter('torch.sqrt_')
@tensorrt_converter('torch.Tensor.sqrt')
@tensorrt_converter('torch.Tensor.sqrt_')
def convert_sqrt(ctx):
    __convert_unary(ctx, trt.UnaryOperation.SQRT)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_sqrt():
    return UnaryModule(lambda x: torch.sqrt(x))
# RECIP : Reciprocal
@tensorrt_converter('torch.reciprocal')
@tensorrt_converter('torch.reciprocal_')
@tensorrt_converter('torch.Tensor.reciprocal')
@tensorrt_converter('torch.Tensor.reciprocal_')
def convert_reciprocal(ctx):
    __convert_unary(ctx, trt.UnaryOperation.RECIP)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_reciprocal():
    return UnaryModule(lambda x: torch.reciprocal(x))
# ABS : Absolute value
@tensorrt_converter('torch.abs')
@tensorrt_converter('torch.abs_')
@tensorrt_converter('torch.Tensor.abs')
@tensorrt_converter('torch.Tensor.abs_')
def convert_abs(ctx):
    __convert_unary(ctx, trt.UnaryOperation.ABS)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_abs():
    return UnaryModule(lambda x: torch.abs(x))
# NEG : Negation (also covers the unary ``-`` operator via __neg__)
@tensorrt_converter('torch.neg')
@tensorrt_converter('torch.neg_')
@tensorrt_converter('torch.Tensor.neg')
@tensorrt_converter('torch.Tensor.__neg__')
@tensorrt_converter('torch.Tensor.neg_')
def convert_neg(ctx):
    __convert_unary(ctx, trt.UnaryOperation.NEG)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_neg():
    return UnaryModule(lambda x: torch.neg(x))
# SIN : Sine
@tensorrt_converter('torch.sin')
@tensorrt_converter('torch.sin_')
@tensorrt_converter('torch.Tensor.sin')
@tensorrt_converter('torch.Tensor.sin_')
def convert_sin(ctx):
    __convert_unary(ctx, trt.UnaryOperation.SIN)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_sin():
    return UnaryModule(lambda x: torch.sin(x))
# COS : Cosine
@tensorrt_converter('torch.cos')
@tensorrt_converter('torch.cos_')
@tensorrt_converter('torch.Tensor.cos')
@tensorrt_converter('torch.Tensor.cos_')
def convert_cos(ctx):
    __convert_unary(ctx, trt.UnaryOperation.COS)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_cos():
    return UnaryModule(lambda x: torch.cos(x))
# TAN : Tangent
@tensorrt_converter('torch.tan')
@tensorrt_converter('torch.tan_')
@tensorrt_converter('torch.Tensor.tan')
@tensorrt_converter('torch.Tensor.tan_')
def convert_tan(ctx):
    """Convert tangent to a TRT unary TAN layer.

    Renamed from a duplicate ``convert_cos`` definition that shadowed the
    cosine converter's module-level binding.
    """
    __convert_unary(ctx, trt.UnaryOperation.TAN)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_tan():
    return UnaryModule(lambda x: torch.tan(x))
# | SINH : Hyperbolic sine
@tensorrt_converter('torch.sinh')
@tensorrt_converter('torch.sinh_')
@tensorrt_converter('torch.Tensor.sinh')
@tensorrt_converter('torch.Tensor.sinh_')
def convert_sinh(ctx):
    __convert_unary(ctx, trt.UnaryOperation.SINH)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_sinh():
    return UnaryModule(lambda x: torch.sinh(x))
# | COSH : Hyperbolic cosine
@tensorrt_converter('torch.cosh')
@tensorrt_converter('torch.cosh_')
@tensorrt_converter('torch.Tensor.cosh')
@tensorrt_converter('torch.Tensor.cosh_')
def convert_cosh(ctx):
    __convert_unary(ctx, trt.UnaryOperation.COSH)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_cosh():
    return UnaryModule(lambda x: torch.cosh(x))
# | ASIN : Inverse sine
@tensorrt_converter('torch.asin')
@tensorrt_converter('torch.asin_')
@tensorrt_converter('torch.Tensor.asin')
@tensorrt_converter('torch.Tensor.asin_')
def convert_asin(ctx):
    __convert_unary(ctx, trt.UnaryOperation.ASIN)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_asin():
    return UnaryModule(lambda x: torch.asin(x))
# | ACOS : Inverse cosine
@tensorrt_converter('torch.acos')
@tensorrt_converter('torch.acos_')
@tensorrt_converter('torch.Tensor.acos')
@tensorrt_converter('torch.Tensor.acos_')
def convert_acos(ctx):
    __convert_unary(ctx, trt.UnaryOperation.ACOS)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_acos():
    return UnaryModule(lambda x: torch.acos(x))
# | ATAN : Inverse tangent
@tensorrt_converter('torch.atan')
@tensorrt_converter('torch.atan_')
@tensorrt_converter('torch.Tensor.atan')
@tensorrt_converter('torch.Tensor.atan_')
def convert_atan(ctx):
    __convert_unary(ctx, trt.UnaryOperation.ATAN)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_atan():
    return UnaryModule(lambda x: torch.atan(x))
# The inverse hyperbolic ops below are listed but have no converter yet.
# | ASINH : Inverse hyperbolic sine
# |
# | ACOSH : Inverse hyperbolic cosine
# |
# | ATANH : Inverse hyperbolic tangent
# |
# CEIL : Ceiling
@tensorrt_converter('torch.ceil')
@tensorrt_converter('torch.ceil_')
@tensorrt_converter('torch.Tensor.ceil')
@tensorrt_converter('torch.Tensor.ceil_')
def convert_ceil(ctx):
    __convert_unary(ctx, trt.UnaryOperation.CEIL)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_ceil():
    return UnaryModule(lambda x: torch.ceil(x))
# FLOOR : Floor
@tensorrt_converter('torch.floor')
@tensorrt_converter('torch.floor_')
@tensorrt_converter('torch.Tensor.floor')
@tensorrt_converter('torch.Tensor.floor_')
def convert_floor(ctx):
    __convert_unary(ctx, trt.UnaryOperation.FLOOR)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
def test_floor():
    return UnaryModule(lambda x: torch.floor(x))
torch2trt | torch2trt-master/torch2trt/converters/max_pool3d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.nn.functional.max_pool3d")
@tensorrt_converter("torch.max_pool3d")
def convert_max_pool3d(ctx):
# parse args
input = get_arg(ctx, "input", pos=0, default=None)
kernel_size = get_arg(ctx, "kernel_size", pos=1, default=None)
stride = get_arg(ctx, "stride", pos=2, default=None)
padding = get_arg(ctx, "padding", pos=3, default=0)
dilation = get_arg(ctx, "dilation", pos=4, default=1)
ceil_mode = get_arg(ctx, "ceil_mode", pos=5, default=False)
# get input trt tensor (or create constant if it doesn't exist)
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
# get kernel size
if not isinstance(kernel_size, tuple):
kernel_size = (kernel_size,) * 3
# get stride
if not isinstance(stride, tuple):
stride = (stride,) * 3
# get padding
if not isinstance(padding, tuple):
padding = (padding,) * 3
layer = ctx.network.add_pooling_nd(
input=input_trt, type=trt.PoolingType.MAX, window_size=kernel_size
)
layer.stride_nd = stride
layer.padding_nd = padding
if ceil_mode:
layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP
output._trt = layer.get_output(0)
# Odd spatial sizes exercise the floor (default) vs ceil output-size rounding.
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6, 7)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 5, 7, 8)])
def test_MaxPool3d_without_ceil_mode():
    return torch.nn.MaxPool3d(kernel_size=3, stride=2, padding=1, ceil_mode=False)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6, 7)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 5, 7, 8)])
def test_MaxPool3d_with_ceil_mode():
    return torch.nn.MaxPool3d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
| 1,876 | 32.517857 | 82 | py |
torch2trt | torch2trt-master/torch2trt/converters/tanh.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.tanh')
@tensorrt_converter('torch.tanh')
def convert_tanh(ctx):
    """Convert tanh to a TensorRT TANH activation layer."""
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    layer = ctx.network.add_activation(input_trt, trt.ActivationType.TANH)
    output._trt = layer.get_output(0)
# Module form of tanh.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_tanh_basic():
    return torch.nn.Tanh()
torch2trt | torch2trt-master/torch2trt/converters/stack.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
def unsqueeze(ctx, input, dim):
    """Insert a length-1 axis at ``dim`` via a TensorRT shuffle (reshape)."""
    layer = ctx.network.add_shuffle(trt_(ctx.network, input))
    # New shape = original shape with a 1 spliced in at position ``dim``.
    shape = input.shape[:dim] + (1,) + input.shape[dim:]
    layer.reshape_dims = tuple(shape)
    return layer.get_output(0)
@tensorrt_converter('torch.stack', enabled=trt_version() >= '7.0')
def convert_cat_trt7(ctx):
    """Convert ``torch.stack``: unsqueeze each input at ``dim``, then concatenate.

    NOTE: the name says "cat" because this was adapted from the cat
    converter, but it is registered for ``torch.stack``.
    """
    inputs = get_arg(ctx, 'input', pos=0, default=None)
    dim = get_arg(ctx, 'dim', pos=1, default=0)
    # Reverse negative dims.  Stack output rank is input rank + 1, so
    # dim=-1 maps to len(input.shape), dim=-2 to len(input.shape)-1, etc.
    if dim < 0:
        dim = len(inputs[0].shape) - abs(dim + 1)
    output = ctx.method_return
    trt_inputs = [unsqueeze(ctx, i, dim) for i in inputs]
    layer = ctx.network.add_concatenation(inputs=trt_inputs)
    layer.axis = dim
    output._trt = layer.get_output(0)
class Stack(torch.nn.Module):
    """Stack all positional inputs along the axis fixed at construction."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, *tensors):
        # torch.stack introduces a new axis at self.dim.
        return torch.stack(tensors, dim=self.dim)
# Stack tests over positive and negative axis values (TRT >= 7 only).
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 4, 4), (1, 4, 4)], enabled=trt_version() >= '7.0')
def test_Stack_basic_trt7():
    return Stack(3)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 4, 4), (1, 4, 4)], enabled=trt_version() >= '7.0')
def test_Stack_basic2_trt7():
    return Stack(1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 4, 4), (1, 4, 4)], enabled=trt_version() >= '7.0')
def test_Stack_neg1_dim_trt7():
    return Stack(-1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 4, 4), (1, 4, 4)], enabled=trt_version() >= '7.0')
def test_Stack_neg2_dim_trt7():
    return Stack(-2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 4), (1, 4, 4), (1, 4, 4)], enabled=trt_version() >= '7.0')
def test_Stack_neg3_dim_trt7():
    return Stack(-3)
| 1,882 | 28.888889 | 120 | py |
torch2trt | torch2trt-master/torch2trt/converters/adaptive_avg_pool3d.py | from torch2trt.torch2trt import *
from .AdaptiveAvgPool3d import *
@tensorrt_converter("torch.nn.functional.adaptive_avg_pool3d")
def convert_adaptive_avg_pool3d(ctx):
ctx.method_args = (
torch.nn.AdaptiveAvgPool3d(ctx.method_args[1]),
ctx.method_args[0],
)
convert_AdaptiveAvgPool3d(ctx)
| 319 | 25.666667 | 62 | py |
torch2trt | torch2trt-master/torch2trt/converters/layer_norm.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.layer_norm')
def convert_layernorm(ctx):
    """Convert layer_norm by composing TRT reduce/elementwise/unary layers.

    Computes (x - mean) / sqrt(var + eps) over the trailing
    ``normalized_shape`` dims, then applies the optional affine weight/bias.
    """
    input = get_arg(ctx, 'input', 0, None)
    shape = get_arg(ctx, 'normalized_shape', 1, None)
    weight = get_arg(ctx, 'weight', 2, None)
    bias = get_arg(ctx, 'bias', 3, None)
    eps = get_arg(ctx, 'eps', 4, 1e-05)
    output = ctx.method_return
    # eps (a python float) becomes a TRT constant broadcast to output rank.
    input_trt, eps_trt = add_missing_trt_tensors(
        ctx.network,
        [input, eps]
    )
    input_trt, eps_trt = broadcast_trt_tensors(
        ctx.network,
        [input_trt, eps_trt],
        len(output.shape)
    )
    if weight is not None:
        _, weight_trt = add_missing_trt_tensors(
            ctx.network,
            [input, weight]
        )
        _, weight_trt = broadcast_trt_tensors(
            ctx.network,
            [input_trt, weight_trt],
            len(output.shape)
        )
    if bias is not None:
        _, bias_trt = add_missing_trt_tensors(
            ctx.network,
            [input, bias]
        )
        _, bias_trt = broadcast_trt_tensors(
            ctx.network,
            [input_trt, bias_trt],
            len(output.shape)
        )
    if isinstance(shape, int):
        shape = (shape,)
    # Reduction axes = the trailing len(shape) dims, resolved to positive dims.
    dim = tuple([-i - 1 for i in range(len(shape))])
    dim = torch_dim_resolve_negative(dim, len(input.shape))
    axes = torch_dim_to_trt_axes(dim)
    # ux = mean(x); numerator = x - mean(x)
    ux = ctx.network.add_reduce(input_trt, trt.ReduceOperation.AVG, axes, keep_dims=True).get_output(0)
    numerator = ctx.network.add_elementwise(input_trt, ux, trt.ElementWiseOperation.SUB).get_output(0)
    # varx = mean((x - mean(x))^2)
    varx = ctx.network.add_elementwise(numerator, numerator, trt.ElementWiseOperation.PROD).get_output(0)
    varx = ctx.network.add_reduce(varx, trt.ReduceOperation.AVG, axes, keep_dims=True).get_output(0)
    # denom = sqrt(var + eps)
    denom = ctx.network.add_elementwise(varx, eps_trt, trt.ElementWiseOperation.SUM).get_output(0)
    denom = ctx.network.add_unary(denom, trt.UnaryOperation.SQRT).get_output(0)
    y = ctx.network.add_elementwise(numerator, denom, trt.ElementWiseOperation.DIV).get_output(0)
    # Optional elementwise affine: y * weight + bias.
    if weight is not None:
        y = ctx.network.add_elementwise(y, weight_trt, trt.ElementWiseOperation.PROD).get_output(0)
    if bias is not None:
        y = ctx.network.add_elementwise(y, bias_trt, trt.ElementWiseOperation.SUM).get_output(0)
    output._trt = y
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 5, 3)])
def test_layer_norm_1d():
return torch.nn.LayerNorm(3)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 5, 3)])
def test_layer_norm_2d():
return torch.nn.LayerNorm((5, 3))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 5, 3)])
def test_layer_norm_3d():
return torch.nn.LayerNorm((5, 5, 3))
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 5, 3)])
def test_layer_norm_1d_nonaffine():
return torch.nn.LayerNorm(3, elementwise_affine=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 5, 3)])
def test_layer_norm_2d_nonaffine():
return torch.nn.LayerNorm((5, 3), elementwise_affine=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 5, 3)])
def test_layer_norm_3d_nonaffine():
return torch.nn.LayerNorm((5, 5, 3), elementwise_affine=False) | 3,767 | 35.582524 | 105 | py |
torch2trt | torch2trt-master/torch2trt/converters/Conv1d.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.Conv1d.forward')
def convert_Conv1d(ctx):
    """Convert ``torch.nn.Conv1d`` to TensorRT.

    The 2D convolution layer is used here, so the 1D input is reshaped to
    add a trailing unit dimension, convolved with a (k, 1) kernel, and
    reshaped back to 1D.
    """
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    # promote each 1D hyperparameter to its 2D equivalent (trailing dim = 1)
    kernel_size = (module.kernel_size[0], 1)
    stride = (module.stride[0], 1)
    padding = (module.padding[0], 0)
    dilation = (module.dilation[0], 1)
    # append a unit dimension to the kernel: (out, in, k) -> (out, in, k, 1)
    kernel = module.weight.detach().cpu().numpy()[..., None]
    # default to an empty Weights object; replaced if the module has a bias
    bias = trt.Weights(torch_dtype_to_trt(module.weight.dtype))
    if module.bias is not None:
        bias = module.bias.detach().cpu().numpy()
    # reshape to 2D (0 in reshape_dims copies the corresponding input dim)
    layer = ctx.network.add_shuffle(input_trt)
    layer.reshape_dims = (0, -1, 0, 1)
    layer = ctx.network.add_convolution(
        input=layer.get_output(0),
        num_output_maps=module.out_channels,
        kernel_shape=kernel_size,
        kernel=kernel,
        bias=bias)
    layer.stride = stride
    layer.padding = padding
    layer.dilation = dilation
    if module.groups is not None:
        layer.num_groups = module.groups
    # reshape back to 1D
    layer = ctx.network.add_shuffle(layer.get_output(0))
    layer.reshape_dims = (0, -1, 0)
    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 10, 224)], max_batch_size=2)
def test_Conv1d_basic():
return torch.nn.Conv1d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 10, 224)], max_batch_size=2)
def test_Conv1d_stride2():
return torch.nn.Conv1d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 10, 224)], max_batch_size=2)
def test_Conv1d_kernel3():
return torch.nn.Conv1d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224)])
@add_module_test(torch.float32, torch.device('cuda'), [(2, 10, 224)], max_batch_size=2)
def test_Conv1d_dilation2():
return torch.nn.Conv1d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
| 2,370 | 33.362319 | 87 | py |
torch2trt | torch2trt-master/torch2trt/converters/adaptive_avg_pool2d.py | from torch2trt.torch2trt import *
from .AdaptiveAvgPool2d import *
@tensorrt_converter('torch.nn.functional.adaptive_avg_pool2d')
def convert_adaptive_avg_pool2d(ctx):
    """Handle the functional form by delegating to the module converter."""
    tensor = ctx.method_args[0]
    output_size = ctx.method_args[1]
    # rewrite the call as AdaptiveAvgPool2d(output_size).forward(tensor)
    ctx.method_args = (torch.nn.AdaptiveAvgPool2d(output_size), tensor)
    convert_AdaptiveAvgPool2d(ctx)
| 296 | 32 | 90 | py |
torch2trt | torch2trt-master/torch2trt/converters/group_norm.py | import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.group_norm')
def convert_group_norm(ctx):
    """Convert ``torch.nn.functional.group_norm`` to TensorRT layers.

    Reshapes (N, C, ...) to (N, G, C // G, ...), normalizes each group by
    its mean and stddev, reshapes back, then applies the optional
    per-channel affine transform with a scale layer.
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    num_groups = get_arg(ctx, 'num_groups', pos=1, default=None)
    weight = get_arg(ctx, 'weight', pos=2, default=None)
    bias = get_arg(ctx, 'bias', pos=3, default=None)
    eps = get_arg(ctx, 'eps', pos=4, default=1e-5)
    output = ctx.method_return
    input_trt, eps_trt = add_missing_trt_tensors(ctx.network, [input, eps])
    shape = list(input.shape)
    # (N, C, ...) -> (N, G, C // G, ...)
    split_shape = [shape[0]] + [num_groups, shape[1] // num_groups] + shape[2:]
    split_shape = tuple(split_shape)
    keepdim = True
    # split into groups
    layer = ctx.network.add_shuffle(input_trt)
    layer.reshape_dims = split_shape
    a = layer.get_output(0)
    # compute mean over groups (all dims after the group dim)
    reduce_dims = tuple(range(2, len(split_shape)))
    axes = torch_dim_to_trt_axes(reduce_dims)
    layer = ctx.network.add_reduce(a, trt.ReduceOperation.AVG, axes, keepdim)
    a_mean = layer.get_output(0)
    # compute stdev over groups: sqrt(mean((x - mean)^2) + eps)
    a_diff = ctx.network.add_elementwise(a, a_mean, trt.ElementWiseOperation.SUB).get_output(0)
    a_dist = ctx.network.add_elementwise(a_diff, a_diff, trt.ElementWiseOperation.PROD).get_output(0)
    a_var = ctx.network.add_reduce(a_dist, trt.ReduceOperation.AVG, axes, keepdim).get_output(0)
    a_var, eps_trt = broadcast_trt_tensors(ctx.network, [a_var, eps_trt], len(split_shape))
    a_var_eps = ctx.network.add_elementwise(a_var, eps_trt, trt.ElementWiseOperation.SUM).get_output(0)
    a_std = ctx.network.add_unary(a_var_eps, trt.UnaryOperation.SQRT).get_output(0)
    # divide by stdev
    b = ctx.network.add_elementwise(a_diff, a_std, trt.ElementWiseOperation.DIV).get_output(0)
    # reshape back to the original (N, C, ...) layout
    layer = ctx.network.add_shuffle(b)
    layer.reshape_dims = shape
    c = layer.get_output(0)
    # handle affine version
    if weight is not None or bias is not None:
        if weight is not None:
            scale = weight.detach().cpu().numpy()
        else:
            scale = np.ones(input.shape[1])
        if bias is not None:
            bias = bias.detach().cpu().numpy()
        else:
            bias = np.zeros(input.shape[1])
        # scale layer computes (x * scale + bias) ** power per channel (axis 1)
        power = np.ones_like(scale)
        layer = ctx.network.add_scale_nd(c, trt.ScaleMode.CHANNEL, bias, scale, power, 1)
        c = layer.get_output(0)
    output._trt = c
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)])
def test_group_norm_trt_g2_fp32():
return torch.nn.GroupNorm(2, 10)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)])
def test_group_norm_trt_g2_eps_fp32():
return torch.nn.GroupNorm(2, 10, eps=1e-4)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)])
def test_group_norm_trt_g2_eps_fp32_affine():
module = torch.nn.GroupNorm(2, 10, affine=True, eps=1e-4)
module.weight.data = torch.randn_like(module.weight.data)
module.bias.data = torch.randn_like(module.bias.data)
return module
| 3,179 | 33.193548 | 103 | py |
torch2trt | torch2trt-master/torch2trt/converters/relu.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.relu')
@tensorrt_converter('torch.relu_')
@tensorrt_converter('torch.nn.functional.relu')
@tensorrt_converter('torch.nn.functional.relu_')
@tensorrt_converter('torch.Tensor.relu')
def convert_functional_relu(ctx):
    """Route the functional / tensor-method relu variants through the
    ``torch.nn.ReLU`` module converter."""
    # prepend a ReLU module so args look like ReLU.forward(module, input, ...)
    module = torch.nn.ReLU()
    ctx.method_args = (module, *ctx.method_args)
    convert_relu(ctx)
@tensorrt_converter('torch.nn.ReLU.forward')
def convert_relu(ctx):
    """Convert ``torch.nn.ReLU`` to a TensorRT RELU activation layer."""
    tensor = ctx.method_args[1]
    tensor_trt = add_missing_trt_tensors(ctx.network, [tensor])[0]
    relu_layer = ctx.network.add_activation(
        input=tensor_trt, type=trt.ActivationType.RELU)
    ctx.method_return._trt = relu_layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
def test_relu_basic():
return torch.nn.ReLU()
class FunctionalRelu(torch.nn.Module):
    """Applies ReLU through its functional form; exists to exercise the
    ``torch.nn.functional.relu`` converter."""
    def forward(self, x):
        activated = torch.nn.functional.relu(x)
        return activated
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
def test_functional_relu_basic():
return FunctionalRelu()
class TensorRelu(torch.nn.Module):
    """Applies ReLU through the ``Tensor.relu`` method form; exists to
    exercise the ``torch.Tensor.relu`` converter."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        result = x.relu()
        return result
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20)])
def test_tensor_relu():
return TensorRelu()
| 1,388 | 26.78 | 71 | py |
torch2trt | torch2trt-master/torch2trt/converters/avg_pool.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter("torch.nn.functional.avg_pool2d", enabled=trt_version() < '7.0')
def convert_avg_pool2d(ctx):
# parse args
input = get_arg(ctx, "input", pos=0, default=None)
kernel_size = get_arg(ctx, "kernel_size", pos=1, default=None)
stride = get_arg(ctx, "stride", pos=2, default=None)
padding = get_arg(ctx, "padding", pos=3, default=0)
ceil_mode = get_arg(ctx, "ceil_mode", pos=4, default=False)
count_include_pad = get_arg(ctx, "count_include_pad", pos=5, default=True)
# get input trt tensor (or create constant if it doesn't exist)
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
# get kernel size
if not isinstance(kernel_size, tuple):
kernel_size = (kernel_size,) * 2
# get stride
if not isinstance(stride, tuple):
stride = (stride,) * 2
# get padding
if not isinstance(padding, tuple):
padding = (padding,) * 2
layer = ctx.network.add_pooling(
input=input_trt, type=trt.PoolingType.AVERAGE, window_size=kernel_size
)
layer.stride = stride
layer.padding = padding
layer.average_count_excludes_padding = not count_include_pad
if ceil_mode:
layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP
output._trt = layer.get_output(0)
@tensorrt_converter('torch.nn.functional.avg_pool2d', enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.nn.functional.avg_pool3d', enabled=trt_version() >= '7.0')
def convert_avg_pool_trt7(ctx):
    """Convert ``avg_pool2d`` / ``avg_pool3d`` to a TensorRT N-d pooling layer."""
    # parse args
    input = get_arg(ctx, 'input', pos=0, default=None)
    kernel_size = get_arg(ctx, 'kernel_size', pos=1, default=None)
    stride = get_arg(ctx, 'stride', pos=2, default=None)
    padding = get_arg(ctx, 'padding', pos=3, default=0)
    ceil_mode = get_arg(ctx, 'ceil_mode', pos=4, default=False)
    count_include_pad = get_arg(ctx, 'count_include_pad', pos=5, default=True)
    # get input trt tensor (or create constant if it doesn't exist)
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    # number of spatial dimensions (2 for avg_pool2d, 3 for avg_pool3d)
    input_dim = input.dim() - 2
    # get kernel size (expand scalar to one entry per spatial dimension)
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size, ) * input_dim
    # get stride
    if not isinstance(stride, tuple):
        stride = (stride, ) * input_dim
    # get padding
    if not isinstance(padding, tuple):
        padding = (padding, ) * input_dim
    layer = ctx.network.add_pooling_nd(
        input=input_trt, type=trt.PoolingType.AVERAGE, window_size=kernel_size)
    layer.stride_nd = stride
    layer.padding_nd = padding
    # TensorRT's flag is the inverse of PyTorch's count_include_pad
    layer.average_count_excludes_padding = not count_include_pad
    if ceil_mode:
        # round output dimensions up, matching PyTorch's ceil_mode=True
        layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP
    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 5, 7)])
def test_avg_pool2d_without_ceil_mode():
return torch.nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6)])
@add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 5, 7)])
def test_avg_pool2d_with_ceil_mode():
return torch.nn.AvgPool2d(
kernel_size=3, stride=2, padding=1, ceil_mode=True, count_include_pad=False
) # TRT does not support ceil_mode=True && count_include_pad=True
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 4, 6)], enabled=trt_version() >= '7.0')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 5, 7)], enabled=trt_version() >= '7.0')
def test_avg_pool3d_without_ceil_mode_trt7():
return torch.nn.AvgPool3d(kernel_size=3, stride=2, padding=1, ceil_mode=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 4, 6)], enabled=trt_version() >= '7.0')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 5, 7)], enabled=trt_version() >= '7.0')
def test_avg_pool3d_with_ceil_mode_trt7():
return torch.nn.AvgPool3d(kernel_size=3, stride=2, padding=1, ceil_mode=True, count_include_pad=False) # TRT does not support ceil_mode=True && count_include_pad=True
| 4,301 | 37.410714 | 170 | py |
torch2trt | torch2trt-master/torch2trt/converters/ConvTranspose.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.ConvTranspose2d.forward', enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.nn.ConvTranspose3d.forward', enabled=trt_version() >= '7.0')
def convert_ConvTranspose2d_trt7(ctx):
    """Convert ``torch.nn.ConvTranspose2d`` / ``ConvTranspose3d`` to a
    TensorRT N-d deconvolution layer.

    Scalar hyperparameters are expanded to one entry per spatial dimension;
    dilation other than 1 is rejected because the TensorRT deconvolution
    layer does not support it.
    """
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    # number of spatial dimensions (2 for ConvTranspose2d, 3 for ConvTranspose3d)
    input_dim = input.dim() - 2
    kernel_size = module.kernel_size
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size, ) * input_dim
    stride = module.stride
    if not isinstance(stride, tuple):
        stride = (stride, ) * input_dim
    padding = module.padding
    if not isinstance(padding, tuple):
        padding = (padding, ) * input_dim
    assert module.dilation == 1 or all([d == 1 for d in module.dilation]), \
        "Transposed convolution dilation is not supported in TensorRT"
    kernel = module.weight.detach().cpu().numpy()
    # default to an empty Weights object; replaced if the module has a bias
    bias = trt.Weights(torch_dtype_to_trt(module.weight.dtype))
    if module.bias is not None:
        bias = module.bias.detach().cpu().numpy()
    layer = ctx.network.add_deconvolution_nd(
        input=input_trt,
        num_output_maps=module.out_channels,
        kernel_shape=kernel_size,
        kernel=kernel,
        bias=bias)
    layer.stride_nd = stride
    layer.padding_nd = padding
    if module.groups is not None:
        layer.num_groups = module.groups
    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 7, 7)], enabled=trt_version() >= '7.0')
def test_ConvTranspose2d_basic_trt7():
return torch.nn.ConvTranspose2d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 8, 8)], enabled=trt_version() >= '7.0')
def test_ConvTranspose2d_stride2_trt7():
return torch.nn.ConvTranspose2d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 9, 9)], enabled=trt_version() >= '7.0')
def test_ConvTranspose2d_kernel3_trt7():
return torch.nn.ConvTranspose2d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 7, 7, 7)], enabled=trt_version() >= '7.0')
def test_ConvTranspose3d_basic_trt7():
return torch.nn.ConvTranspose3d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 7, 7, 7)], enabled=trt_version() >= '7.0')
def test_ConvTranspose3d_stride2_trt7():
return torch.nn.ConvTranspose3d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 6, 6, 6)], enabled=trt_version() >= '7.0')
def test_ConvTranspose3d_kernel3_trt7():
return torch.nn.ConvTranspose3d(10, 5, kernel_size=3, stride=2, padding=1)
| 2,927 | 35.6 | 105 | py |
torch2trt | torch2trt-master/torch2trt/converters/normalize.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.normalize')
def convert_normalize(ctx):
    """Convert ``torch.nn.functional.normalize`` to TensorRT layers.

    Computes x / max(||x||_p, eps) along ``dim`` with the norm expanded as
    sum(|x| ** p) ** (1 / p).
    """
    # get args
    input = get_arg(ctx, 'input', pos=0, default=None)
    p = get_arg(ctx, 'p', pos=1, default=2)
    dim = get_arg(ctx, 'dim', pos=2, default=1)
    eps = get_arg(ctx, 'eps', pos=3, default=1e-12)
    # input_trt = input._trt
    output = ctx.method_return
    # add broadcastable scalar constants to network (eps, p, 1/p)
    input_trt, eps_trt, p_trt, p_inv_trt = add_missing_trt_tensors(ctx.network, [input, eps, p, 1.0 / p])
    input_trt, eps_trt, p_trt, p_inv_trt = broadcast_trt_tensors(ctx.network, [input_trt, eps_trt, p_trt, p_inv_trt], len(input_trt.shape))
    # compute norm = sum(abs(x)**p, dim=dim)**(1./p)
    norm = ctx.network.add_unary(input_trt, trt.UnaryOperation.ABS).get_output(0)
    norm = ctx.network.add_elementwise(norm, p_trt, trt.ElementWiseOperation.POW).get_output(0)
    norm = ctx.network.add_reduce(norm, trt.ReduceOperation.SUM, torch_dim_to_trt_axes(dim), keep_dims=True).get_output(0)
    norm = ctx.network.add_elementwise(norm, p_inv_trt, trt.ElementWiseOperation.POW).get_output(0)
    # clamp norm = max(norm, eps) to avoid division by zero
    norm = ctx.network.add_elementwise(norm, eps_trt, trt.ElementWiseOperation.MAX).get_output(0)
    # divide input by norm
    output._trt = ctx.network.add_elementwise(input_trt, norm, trt.ElementWiseOperation.DIV).get_output(0)
class Normalize(torch.nn.Module):
    """Module wrapper around ``torch.nn.functional.normalize``.

    Positional and keyword arguments given at construction are forwarded to
    the functional call on every forward pass.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.args = args
        self.kwargs = kwargs
    def forward(self, x):
        normalized = torch.nn.functional.normalize(x, *self.args, **self.kwargs)
        return normalized
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_normalize_basic():
return Normalize()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_normalize_l1_basic():
return Normalize(p=1.0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_normalize_l1p5_basic():
return Normalize(p=1.5)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_normalize_l2_height():
return Normalize(p=2.0, dim=2) | 2,756 | 40.149254 | 139 | py |
torch2trt | torch2trt-master/torch2trt/converters/narrow.py | import tensorrt as trt
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.Tensor.narrow')
@tensorrt_converter('torch.narrow')
def convert_narrow(ctx):
    """Convert ``torch.narrow`` / ``Tensor.narrow`` to a TensorRT slice layer.

    Fixes over the previous version: ``dim``, ``start`` and ``length`` are
    all read through ``get_arg`` so keyword-style calls work (the old code
    fetched ``start`` via ``get_arg`` but then discarded it and indexed
    ``ctx.method_args[2]`` / ``[3]`` positionally, which raised IndexError
    for keyword calls).
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    dim = get_arg(ctx, 'dim', pos=1, default=0)
    start_idx = get_arg(ctx, 'start', pos=2, default=None)
    length = get_arg(ctx, 'length', pos=3, default=None)
    output = ctx.method_return
    shape = list(input.shape)
    # resolve a negative dim against the full (batch-inclusive) rank
    if dim < 0:
        dim = len(shape) + dim
    # slice: keep everything except the narrowed dimension
    start = [0] * len(shape)
    stride = [1] * len(shape)
    start[dim] = start_idx
    shape[dim] = length
    # not consider batch dimension
    input_trt = trt_(ctx.network, input)
    layer = ctx.network.add_slice(input=input_trt, start=start, shape=shape, stride=stride)
    output._trt = layer.get_output(0)
class Narrow(torch.nn.Module):
    """Module wrapper around ``torch.narrow`` with a fixed slice spec."""
    def __init__(self, dim, start, length):
        super().__init__()
        self.dim = dim
        self.start = start
        self.length = length
    def forward(self, x):
        # select `length` elements along `dim` beginning at `start`
        return torch.narrow(x, self.dim, self.start, self.length)
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,224,224)])
def test_narrow1():
return Narrow(1,0,2)
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,224,224)])
def test_narrow2():
return Narrow(2,0,50)
| 1,369 | 32.414634 | 127 | py |
torch2trt | torch2trt-master/torch2trt/converters/pad.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.pad')
def convert_pad(ctx):
    """Convert ``torch.nn.functional.pad`` to a TensorRT 2D padding layer.

    Handles a 4-element pad spec (left, right, top, bottom) applied to the
    last two dimensions; zero padding only.
    """
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    pad = ctx.method_args[1]
    # torch order is (left, right, top, bottom); TensorRT wants (top, left) / (bottom, right)
    pre_padding = (pad[2], pad[0])
    post_padding = (pad[3], pad[1])
    # mode / value are ignored since not supported by TensorRT
    layer = ctx.network.add_padding(input_trt, pre_padding, post_padding)
    output._trt = layer.get_output(0)
class Pad(torch.nn.Module):
    """Module wrapper around ``torch.nn.functional.pad`` with a fixed pad spec."""
    def __init__(self, pad):
        super().__init__()
        self.pad = pad
    def forward(self, x):
        padded = torch.nn.functional.pad(x, self.pad)
        return padded
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_pad_basic():
return Pad((1, 2, 3, 4)) | 920 | 26.909091 | 73 | py |
torch2trt | torch2trt-master/torch2trt/converters/tensor.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.tensor')
def convert_mod(ctx):
    """Materialize a ``torch.tensor(...)`` call as a TensorRT constant layer."""
    output = ctx.method_return
    const_layer = ctx.network.add_constant(
        tuple(output.shape), output.detach().cpu().numpy())
    output._trt = const_layer.get_output(0)
class TorchTensor(torch.nn.Module):
    """Adds a constant CUDA tensor created inline via ``torch.tensor``."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        const = torch.tensor([[1., 2., 3.], [4., 5., 6.]], device=torch.device('cuda'))
        return x + const
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 3)])
def test_tensor_creation():
return TorchTensor()
| 649 | 27.26087 | 90 | py |
torch2trt | torch2trt-master/torch2trt/converters/prelu.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.prelu')
def convert_prelu(ctx):
    """Convert ``torch.nn.functional.prelu`` to TensorRT layers.

    Uses the identity prelu(x) = relu(x) - alpha * relu(-x), built from two
    RELU activations, a negation, and elementwise ops.
    """
    input = get_arg(ctx, 'input', pos=0, default=None)
    weight = get_arg(ctx, 'weight', pos=1, default=None)
    output = ctx.method_return
    # reshape weight so it broadcasts along the channel dimension (dim 1)
    weight_shape = [1] * (len(input.shape))
    weight_shape[1] = weight.numel()
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    # y = prelu(x) = relu(x) - alpha * relu(-x)
    # weight is negated here so the final combine can be a single SUM
    weight_trt = ctx.network.add_constant(weight_shape, -weight.detach().view(weight_shape).cpu().numpy()).get_output(0) # detach so considered leaf
    # x >= 0
    a = ctx.network.add_activation(input_trt, trt.ActivationType.RELU).get_output(0)
    # x <= 0
    b = ctx.network.add_unary(input_trt, trt.UnaryOperation.NEG).get_output(0)
    b = ctx.network.add_activation(b, trt.ActivationType.RELU).get_output(0)
    b = ctx.network.add_elementwise(b, weight_trt, trt.ElementWiseOperation.PROD).get_output(0)
    # y = a + b
    y = ctx.network.add_elementwise(a, b, trt.ElementWiseOperation.SUM)
    output._trt = y.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3, 3)])
def test_prelu_scalar():
return torch.nn.PReLU()
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 5, 3, 3)])
def test_prelu_vector():
m = torch.nn.PReLU(5)
m.weight = torch.nn.Parameter(torch.randn(5)) # randn so each channel different
return m | 1,779 | 36.87234 | 148 | py |
torch2trt | torch2trt-master/torch2trt/converters/LogSoftmax.py | from torch2trt.torch2trt import *
@tensorrt_converter('torch.nn.LogSoftmax.forward')
def convert_LogSoftmax(ctx):
input = ctx.method_args[1]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
layer = ctx.network.add_softmax(input=input_trt)
layer = ctx.network.add_unary(input=layer.get_output(0),
op=trt.UnaryOperation.LOG)
output._trt = layer.get_output(0) | 433 | 35.166667 | 64 | py |
torch2trt | torch2trt-master/torch2trt/converters/Conv.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.Conv2d.forward', enabled=trt_version() >= '7.0')
@tensorrt_converter('torch.nn.Conv3d.forward', enabled=trt_version() >= '7.0')
def convert_Conv_trt7(ctx):
    """Convert ``torch.nn.Conv2d`` / ``Conv3d`` to a TensorRT N-d convolution.

    Scalar hyperparameters are expanded to one entry per spatial dimension;
    the module's weight (and optional bias) are copied in as numpy arrays.
    """
    module = ctx.method_args[0]
    input = ctx.method_args[1]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    # number of spatial dimensions (2 for Conv2d, 3 for Conv3d)
    input_dim = input.dim() - 2
    kernel_size = module.kernel_size
    if not isinstance(kernel_size, tuple):
        kernel_size = (kernel_size, ) * input_dim
    stride = module.stride
    if not isinstance(stride, tuple):
        stride = (stride, ) * input_dim
    padding = module.padding
    if not isinstance(padding, tuple):
        padding = (padding, ) * input_dim
    dilation = module.dilation
    if not isinstance(dilation, tuple):
        dilation = (dilation, ) * input_dim
    kernel = module.weight.detach().cpu().numpy()
    bias = None #trt.Weights(torch_dtype_to_trt(module.weight.dtype))
    if module.bias is not None:
        bias = module.bias.detach().cpu().numpy()
    layer = ctx.network.add_convolution_nd(
        input=input_trt,
        num_output_maps=module.out_channels,
        kernel_shape=kernel_size,
        kernel=kernel,
        bias=bias)
    layer.stride_nd = stride
    layer.padding_nd = padding
    layer.dilation_nd = dilation
    if module.groups is not None:
        layer.num_groups = module.groups
    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_basic_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_stride2_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_kernel3_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 224, 224)], enabled=trt_version() >= '7.0')
def test_Conv2d_dilation2_trt7():
return torch.nn.Conv2d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_basic_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=1, stride=1, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_stride2_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=1, stride=2, padding=0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_kernel3_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=3, stride=2, padding=1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 64, 64, 64)], enabled=trt_version() >= '7.0')
def test_Conv3d_dilation2_trt7():
return torch.nn.Conv3d(10, 5, kernel_size=3, stride=1, padding=1, dilation=2)
| 3,256 | 34.402174 | 108 | py |
torch2trt | torch2trt-master/torch2trt/converters/interpolate.py | import torch.nn.functional as F
import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import collections
def has_interpolate_plugin():
    """Return True if the optional torch2trt interpolate plugin is importable.

    Returns:
        bool: True when ``torch2trt.torch_plugins.InterpolatePlugin`` can be
        imported, False otherwise.
    """
    try:
        from torch2trt.torch_plugins import InterpolatePlugin
        return True
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; Exception still covers ImportError and native
        # extension load failures (e.g. OSError from a missing .so)
        return False
def get_interpolate_plugin(size, mode, align_corners):
    """Create and deserialize a TensorRT interpolate plugin instance.

    Args:
        size: target spatial output size (list of ints).
        mode: interpolation mode string (e.g. 'nearest', 'bilinear').
        align_corners: corner alignment flag passed to the plugin.
    """
    from torch2trt.torch_plugins import InterpolatePlugin
    PLUGIN_NAME = 'interpolate'
    registry = trt.get_plugin_registry()
    # look up the torch2trt-namespaced creator registered for this plugin
    creator = [c for c in registry.plugin_creator_list if c.name == PLUGIN_NAME and c.plugin_namespace == 'torch2trt'][0]
    torch2trt_plugin = InterpolatePlugin(size=size, mode=mode, align_corners=align_corners)
    # round-trip through serialization so TensorRT owns the plugin instance
    return creator.deserialize_plugin(PLUGIN_NAME, torch2trt_plugin.serializeToString())
@tensorrt_converter('torch.nn.functional.interpolate', enabled=trt_version() < '7.1' and has_interpolate_plugin())
def convert_interpolate_plugin(ctx):
    """Convert ``interpolate`` via the custom plugin (TensorRT < 7.1 path)."""
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    try:
        mode = get_arg(ctx, 'mode', pos=3, default='nearest')
    except KeyError:
        mode = 'nearest'
    try:
        align_corners = get_arg(ctx, 'align_corners', pos=4, default=None)
    except KeyError:
        align_corners = False
    # currently only works for NCHW
    size = list(output.shape[2:])
    plugin = get_interpolate_plugin(size=size, mode=mode, align_corners=align_corners)
    layer = ctx.network.add_plugin_v2([input_trt], plugin)
    output._trt = layer.get_output(0)
@tensorrt_converter('torch.nn.functional.interpolate', enabled=trt_version() >= '7.1')
@tensorrt_converter('torch.nn.functional.upsample', enabled=trt_version() >= '7.1')
def convert_interpolate_trt7(ctx):
    """Convert ``interpolate`` / ``upsample`` to a TensorRT resize layer (TRT >= 7.1).

    Exactly one of ``size`` or ``scale_factor`` is expected, mirroring the
    torch API; linear-family modes map to LINEAR resize and everything else
    to NEAREST.

    Fix: ``collections.Sequence`` was deprecated since Python 3.3 and
    removed in 3.10 — the ABC lives in ``collections.abc``.
    """
    # parse args
    input = get_arg(ctx, 'input', pos=0, default=None)
    size = get_arg(ctx, 'size', pos=1, default=None)
    scale_factor = get_arg(ctx, 'scale_factor', pos=2, default=None)
    mode = get_arg(ctx, 'mode', pos=3, default='nearest')
    align_corners = get_arg(ctx, 'align_corners', pos=4, default=None)
    # number of spatial dimensions
    input_dim = input.dim() - 2
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    layer = ctx.network.add_resize(input=input_trt)
    shape = size
    if shape is not None:
        # keep batch and channel sizes, resize only the spatial dims
        if isinstance(shape, collections.abc.Sequence):
            shape = [input.size(0), input.size(1)] + list(shape)
        else:
            shape = [input.size(0), input.size(1)] + [shape] * input_dim
        shape = make_size_wrapper(shape)
        # layer.shape = shape (old, static shape)
        layer.set_input(1, shape._trt)
    scales = scale_factor
    if scales is not None:
        if not isinstance(scales, collections.abc.Sequence):
            scales = [scales] * input_dim
        # batch and channel dimensions are never rescaled
        layer.scales = [1, 1] + list(scales)
    resize_mode = mode
    if resize_mode.lower() in ["linear", "bilinear", "trilinear"]:
        layer.resize_mode = trt.ResizeMode.LINEAR
    else:
        layer.resize_mode = trt.ResizeMode.NEAREST
    if align_corners is not None:
        if trt_version() > '8.0':
            # TRT 8 replaced `align_corners` with coordinate transformations
            if align_corners:
                layer.coordinate_transformation = trt.ResizeCoordinateTransformation.ALIGN_CORNERS
        else:
            layer.align_corners = align_corners
    output._trt = layer.get_output(0)
class Interpolate(torch.nn.Module):
    """Module wrapper around ``F.interpolate`` with fixed parameters.

    Exactly one of ``size`` / ``scale_factor`` should be given, matching
    the semantics of ``torch.nn.functional.interpolate``.
    """
    def __init__(self, size=None, scale_factor=None, mode=None, align_corners=None):
        super().__init__()
        ## Use either size or scale factor.
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners
    def forward(self, x):
        resized = F.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
        )
        return resized
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_nearest():
return Interpolate(size=(224, 224), mode='nearest', align_corners=None)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_bilinear():
return Interpolate(size=(224, 224), mode= 'bilinear', align_corners=False)
# ---------------------------------------------------------------------------
# Conversion tests for interpolate/upsample. Each factory returns a module
# that the add_module_test harness converts with torch2trt and compares
# against native PyTorch output for the listed input shapes.
# ---------------------------------------------------------------------------

# Plugin path: only for TensorRT < 7.1 when the interpolate plugin is built.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_bicubic():
    # bicubic upsampling 112x112 -> 224x224
    return Interpolate(size=(224, 224), mode='bicubic',align_corners= False)

@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_area():
    # area (adaptive average) downsampling 112x112 -> 56x56
    return Interpolate(size=(56, 56), mode='area',align_corners= None)

@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_upsample_scale_factor2():
    return nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False)

# Native TensorRT resize path (TRT >= 7.1), 2D cases.
@add_module_test(torch.float32, torch.device('cuda'), [(1,2,12,12)], enabled=trt_version() >= '7.1')
def test_nearest_mode():
    return torch.nn.Upsample(scale_factor=2, mode="nearest")

@add_module_test(torch.float32, torch.device('cuda'), [(1,4,12,12)], enabled=trt_version() >= '7.1')
def test_bilinear_mode():
    return torch.nn.Upsample(scale_factor=3, mode="bilinear",align_corners=False)

@add_module_test(torch.float32, torch.device('cuda'), [(1,3,12,12)], enabled=trt_version() >= '7.1')
def test_align_corner():
    return torch.nn.Upsample(scale_factor=2.0, mode="bilinear", align_corners=True)

@add_module_test(torch.float32, torch.device('cuda'), [(1,3,12,12)], enabled=trt_version() >= '7.1')
def test_align_corner_functional():
    # same as above but through the functional Interpolate wrapper
    return Interpolate(scale_factor=2.0, mode="bilinear", align_corners=True)

@add_module_test(torch.float32, torch.device('cuda'), [(1,5,13,13)], enabled=trt_version() >= '7.1')
def test_bilinear_mode_odd_input_shape():
    return torch.nn.Upsample(scale_factor=2,mode="bilinear",align_corners=False)

@add_module_test(torch.float32, torch.device('cuda'), [(1,4,12,12)], enabled=trt_version() >= '7.1')
def test_size_parameter():
    # explicit output size instead of scale factor
    return torch.nn.Upsample(size=3,mode="nearest")

@add_module_test(torch.float32, torch.device('cuda'), [(1,3,13,13)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,1,1)], enabled=trt_version() >= '7.1')
def test_size_parameter_odd_input():
    # non-square explicit output size
    return torch.nn.Upsample(size=[6,3],mode="nearest")

# 3D (volumetric) cases.
@add_module_test(torch.float32, torch.device('cuda'), [(1,4,6,6,6)], enabled=trt_version() >= '7.1')
def test_nearest_mode_3d():
    return torch.nn.Upsample(scale_factor=2, mode="nearest")

@add_module_test(torch.float32, torch.device('cuda'), [(1,3,5,5,5)], enabled=trt_version() >= '7.1')
def test_bilinear_mode_3d():
    return torch.nn.Upsample(scale_factor=3, mode="trilinear",align_corners=False)

@add_module_test(torch.float32, torch.device('cuda'), [(1,4,8,8,8)], enabled=trt_version() >= '7.1')
def test_align_corner_3d():
    return torch.nn.Upsample(scale_factor=4, mode="trilinear", align_corners=True)

@add_module_test(torch.float32, torch.device('cuda'), [(1,6,7,7,7)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,2,4,4)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,1,1,1)], enabled=trt_version() >= '7.1')
def test_bilinear_mode_odd_input_shape_3d():
    return torch.nn.Upsample(scale_factor=2, mode="trilinear",align_corners=False)

@add_module_test(torch.float32, torch.device('cuda'), [(1,1,12,12,12)], enabled=trt_version() >= '7.1')
def test_size_parameter_3d():
    return torch.nn.Upsample(size=3,mode="trilinear", align_corners=True)

@add_module_test(torch.float32, torch.device('cuda'), [(1,3,7,9,5)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,4,3,5,1)], enabled=trt_version() >= '7.1')
def test_size_parameter_odd_input_3d():
    return torch.nn.Upsample(size=[11,14,17],mode="trilinear", align_corners=False)
| 8,404 | 42.549223 | 134 | py |
torch2trt | torch2trt-master/torch2trt/converters/sigmoid.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.nn.functional.sigmoid')
@tensorrt_converter('torch.sigmoid')
@tensorrt_converter('torch.Tensor.sigmoid')
def convert_sigmoid(ctx):
    """Map an intercepted sigmoid call onto a TensorRT SIGMOID activation.

    ctx.method_args[0] is the input tensor of the original call;
    ctx.method_return is the PyTorch output tensor that gets tagged with
    the corresponding TensorRT tensor via ``_trt``.
    """
    input = ctx.method_args[0]
    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
    output = ctx.method_return
    layer = ctx.network.add_activation(input_trt, trt.ActivationType.SIGMOID)
    output._trt = layer.get_output(0)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_sigmoid_basic():
    # exercises the torch.nn.Sigmoid module path of the converter
    return torch.nn.Sigmoid()
class TensorSigmoid(torch.nn.Module):
    """Thin module wrapper around the ``Tensor.sigmoid`` method call."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # use the tensor-method form so the torch.Tensor.sigmoid
        # converter registration is the one exercised
        result = x.sigmoid()
        return result
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20)])
def test_tensor_sigmoid():
    # exercises the Tensor.sigmoid() method path of the converter
    return TensorSigmoid()
| 916 | 26.787879 | 77 | py |
torch2trt | torch2trt-master/torch2trt/converters/ne.py | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
@tensorrt_converter('torch.ne')
@tensorrt_converter('torch.Tensor.__ne__')
def convert_ne(ctx):
    """Convert ``a != b`` by emitting NOT(EQUAL(a, b)) in the TensorRT graph."""
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    output = ctx.method_return
    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input_a, input_b])
    # broadcast both operands to the rank of the result before the elementwise op
    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape))
    layer_1 = ctx.network.add_elementwise(input_a_trt, input_b_trt, trt.ElementWiseOperation.EQUAL)
    # invert the equality mask to obtain !=
    layer_2 = ctx.network.add_unary(layer_1.get_output(0), trt.UnaryOperation.NOT)
    output._trt = layer_2.get_output(0)
class NotEqual(torch.nn.Module):
    """Elementwise inequality expressed through the ``!=`` operator."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        mask = x != y
        return mask
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 1, 20)])
def test_ne_op():
    # operator form with broadcasting across the third dimension
    return NotEqual()
class NotEqualConst(torch.nn.Module):
    """Compare a tensor against a fixed scalar constant with ``!=``."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        threshold = 13.62
        return x != threshold
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20)])
def test_ne_op_const():
    # tensor-vs-scalar comparison path
    return NotEqualConst()
class TorchNotEqual(torch.nn.Module):
    """Elementwise inequality through the functional ``torch.ne``."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        mask = torch.ne(x, y)
        return mask
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 40, 20), (1, 3, 1, 20)])
def test_ne_torch():
    # functional torch.ne path with broadcasting
    return TorchNotEqual()
| 1,579 | 27.727273 | 112 | py |
dct_vae | dct_vae-main/run_experiment.py | import os
import torch
import numpy as np
import os.path as osp
import wandb
import copy
from pprint import pprint
import hydra.utils
from hydra.utils import instantiate
import omegaconf
from torch.nn.parallel.distributed import DistributedDataParallel
import torch.distributed as dist
import utils.trainer as trainer
import utils.tester as tester
from utils.wandb import get_checkpoint
from utils.distributed_training import setup_mpi, is_main_process, cleanup
def optimizer_to(optim, device):
    """Move every tensor held in an optimizer's state onto ``device`` in place.

    Needed after loading a CPU checkpoint, since ``Optimizer.load_state_dict``
    keeps the buffers on whatever device they were saved on.
    """

    def _relocate(tensor):
        # move the tensor payload and, when present, its gradient
        tensor.data = tensor.data.to(device)
        if tensor._grad is not None:
            tensor._grad.data = tensor._grad.data.to(device)

    for entry in optim.state.values():
        # Not sure there are any global tensors in the state dict
        if isinstance(entry, torch.Tensor):
            _relocate(entry)
        elif isinstance(entry, dict):
            # per-parameter state dicts (e.g. momentum buffers)
            for value in entry.values():
                if isinstance(value, torch.Tensor):
                    _relocate(value)
def compute_z_L_size(args):
    """Infer the top-level latent (z_L) shape from the config and patch the prior.

    Mutates ``args`` in place (and returns it) so later ``instantiate`` calls
    see resolved sizes. For diffusion priors the inner UNet config is patched;
    otherwise the prior just receives the latent size directly.
    """
    if args.model.name == 'ladder':
        # spatial resolution halves once per ladder level
        im_size = args.dataset.image_size[1]
        hw_size = int(im_size / (2 ** len(args.model.latent_width)))
        z_L_size = (args.model.latent_width[-1], hw_size, hw_size)
    elif args.model.name == 'context_ladder':
        # context models keep the image channel count at ctx_size resolution
        z_L_size = (args.dataset.image_size[0], args.model.ctx_size, args.model.ctx_size)
    if 'Diffusion' in args.model.decoder.z_L_prior._target_:
        args.model.decoder.z_L_prior.model.image_size = z_L_size[1]
        args.model.decoder.z_L_prior.model.in_channels = z_L_size[0]
        args.model.decoder.z_L_prior.model.out_channels = z_L_size[0]
        # non-eps parametrizations predict two quantities -> double the channels
        if args.model.decoder.z_L_prior.parametrization != 'eps':
            args.model.decoder.z_L_prior.model.out_channels *= 2
    else:
        args.model.decoder.z_L_prior.size = z_L_size
    return args
def init_model(args, train_loader):
    """Instantiate the model (and an optional EMA copy) from the hydra config.

    Returns (model, ema_model); ema_model is None when ema_rate <= 0.
    """
    model = instantiate(args.model)
    if 'context' in args.model.name:
        # context models need dataset statistics to normalize DCT coefficients
        model.decoder.init_dct_normalization(train_loader)
    ema_model = None
    if args.train.ema_rate > 0:
        # EMA copy starts from identical weights and is never trained directly
        ema_model = instantiate(args.model)
        model_params = copy.deepcopy(model.state_dict())
        ema_model.load_state_dict(model_params)
        ema_model.requires_grad_(False)
    return model, ema_model
def load_from_checkpoint(args, model, ema_model, optimizer, scheduler, scaler=None):
    """Restore full training state from the W&B checkpoint named by resume_id.

    The checkpoint is loaded on CPU first; optimizer state is then moved to
    the configured training device. Returns the updated
    (args, model, ema_model, optimizer, scheduler, scaler) tuple.
    """
    chpt = get_checkpoint(args.wandb.setup,
                          idx=args.train.resume_id,
                          device='cpu'
                          )
    args.train.start_epoch = chpt['epoch']
    model.load_state_dict(chpt['model_state_dict'])
    if chpt['ema_model_state_dict'] is not None:
        ema_model.load_state_dict(chpt['ema_model_state_dict'])
    optimizer.load_state_dict(chpt['optimizer_state_dict'])
    if scheduler is not None:
        scheduler.load_state_dict(chpt['scheduler_state_dict'])
    # optimizer buffers were loaded on CPU; push them to the training device
    optimizer_to(optimizer, args.train.device)
    if scaler is not None:
        scaler.load_state_dict(chpt['scaler_state_dict'])
    return args, model, ema_model, optimizer, scheduler, scaler
def compute_params(model, args):
    """Log trainable-parameter counts (total/encoder/decoder/prior) to W&B."""
    # unwrap DistributedDataParallel to reach the underlying VAE
    vae = model.module if args.train.ddp else model
    num_param = sum(p.numel() for p in vae.parameters() if p.requires_grad)
    enc_param = sum(p.numel() for p in vae.encoder.parameters() if p.requires_grad)
    dec_param = sum(p.numel() for p in vae.decoder.parameters() if p.requires_grad)
    prior_param = sum(
        p.numel() for p in vae.decoder.z_L_prior.parameters() if p.requires_grad
    )
    wandb.run.summary['num_parameters'] = num_param
    wandb.run.summary['encoder_parameters'] = enc_param
    wandb.run.summary['decoder_parameters'] = dec_param
    wandb.run.summary['prior_parameters'] = prior_param
@hydra.main(version_base="1.2", config_path="configs", config_name="defaults.yaml")
def run(args: omegaconf.DictConfig) -> None:
    """End-to-end experiment driver: data, model, W&B logging, training, test."""
    # pin the process to a single GPU when a device index was requested
    if args.train.device[-1] == '0':
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        args.train.device = 'cuda'
    elif args.train.device[-1] == '1':
        os.environ["CUDA_VISIBLE_DEVICES"] = "1"
        args.train.device = 'cuda'
    if args.train.ddp:
        args = setup_mpi(args)
    # infer z_L size, update the prior params
    args = compute_z_L_size(args)
    # Set the seed
    torch.manual_seed(args.train.seed)
    torch.cuda.manual_seed(args.train.seed)
    np.random.seed(args.train.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    wandb_cfg = omegaconf.OmegaConf.to_container(
        args, resolve=True, throw_on_missing=True
    )
    pprint(wandb_cfg)
    # ------------
    # data
    # ------------
    # datasets live next to the original working dir, not hydra's run dir
    dset_params = {
        'root': os.path.join(hydra.utils.get_original_cwd(), 'datasets/')
    }
    if args.train.ddp:
        dset_params['ddp'] = True
        dset_params['mpi_size'] = args.mpi_size
        dset_params['rank'] = args.rank
    if 'context' in args.model.name:
        dset_params['ctx_size'] = args.model.ctx_size
    data_module = instantiate(args.dataset.data_module, **dset_params)
    data_module.setup('fit')
    train_loader = data_module.train_dataloader()
    val_loader = data_module.val_dataloader()
    # ------------
    # model & optimizer
    # ------------
    model, ema_model = init_model(args, train_loader)
    optimizer = instantiate(args.train.optimizer, params=model.parameters())
    scheduler = None
    if hasattr(args.train, "scheduler"):
        scheduler = instantiate(args.train.scheduler, optimizer=optimizer)
    if args.train.use_amp:
        scaler = torch.cuda.amp.GradScaler()
    else:
        scaler = None
    if args.train.resume_id is not None:
        print(f'Resume training {args.train.resume_id}')
        args, model, ema_model, optimizer, scheduler, scaler = \
            load_from_checkpoint(args, model, ema_model, optimizer, scheduler, scaler)
    model.train()
    if args.train.ddp:
        model = model.cuda(args.local_rank)
        model = DistributedDataParallel(model, device_ids=[args.local_rank],
                                        output_device=args.local_rank)
        if ema_model is not None:
            ema_model = ema_model.cuda(args.local_rank)
    else:
        model.to(args.train.device)
        if ema_model is not None:
            ema_model.to(args.train.device)
    # ------------
    # logging
    # ------------
    wandb.require("service")
    # only rank 0 talks to W&B
    if is_main_process():
        if args.wandb.api_key is not None:
            os.environ["WANDB_API_KEY"] = args.wandb.api_key
        tags = [
            'train_vae',
            args.dataset.name,
            args.model.name,
        ]
        if args.train.resume_id is not None:
            wandb.init(
                **args.wandb.setup,
                id=args.train.resume_id,
                resume='must',
                settings=wandb.Settings(start_method="thread")
            )
        else:
            wandb.init(
                **args.wandb.setup,
                config=wandb_cfg,
                group=f'{args.model.name}_{args.dataset.name}' if args.wandb.group is None else args.wandb.group,
                tags=tags,
                dir=hydra.utils.get_original_cwd(),
                settings=wandb.Settings(start_method="thread")
            )
        wandb.watch(model, **args.wandb.watch)
        # define our custom x axis metric
        wandb.define_metric("epoch")
        for pref in ['train', 'val', 'z_L_prior', 'ladder_sample', 'ladder', 'misc',
                     'pic']:
            wandb.define_metric(f"{pref}/*", step_metric="epoch")
        wandb.define_metric("val/loss", summary="min", step_metric="epoch")
        # add network size
        compute_params(model, args)
    if args.train.ddp:
        dist.barrier()
    # ------------
    # training & testing
    # ------------
    # train
    trainer.train(args.train, train_loader, val_loader, model, optimizer, scheduler, ema_model, scaler)
    # save the best model
    if is_main_process():
        if osp.exists(osp.join(wandb.run.dir, 'last_chpt.pth')):
            chpt = torch.load(osp.join(wandb.run.dir, 'last_chpt.pth'))
        else:
            chpt = get_checkpoint(args.wandb.setup, idx=args.train.resume_id, device=args.train.device)
        # rebuild clean (non-DDP) modules and load the trained weights
        model, ema_model = init_model(args, train_loader)
        model.load_state_dict(chpt['model_state_dict'])
        model.to(args.train.device)
        if ema_model is not None:
            ema_model.load_state_dict(chpt['ema_model_state_dict'])
            ema_model.to(args.train.device)
        # test
        data_module.setup('test')
        # evaluate the EMA weights when they exist, else the raw model
        tester.test(args.train,
                    data_module.test_dataloader(),
                    model if ema_model is None else ema_model,
                    )
        print('Test finished')
        wandb.finish()
    # if args.train.ddp:
    #     print('Cleanup')
    #     cleanup()
# Script entry point: hydra parses CLI/config overrides and calls run().
if __name__ == "__main__":
    run()
| 9,117 | 36.065041 | 113 | py |
dct_vae | dct_vae-main/datasets/mnists.py | import numpy as np
import torch
from torch.utils.data import random_split, DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from torchvision import datasets
import hydra
import os
from utils.distributed_training import get_rank, mpi_size, is_main_process
from datasets.dct import DCT_dataset
# fix mnist download problem (https://github.com/pytorch/vision/issues/1938)
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
class BernoulliSample:
    """Transform drawing one Bernoulli sample per element.

    The input tensor entries are interpreted as success probabilities.
    """

    def __call__(self, x):
        sample = torch.bernoulli(x)
        return sample
class MNIST:
    """MNIST data module: download, split, and wrap into DataLoaders.

    Images are normalized to [-1, 1]. When the model name contains
    'context', each split is additionally wrapped in DCT_dataset so batches
    carry a cropped DCT of the image. DDP mode attaches DistributedSamplers.
    """

    def __init__(self, batch_size, test_batch_size, model, ctx_size, root, mode, ddp=False, mpi_size=None, rank=None):
        self.root = root
        self.ddp = ddp
        self.mpi_size = mpi_size
        self.rank = rank
        self.transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(0.5, 0.5)
        ])
        self.test_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(0.5, 0.5),
            # BernoulliSample()
        ])
        self.batch_size = batch_size
        self.test_batch_size = test_batch_size
        self.model = model
        self.ctx_size = ctx_size
        self.mode = mode
        # downloads happen eagerly at construction time
        self.prepare_data()

    def prepare_data(self):
        # trigger the torchvision download for both splits
        datasets.MNIST(self.root, train=True, download=True)
        datasets.MNIST(self.root, train=False, download=True)

    def setup(self, stage=None):
        """Materialize the train/val (stage='fit') and/or test splits."""
        if stage == 'fit' or stage is None:
            mnist_full = datasets.MNIST(self.root, train=True, transform=self.transforms)
            if 'context' in self.model:
                mnist_full = DCT_dataset(mnist_full, self.ctx_size, mode=self.mode)
            self.train, self.val = random_split(mnist_full, [55000, 5000])
        if stage == 'test' or stage is None:
            self.test = datasets.MNIST(self.root, train=False, transform=self.test_transforms)
            if 'context' in self.model:
                self.test = DCT_dataset(self.test, self.ctx_size, mode=self.mode)

    def train_dataloader(self):
        params = {
            'pin_memory': True,
            'drop_last': True
        }
        if self.ddp:
            # each rank sees a disjoint shard of the training set
            train_sampler = DistributedSampler(self.train, shuffle=True,
                                               num_replicas=mpi_size(),
                                               rank=get_rank())
            params['sampler'] = train_sampler
        else:
            params['shuffle'] = True
        params['num_workers'] = 0
        return DataLoader(self.train, self.batch_size, **params)

    def val_dataloader(self):
        params = {
            'pin_memory': True,
            'drop_last': False
        }
        if self.ddp:
            val_sampler = DistributedSampler(self.val,
                                             shuffle=False,
                                             num_replicas=mpi_size(),
                                             rank=get_rank())
            params['sampler'] = val_sampler
        else:
            # NOTE(review): validation is shuffled in non-DDP mode (unusual;
            # harmless for averaged metrics, but confirm it is intentional)
            params['shuffle'] = True
        params['num_workers'] = 0
        return DataLoader(self.val, self.test_batch_size, **params)

    def test_dataloader(self):
        # only the main process evaluates; other ranks get None
        num_workers = 0
        if is_main_process():
            return DataLoader(self.test, self.test_batch_size,
                              num_workers=num_workers, shuffle=False, pin_memory=True)
        return None
class FashionMNIST(MNIST):
    """FashionMNIST data module; reuses the MNIST loader plumbing.

    Fix: the constructor now accepts the same optional ``ddp``/``mpi_size``/
    ``rank`` arguments as MNIST and every sibling data module (OMNIGLOT,
    CIFAR10, SVHN), and forwards them to the parent. Defaults preserve the
    previous single-process behavior, so existing callers are unaffected.
    """

    def __init__(self, batch_size, test_batch_size, model, ctx_size, root, mode, ddp=False, mpi_size=None, rank=None):
        super().__init__(batch_size, test_batch_size, model, ctx_size, root, mode, ddp, mpi_size, rank)

    def prepare_data(self):
        # trigger the torchvision download for both splits
        datasets.FashionMNIST(self.root, train=True, download=True)
        datasets.FashionMNIST(self.root, train=False, download=True)

    def setup(self, stage=None):
        """Materialize the train/val (stage='fit') and/or test splits."""
        if stage == 'fit' or stage is None:
            fmnist_full = datasets.FashionMNIST(self.root, train=True,
                                                transform=self.transforms)
            if 'context' in self.model:
                # wrap with DCT context targets for context models
                fmnist_full = DCT_dataset(fmnist_full, self.ctx_size, mode=self.mode)
            self.train, self.val = random_split(fmnist_full, [55000, 5000])
        if stage == 'test' or stage is None:
            self.test = datasets.FashionMNIST(self.root, train=False,
                                              transform=self.test_transforms)
            if 'context' in self.model:
                self.test = DCT_dataset(self.test, self.ctx_size, mode=self.mode)
| 4,715 | 37.032258 | 118 | py |
dct_vae | dct_vae-main/datasets/omniglot.py | import torch
from torchvision import datasets
from torchvision.datasets.mnist import read_image_file, read_label_file
from torch.utils.data import random_split, TensorDataset, Dataset
from PIL import Image
import os
from torchvision import transforms
import urllib
from scipy.io import loadmat
from datasets.mnists import MNIST
from datasets.dct import DCT_dataset
class omniglot_dset(Dataset):
    """OMNIGLOT images (28x28 uint8) loaded from the iwae 'chardata.mat' dump.

    The .mat file is fetched on first use; ``train`` selects the 'data' vs
    'testdata' split. NOTE(review): the ``download`` argument is accepted for
    torchvision API parity but ignored — the download check always runs.
    """

    def __init__(self, root, train=True, transform=None, download=False):
        super(omniglot_dset, self).__init__()
        self.root = root
        self.train = train
        self.transform = transform
        self.download_omniglot()
        omni = loadmat(os.path.join(self.processed_folder, 'chardata.mat'))
        # data is stored flattened in [0, 1]; rescale to [0, 255] and
        # transpose to (N, 28, 28)
        if self.train:
            self.data = 255 * omni['data'].astype('float32').reshape(
                (28, 28, -1)).transpose((2, 1, 0))
        else:
            self.data = 255 * omni['testdata'].astype('float32').reshape(
                (28, 28, -1)).transpose((2, 1, 0))
        self.data = self.data.astype('uint8')
        print(self.data.shape)

    def download_omniglot(self):
        # fetch chardata.mat from the iwae repository if not already present
        filename = 'chardata.mat'
        dir = self.processed_folder
        if not os.path.exists(dir):
            os.mkdir(dir)
        url = 'https://raw.github.com/yburda/iwae/master/datasets/OMNIGLOT/chardata.mat'
        filepath = os.path.join(dir, filename)
        if not os.path.exists(filepath):
            filepath, _ = urllib.request.urlretrieve(url, filepath)
            print('Downloaded', filename)

    def __getitem__(self, index: int):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        # target is a constant 0 placeholder -- class labels are unused here
        img, target = self.data[index], 0
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return self.data.shape[0]

    @property
    def processed_folder(self) -> str:
        return os.path.join(self.root, 'OMNIGLOT')

    @property
    def raw_folder(self) -> str:
        # same location as processed_folder; kept for dataset API parity
        return os.path.join(self.root, 'OMNIGLOT')
class OMNIGLOT(MNIST):
    """OMNIGLOT data module built on the MNIST loader plumbing."""

    def __init__(self, batch_size, test_batch_size, model, ctx_size, root, mode, ddp=False, mpi_size=None, rank=None):
        super(OMNIGLOT, self).__init__(batch_size, test_batch_size, model, ctx_size, root, mode, ddp, mpi_size, rank)
        # NOTE(review): transforms are re-assigned after super().__init__ has
        # already run prepare_data(); the values are the same as the parent's,
        # so behavior is unchanged.
        self.transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(0.5, 0.5)
        ])
        self.test_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(0.5, 0.5),
        ])

    def prepare_data(self):
        # download_omniglot(self.root)
        # constructing the datasets triggers the chardata.mat download
        omniglot_dset(self.root, train=True, download=True)
        omniglot_dset(self.root, train=False, download=True)

    def setup(self, stage=None):
        """Materialize the train/val (stage='fit') and/or test splits."""
        if stage == 'fit' or stage is None:
            omniglot_full = omniglot_dset(self.root, train=True, transform=self.transforms)
            N = len(omniglot_full)
            print(f'{N} training images')
            if 'context' in self.model:
                # wrap with DCT context targets for context models
                omniglot_full = DCT_dataset(omniglot_full, self.ctx_size, mode=self.mode)
            # hold out a fixed 1000 images for validation
            self.train, self.val = random_split(omniglot_full, [N-1000, 1000])
        if stage == 'test' or stage is None:
            self.test = omniglot_dset(self.root, train=False, transform=self.transforms)
            print(f'{len(self.test)} test images')
            if 'context' in self.model:
                self.test = DCT_dataset(self.test, self.ctx_size, mode=self.mode)
| 3,674 | 33.669811 | 118 | py |
dct_vae | dct_vae-main/datasets/cifar10.py | import numpy as np
import torch
from torch.utils.data import random_split
from torchvision import transforms
from torchvision import datasets
import hydra
import os
from datasets.mnists import MNIST
from datasets.dct import DCT_dataset
from datasets.svhn import Normalize
class CIFAR10(MNIST):
    """CIFAR-10 data module; pixels mapped to [-1, 1], flips at train time."""

    def __init__(self, batch_size, test_batch_size, model, ctx_size, root, mode, ddp=False, mpi_size=None, rank=None):
        super().__init__(batch_size, test_batch_size, model, ctx_size, root, mode, ddp, mpi_size, rank)
        self.transforms = transforms.Compose([
            Normalize(dequant=False),
            transforms.RandomHorizontalFlip(),
        ])
        self.test_transforms = transforms.Compose([
            Normalize(dequant=False)
        ])

    def prepare_data(self):
        # trigger the torchvision download for both splits
        datasets.CIFAR10(self.root, train=True, download=True)
        datasets.CIFAR10(self.root, train=False, download=True)

    def setup(self, stage=None):
        """Materialize the train/val (stage='fit') and/or test splits."""
        if stage == 'fit' or stage is None:
            cifar_full = datasets.CIFAR10(self.root, train=True, transform=self.transforms)
            # DCT_dataset expects a processed_folder attribute for caching
            cifar_full.processed_folder = os.path.join(self.root, cifar_full.base_folder)
            if 'context' in self.model:
                cifar_full = DCT_dataset(cifar_full, self.ctx_size, mode=self.mode)
            N = len(cifar_full)
            self.train, self.val = random_split(cifar_full, [N-5000, 5000])
        if stage == 'test' or stage is None:
            self.test = datasets.CIFAR10(self.root, train=False, transform=self.test_transforms)
            self.test.processed_folder = os.path.join(self.root, self.test.base_folder)
            if 'context' in self.model:
                self.test = DCT_dataset(self.test, self.ctx_size, mode=self.mode)
| 1,765 | 39.136364 | 118 | py |
dct_vae | dct_vae-main/datasets/svhn.py | import numpy as np
import torch
from torch.utils.data import random_split
from torchvision import transforms
from torchvision import datasets
import os
from datasets.mnists import MNIST
from datasets.dct import DCT_dataset
from PIL import Image
class Normalize:
    """Convert an HWC image (values in [0, 255]) to a CHW float tensor in [-1, 1].

    With ``dequant=True`` uniform U[0,1) noise is added before rescaling
    (dequantization); otherwise the pixels are shifted/scaled directly.
    """

    def __init__(self, dequant=False, num_bits=8):
        self.dequant = dequant
        self.num_bits = num_bits

    def __call__(self, x):
        # HWC -> CHW float32 tensor
        tensor = torch.FloatTensor(np.asarray(x, dtype=np.float32)).permute(2, 0, 1)
        if not self.dequant:
            # direct affine map from [0, 255] onto [-1, 1]
            return (tensor - 127.5)/127.5
        # dequantize: add noise, scale to [0, 1], then map to [-1, 1]
        noisy = (tensor + torch.rand_like(tensor).detach()) / (2 ** self.num_bits)
        return 2 * noisy - 1
class svhn(datasets.SVHN):
    """torchvision SVHN wrapper storing data under root/SVHN.

    Adds a boolean ``train`` attribute and a ``processed_folder`` property to
    mimic the MNIST dataset API, which DCT_dataset relies on for caching.
    """

    def __init__(self, root, split, transform=None, download=False):
        # keep SVHN files in their own subdirectory of the dataset root
        root = os.path.join(root, 'SVHN')
        super(svhn, self).__init__(root, split=split, transform=transform, download=download)
        self.train = False
        if split == 'train':
            self.train = True

    @property
    def processed_folder(self) -> str:
        return self.root
class SVHN(MNIST):
    """SVHN data module; pixels mapped to [-1, 1], no augmentation."""

    def __init__(self, batch_size, test_batch_size, model, ctx_size, root, mode, ddp=False, mpi_size=None, rank=None):
        super().__init__(batch_size, test_batch_size, model, ctx_size, root, mode, ddp, mpi_size, rank)
        self.transforms = transforms.Compose([
            Normalize(dequant=False)
        ])
        self.test_transforms = transforms.Compose([
            Normalize(dequant=False)
        ])

    def prepare_data(self):
        # constructing the wrappers triggers the download for both splits
        svhn(self.root, split='train', download=True)
        svhn(self.root, split='test', download=True)

    def setup(self, stage=None):
        """Materialize the train/val (stage='fit') and/or test splits."""
        if stage == 'fit' or stage is None:
            svhn_full = svhn(self.root, split='train', transform=self.transforms)
            if 'context' in self.model:
                svhn_full = DCT_dataset(svhn_full, self.ctx_size, mode=self.mode)
            # SVHN train split has a fixed size of 73257 images
            N = 73257
            self.train, self.val = random_split(svhn_full, [N-5000, 5000])
        if stage == 'test' or stage is None:
            self.test = svhn(self.root, split='test', transform=self.test_transforms)
            if 'context' in self.model:
                self.test = DCT_dataset(self.test, self.ctx_size, mode=self.mode)
| 2,508 | 32.453333 | 118 | py |
dct_vae | dct_vae-main/datasets/dct.py | import math
import os
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torch.nn.functional as F
def RGB_to_YCBCR(x):
    """Convert one RGB image tensor in [-1, 1] to YCbCr in [-1, 1].

    Round-trips through PIL's colorspace conversion, so ``x`` must be a
    single (3, H, W) tensor acceptable to ToPILImage.
    """
    # [-1, 1] to [0, 1]
    x = (x+1)/2
    # PIL image
    x_pil = transforms.ToPILImage(mode='RGB')(x)
    # convert to YCbCr
    x_y = np.array(x_pil.convert('YCbCr'))
    # map to [0, 1]
    x_y = torch.FloatTensor(x_y).permute(2, 0, 1) / 255.
    return x_y * 2 - 1
def YCBCR_to_RGB(x):
    """Convert a batch of YCbCr images (B, 3, H, W) in [-1, 1] to RGB.

    Applies a fixed linear color transform per pixel and clamps the result
    back into [-1, 1].
    """
    batch, _, height, width = x.shape
    transform = torch.FloatTensor([
        [1, 0, 1.5748],
        [1, -0.1873, -0.4681],
        [1, 1.8556, 0],
    ]).unsqueeze(0).to(x.device)
    # flatten spatial dims so the transform is a single batched matmul
    flat = x.reshape(batch, 3, -1)
    rgb = torch.matmul(transform.repeat(batch, 1, 1), flat)
    rgb = rgb.reshape(batch, 3, height, width)
    return torch.clamp(rgb, -1, 1)
# def YCBCR_to_RGB(x):
# # [-1, 1] to [0, 1]
# x = (x+1)/2
# # PIL image
# x_pil = transforms.ToPILImage(mode='YCbCr')(x)
# # convert to YCbCr
# x_y = np.array(x_pil.convert('RGB'))
# # map to [0, 1]
# x_y = torch.FloatTensor(x_y).permute(2, 0, 1) / 255.
# return x_y * 2 - 1
class DCT(nn.Module):
    """2D type-II DCT and its inverse, implemented with fixed cosine bases.

    ``type='old'`` uses the unnormalized transform; ``type='ortho'`` applies
    the orthonormal scaling. ``mode='YCbCr'`` converts RGB inputs before the
    forward transform. Bases are stored as frozen nn.Parameters so they move
    with the module's device.
    """

    def __init__(self, im_width=8, im_height=8, mode='RGB', type='old'):
        super(DCT, self).__init__()
        self.mode = mode
        self.type = type
        assert type in ['old', 'ortho']
        # forward DCT
        A_w = math.pi * (torch.arange(0, im_width) + 0.5) / im_width
        A_h = math.pi * (torch.arange(0, im_height) + 0.5) / im_height
        ints_w = torch.arange(0, im_width)
        ints_h = torch.arange(0, im_height)
        # cos(k * pi * (n + 0.5) / N) tables for width and height
        self.basis_function_w = nn.Parameter(
            torch.cos(torch.einsum('ij,jk->ik', ints_w.unsqueeze(1), A_w.unsqueeze(0))),
            requires_grad=False
        )
        self.basis_function_h = nn.Parameter(
            torch.cos(torch.einsum('ij,jk->ik', ints_h.unsqueeze(1), A_h.unsqueeze(0))),
            requires_grad=False
        )
        # inverse DCT
        B_w = (math.pi * (torch.arange(0, im_width) + 0.5) / im_width)
        B_h = (math.pi * (torch.arange(0, im_height) + 0.5) / im_height)
        # index starts at 1: the DC term is handled separately in idct2
        indx_w = torch.arange(1, im_width)
        indx_h = torch.arange(1, im_height)
        self.reverse_function_w = nn.Parameter(
            torch.cos(torch.einsum('ij,jk->ik', B_w.unsqueeze(1), indx_w.unsqueeze(0))),
            requires_grad=False
        )
        self.reverse_function_h = nn.Parameter(
            torch.cos(torch.einsum('ij,jk->ik', B_h.unsqueeze(1), indx_h.unsqueeze(0))),
            requires_grad=False
        )

    def dct2(self, x):
        """Forward 2D DCT of a (B, C, H, W) batch; input mapped to [0, 1] first."""
        assert len(x.shape) == 4
        # covert RGB to YCbCr if required
        if self.mode == 'YCbCr':
            ims = []
            for im in x:
                ims.append(RGB_to_YCBCR(im))
            x = torch.stack(ims, 0)
        # map to [0, 1]
        if x.min() < 0:
            x = 0.5 * (x + 1)
        # x - B x C x H x W
        s = x.shape
        # X - B*C*H x W  (transform along width first)
        f = 2. * torch.einsum('ij,jk->ik', x.reshape(s[0]*s[1]*s[2], s[3]),
                              self.basis_function_w.t())
        if self.type == 'ortho':
            # normalize
            w = self.basis_function_w.shape[1]
            f[:, 0] = f[:, 0] / math.sqrt(4 * w)
            f[:, 1:] = f[:, 1:] / math.sqrt(2 * w)
        # B*C*H x W -> B*C*W x H  (now transform along height)
        f = f.reshape(*s).permute(0, 1, 3, 2).reshape(s[0]*s[1]*s[3], s[2])
        F = 2. * torch.einsum('ij,jk->ik', f, self.basis_function_h.t())
        if self.type == 'ortho':
            # normalize
            h = self.basis_function_h.shape[1]
            F[:, 0] = F[:, 0] / math.sqrt(4 * h)
            F[:, 1:] = F[:, 1:] / math.sqrt(2 * h)
        F = F.reshape(s[0], s[1], s[3], s[2]).permute(0, 1, 3, 2)
        return F

    def idct2(self, x):
        """Inverse 2D DCT of a (B, C, H, W) coefficient batch."""
        assert len(x.shape) == 4
        # x - B x C x H x W
        s = x.shape
        # X - B*C*H x W  (invert along width first; DC term added separately)
        x = x.reshape(s[0]*s[1]*s[2], s[3])
        if self.type == 'old':
            f = (x[:, [0]] + 2. * torch.einsum('ij,jk->ik', x[:, 1:], self.reverse_function_w.t())) / (2.*s[3])
        elif self.type == 'ortho':
            f = (x[:, [0]] + math.sqrt(2.) * torch.einsum('ij,jk->ik', x[:, 1:],
                                                          self.reverse_function_w.t())) / (math.sqrt(s[3]))
        # B*C*H x W -> B*C*W x H  (now invert along height)
        f = f.reshape(*s).permute(0, 1, 3, 2).reshape(s[0]*s[1]*s[3], s[2])
        if self.type == 'old':
            F = (f[:, [0]] + 2. * torch.einsum('ij,jk->ik', f[:, 1:], self.reverse_function_h.t())) / (2.*s[2])
        elif self.type == 'ortho':
            F = (f[:, [0]] + math.sqrt(2.) * torch.einsum('ij,jk->ik', f[:, 1:],
                                                          self.reverse_function_h.t())) / (math.sqrt(s[2]))
        F = F.reshape(s[0], s[1], s[3], s[2]).permute(0, 1, 3, 2)
        return F
class DCT_dataset(Dataset):
    """Dataset wrapper that appends a cropped DCT of each image to its batch.

    On construction it precomputes (or loads a cached .pt file of) the DCT of
    every image in ``base_dataset``, then keeps only the top-left
    ctx_size x ctx_size coefficients and derives normalization statistics
    (mean / std / scale) from them.
    """

    def __init__(self, base_dataset, ctx_size, mode='RGB'):
        self.base_dataset = base_dataset
        self.ctx_size = ctx_size
        _, h, w = base_dataset[0][0].shape
        self.dct = DCT(h, w, mode=mode)
        self.x_dim = h
        assert mode in ['RGB', 'YCbCr', 'BW'], f'Mode should be BW, RGB or YCbCr, got {mode} instead'
        self.mode = mode
        # cache file name depends on the base dataset's split convention
        # (MNIST-style boolean `train` vs SVHN-style string `split`)
        if hasattr(self.base_dataset, 'train'):
            self.file_name = f'training_dct_{mode}.pt' if self.base_dataset.train else f'test_dct_{mode}.pt'
        elif hasattr(self.base_dataset, 'split'):
            if self.base_dataset.split == 'train':
                self.file_name = f'training_dct_{mode}.pt'
            elif self.base_dataset.split == 'valid':
                self.file_name = f'valid_dct_{mode}.pt'
            else:
                self.file_name = f'test_dct_{mode}.pt'
        path = os.path.join(self.base_dataset.processed_folder, self.file_name)
        if os.path.exists(path):
            self.dct_data = torch.load(path)
        else:
            self.dct_data = self.calculate_dct()
        self.preprocess_dct()

    def preprocess_dct(self):
        # crop the context
        self.dct_data = self.dct_data[:, :, :self.ctx_size, :self.ctx_size]
        # compute stats for normalization
        self.mean = self.dct_data.mean(0)
        self.std = self.dct_data.std(0)
        self.scale = torch.floor(self.dct_data).abs().max(0)[0]

    def calculate_dct(self):
        """Compute full-size DCTs for the whole dataset and cache them on disk."""
        dloader = DataLoader(self.base_dataset, batch_size=1, drop_last=False, shuffle=False)
        res = []
        for x, _ in dloader:
            res.append(self.dct.dct2(x))
        all_dcts = torch.cat(res)
        if not os.path.exists(self.base_dataset.processed_folder):
            os.makedirs(self.base_dataset.processed_folder)
        torch.save(all_dcts,
                   os.path.join(self.base_dataset.processed_folder, self.file_name))
        return all_dcts

    def __len__(self):
        return len(self.base_dataset)

    def __getitem__(self, item):
        # recompute the DCT on the fly so it reflects the (possibly augmented)
        # transformed image, then append the cropped context to the batch tuple
        batch = self.base_dataset[item]
        x = batch[0]
        x_dct = self.dct.dct2(x.unsqueeze(0))[0, :, :self.ctx_size, :self.ctx_size]
        batch += (x_dct, )
        return batch
| 7,236 | 36.497409 | 111 | py |
dct_vae | dct_vae-main/utils/tester.py | import torch
import numpy as np
import wandb
from tqdm import tqdm
def test(args, loader, model):
    """Final evaluation: averaged test metrics, bpd, optional FID, sample grids.

    Accumulates per-batch logs from model.test_step into ``history`` (keys
    prefixed 'test/'), normalizes by dataset size, and logs everything to W&B
    in a single call. Assumes CUDA is available (batches moved with .cuda()).
    """
    model.eval()
    history = {}
    with torch.no_grad():
        for batch_idx, batch in tqdm(enumerate(loader)):
            for i in range(len(batch)):
                # batch[i] = batch[i].to(args.device)
                batch[i] = batch[i].cuda(non_blocking=True)
            # calculate VAE Loss
            logs = model.test_step(batch, compute_fid=args.compute_fid)
            # update running sums
            for k in logs.keys():
                if f'test/{k}' not in history.keys():
                    history[f'test/{k}'] = 0.
                history[f'test/{k}'] += logs[k]
        # divide by num points
        for k in history.keys():
            history[k] /= len(loader.dataset)
        # compute fid
        if args.compute_fid:
            print('Compute FID')
            history['test/fid'] = model.fid.compute()
        # bpd: nats per image -> bits per dimension
        size_coef = args.image_size[0]*args.image_size[1]*args.image_size[2]
        bpd_coeff = 1. / np.log(2.) / size_coef
        history['test/bpd'] = history['test/nll'] * bpd_coeff
        # get random samples (uses the last batch from the loop above)
        history['test/X'] = wandb.Image(batch[0][:100])
        # add reconstructions
        plot_rec = model(batch)[0]
        plot_rec = plot_rec.data.cpu()[:100]
        history['test/Recon'] = wandb.Image(plot_rec)
        # add samples at decreasing sampling temperatures
        for temp in [1., 0.8, 0.6, 0.4]:
            sample = model.generate_x(100, t=temp)
            history[f'test/Samples (t={temp})'] = wandb.Image(sample)
    # save metrics
    print('Save Metrics')
    wandb.log(history)
| 1,662 | 29.796296 | 76 | py |
dct_vae | dct_vae-main/utils/notebook_helpers.py | import os
import wandb
import torch
import numpy as np
from torchvision import transforms
from torchvision.transforms.functional import to_pil_image
from PIL import Image
from utils.wandb import api, get_checkpoint
# Matplotlib rcParams for publication-style figures (LaTeX text, Times serif).
nice_fonts = {
    "text.usetex": True,
    "font.family": "serif",
    "font.serif" : "Times New Roman",
    "font.size": 34,
    "lines.linewidth":3
}
# Default kwargs for plt.savefig: high-dpi, transparent, tightly cropped.
save_fig_opt = {
    'dpi': 800,
    'transparent':True,
    'bbox_inches':'tight',
    'pad_inches': 0
}
# W&B entity and project the notebooks pull runs from.
USER = 'anna_jakub'
PROJECT = 'context_vae'
def get_vals(i, filename):
    """Restore a saved .pt artifact from W&B run ``i`` and load it on CPU.

    NOTE(review): the ``filename`` parameter is currently unused — the
    restored file name below looks like it was meant to interpolate it;
    confirm against the original notebook.
    """
    run_pth = os.path.join(USER, PROJECT, i)
    run = api.run(run_pth)
    file = wandb.restore(f'(unknown).pt', run_path=run_pth, replace=True,
                         root='_loaded/')
    return torch.load(file.name, map_location='cpu')
def line_with_std(arr, ax, label='', plot_std=True):
    """Plot the column-wise mean of ``arr`` on matplotlib axis ``ax``.

    Optionally shades a ~95% confidence band (mean +/- 2 standard errors),
    where the standard error is computed over the first axis of ``arr``.
    """
    mean = arr.mean(0)
    stderr = arr.std(0) / np.sqrt(arr.shape[0])
    ax.plot(mean, label=label)
    if plot_std:
        lower = mean - 2 * stderr
        upper = mean + 2 * stderr
        ax.fill_between(range(len(mean)), y1=lower, y2=upper, alpha=0.2)
    ax.grid()
def get_im(tensor):
    """Turn a (C, H, W) torch tensor into an (H, W, C) numpy image array."""
    hwc = tensor.permute(1, 2, 0)
    return hwc.detach().numpy()
def get_psnr(mse, max_val=1.):
    """Compute PSNR in dB from mean-squared-error values.

    Accepts either a torch tensor or a numpy array (converted via
    ``torch.from_numpy``); returns a torch tensor of the same shape.
    """
    if not isinstance(mse, torch.Tensor):
        mse = torch.from_numpy(mse)
    rmse = torch.sqrt(mse)
    return 20 * torch.log10(max_val / rmse)
def get_jpeg_compressed(idx, quality):
    """Load Kodak image ``idx`` and return an (original, JPEG round-trip) pair.

    Both returned tensors are CHW floats in [0, 1]. The JPEG round-trip goes
    through a temporary 'compressed.jpg' file at the given ``quality`` with
    chroma subsampling disabled (subsampling=0).
    """
    image = transforms.ToTensor()(Image.open(f'datasets/kodak/kodim{idx}.png'))
    file = 'compressed.jpg'
    # map to [0, 255.]
    image = 255. * image
    pil_im = image.squeeze().permute(1, 2, 0).detach().numpy().astype(np.uint8)
    pil_im = to_pil_image(pil_im)
    pil_im.save(file, subsampling=0, quality=int(quality))
    # reload the JPEG and scale back to [0, 1]
    image_compressed = torch.FloatTensor(np.array(Image.open(file))).permute(2, 0, 1)
    image_compressed = image_compressed / 255.
    return image / 255., image_compressed
def get_full_bpp(idx):
    """Return bits-per-pixel of the original Kodak PNG for 0-based index ``idx``.

    Kodak files are named kodim01.png .. kodim24.png (1-based, zero-padded),
    so the index is shifted and padded before the lookup. The bpp is the
    file size in bits divided by the pixel count (H * W).

    Fix: the final line of this function was corrupted by extraction junk
    (invalid syntax after ``return bpp``) and is restored here; the image is
    also opened via the already-built ``file`` path instead of rebuilding
    the same f-string a second time.
    """
    idx += 1
    if idx < 10:
        idx = '0' + str(idx)
    file = f'datasets/kodak/kodim{idx}.png'
    image = transforms.ToTensor()(Image.open(file))
    bpp = (os.path.getsize(file) * 8.) / (
        image.shape[1] * image.shape[2])
    return bpp
dct_vae | dct_vae-main/utils/distribution.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
# from utils.flow_layers import AffineCoupling1d, AffineCoupling2d
from utils.nn import Siren
# from utils.arm_layers import CausalConv1d, GatedResidualLayer
class Distribution(nn.Module):
    """Abstract base class; concrete distributions implement log_prob/sample."""

    def __init__(self):
        super().__init__()

    def log_prob(self, x):
        # must be provided by concrete subclasses
        raise NotImplementedError

    def sample(self, N=1, t=None):
        # must be provided by concrete subclasses
        raise NotImplementedError
class Normal(Distribution):
    """Diagonal Gaussian parameterized by mean and log-variance tensors."""

    def __init__(self, mu, log_var, *args, **kwargs):
        super(Normal, self).__init__()
        self.mu = mu
        self.log_var = log_var

    def log_prob(self, x, reduce_dim=True):
        """Per-sample log-density of `x`.

        Returns shape (MB,) when `reduce_dim`, otherwise (MB, D).
        """
        # NOTE: the original re-assigned MB inside a dead `if` branch;
        # that redundant code was removed.
        MB = x.shape[0]
        log_p = -0.5 * (math.log(2.0*math.pi) +
                        self.log_var +
                        torch.pow(x - self.mu, 2) / (torch.exp(self.log_var) + 1e-10))
        if reduce_dim:
            return log_p.reshape(MB, -1).sum(1)
        else:
            return log_p.reshape(MB, -1)

    def sample(self, N=None, t=None):
        """Reparameterized sample; `t` rescales the variance (temperature)."""
        size = self.mu.shape
        if N is not None:
            size = torch.Size([N]) + size
        z_sample = torch.empty(size, device=self.mu.device)
        if t is not None:
            sigma = (0.5 * (self.log_var + torch.ones_like(self.log_var) * math.log(t))).exp()
        else:
            sigma = (0.5*self.log_var).exp()
        eps = z_sample.normal_()
        return self.mu + sigma*eps

    def update(self, delta_mu, delta_logvar):
        """Shift the parameters (used for top-down posterior updates)."""
        self.mu = self.mu + delta_mu
        self.log_var = self.log_var + delta_logvar

    def get_E(self):
        """Mean of the distribution."""
        return self.mu

    def entropy(self):
        """Differential entropy, summed over all dimensions."""
        c = 1 + math.log(math.pi*2)
        return 0.5 * (c + self.log_var).sum()

    def kl(self, dist):
        """
        compute kl-divergence with the given distribution
        """
        assert isinstance(dist, Normal), 'Can only compute analytical kl for gaussians'
        log_v_r = dist.log_var - self.log_var
        mu_r_sq = (self.mu - dist.mu) ** 2
        kl = 0.5 * (-1 + log_v_r + (self.log_var.exp() + mu_r_sq) / dist.log_var.exp())
        return kl
def create_standard_normal_prior(size):
    """Standard normal N(0, I) prior with frozen (non-trainable) parameters."""
    size = list(size)
    mu = nn.Parameter(torch.zeros(size), requires_grad=False)
    logvar = nn.Parameter(torch.zeros(size), requires_grad=False)
    return Normal(mu, logvar)
def create_gaussian_prior(size):
    """Trainable diagonal Gaussian prior (log-variance initialized near 0)."""
    size = list(size)
    mu = nn.Parameter(torch.zeros(size), requires_grad=True)
    logvar = nn.Parameter(torch.randn(size)*0.01, requires_grad=True)
    return Normal(mu, logvar)
class Delta(Distribution):
    """Point mass (Dirac delta) at `x`; its log-density is defined as 0."""

    def __init__(self, x):
        # BUGFIX: super().__init__() was missing, so the nn.Module side of
        # Distribution was never initialized.
        super(Delta, self).__init__()
        self.x = x

    def log_prob(self, x, reduce_dim=True):
        """Return zeros shaped like a per-sample log-density."""
        out = torch.zeros(x.shape, device=x.device).reshape(x.shape[0], -1)
        if reduce_dim:
            out = out.sum(1)
        return out

    def sample(self, N=None):
        """Return the point `x`, or `N` stacked copies of it."""
        x_sample = self.x.clone()
        if N is not None:
            # BUGFIX: was `.repeate(size)` — a typo that raised
            # AttributeError, and passed a full shape where `repeat`
            # expects per-dimension repeat counts.
            x_sample = x_sample.unsqueeze(0).repeat([N] + [1] * self.x.dim())
        return x_sample

    def get_E(self):
        """Mean (the point itself)."""
        return self.x
class Bernoulli(Distribution):
    """Element-wise Bernoulli with success probability `p`, clamped away
    from 0 and 1 for numerical stability of the log-likelihood."""

    def __init__(self, p, *args, **kwargs):
        super(Bernoulli, self).__init__()
        self.p = torch.clamp(p, min=1e-7, max=1.-1e-7)

    def log_prob(self, x):
        """Log-likelihood of binary targets `x`, summed per sample."""
        assert torch.max(x).item() <= 1.0 and torch.min(x).item() >= 0.0
        batch = x.shape[0]
        log_p = x * torch.log(self.p) + (1. - x) * torch.log(1. - self.p)
        return log_p.reshape(batch, -1).sum(1)

    def sample(self, N=None):
        """Draw one sample, or `N` stacked samples when `N` is given."""
        probs = self.p
        if N is not None:
            probs = probs.unsqueeze(0).repeat([N] + [1] * len(probs.shape))
        return torch.bernoulli(probs)

    def get_E(self):
        """Mean, i.e. the success probabilities themselves."""
        return self.p
class Logistic256(Distribution):
    """Discretized logistic likelihood over 8-bit (or 5-bit) pixels in [-1, 1]."""

    def __init__(self, mean, var, *args, **kwargs):
        super(Logistic256, self).__init__()
        self.mean = mean
        # softplus keeps the scale strictly positive; clamp avoids underflow
        softplus = nn.Softplus(0.4)
        self.log_var = torch.log(softplus(torch.clamp(var, min=-20.)))

    def log_prob(self, x, low_bit=False):
        """Log-probability of pixels `x`, summed over C, H, W per sample.

        `x` may be in [0, 1] (rescaled internally) or [-1, 1].
        `low_bit` switches to 5-bit (32-level) quantization bins.
        """
        assert x.min() >= -1. and x.max() <= 1.
        # rescale x to [-1, 1] if needed
        if x.min() >= 0:
            x = 2. * x - 1
        if low_bit:
            max_bit = 31.
        else:
            max_bit = 255.
        centered = x - self.mean  # B, C, H, W
        inv_stdv = torch.exp(- self.log_var)
        # each pixel has a bin of width 2/n_bit -> half of the bin is 1/n_bit
        plus_in = inv_stdv * (centered + 1. / max_bit)
        cdf_plus = torch.sigmoid(plus_in)
        min_in = inv_stdv * (centered - 1. / max_bit)
        cdf_min = torch.sigmoid(min_in)
        # probability to be in the bin
        cdf_delta = torch.clamp(cdf_plus - cdf_min, min=1e-10)
        log_probs = torch.log(cdf_delta)
        # for pixel 0 we have -\inf instead of min_in
        log_cdf_plus = plus_in - F.softplus(plus_in)
        pix_0 = -1. + 1./max_bit
        log_probs = torch.where(x <= pix_0,
                                log_cdf_plus,
                                log_probs)
        # for pixel 255 we have \inf instead of plus_in
        log_one_minus_cdf_min = -F.softplus(min_in)
        pix_255 = 1. - 1./max_bit
        log_probs = torch.where(x >= pix_255,
                                log_one_minus_cdf_min,
                                log_probs)
        log_probs = log_probs.sum(dim=[1, 2, 3])  # MB
        return log_probs

    def sample(self, N=None, t=None):
        """Inverse-CDF sample; `t` is a temperature applied to the scale."""
        size = self.mean.shape
        if N is not None:
            size = torch.Size([N]) + size
        u = torch.Tensor(size).uniform_(1e-5, 1. - 1e-5)
        u = u.to(self.mean.device)
        if t is not None:
            scale = torch.exp(self.log_var + torch.ones_like(self.log_var) * math.log(t))
        else:
            scale = torch.exp(self.log_var)
        x = self.mean + scale * (torch.log(u) - torch.log(1. - u))
        return x

    def get_E(self):
        """Mean of the underlying (continuous) logistic."""
        return self.mean

    def entropy(self):
        # BUGFIX: the attribute was misspelled `self.logvar`, which raised
        # AttributeError. Entropy of a logistic with scale s is log(s) + 2.
        return self.log_var + 2
class MixtureLogistic256(Distribution):
    """Discretized mixture-of-logistics likelihood for 3-channel images.

    Using the implementations from
    https://github.com/Rayhane-mamah/Efficient-VDVAE/blob/main/efficient_vdvae_torch/model/losses.py
    https://github.com/openai/vdvae/blob/ea35b490313bc33e7f8ac63dd8132f3cc1a729b4/vae_helpers.py
    """

    def __init__(self, logit_probs, mean, log_var): #, coeffs):
        super(MixtureLogistic256, self).__init__()
        self.logit_probs = logit_probs  # MB, M, H, W
        self.data_ch = 3
        mb, self.num_mix, h, w = logit_probs.shape
        self.means = mean  # MB, 3, M, H, W
        # softplus keeps the scale strictly positive; clamp avoids underflow
        softplus = nn.Softplus(0.4)
        self.log_var = torch.log(softplus(torch.clamp(log_var, min=-20.)))  # MB, 3, M, H, W

    def log_prob(self, x, low_bit=False):
        """Log-probability of pixels `x`, summed over H, W per sample."""
        assert x.min() >= -1. and x.max() <= 1.
        # rescale x to [-1, 1] if needed
        if x.min() >= 0:
            x = 2. * x - 1
        if low_bit:
            max_bit = 31.
        else:
            max_bit = 255.
        x = x.unsqueeze(2)  # MB, 3, 1, H, W
        centered_x = x - self.means  # B, C, M, H, W
        inv_stdv = torch.exp(-self.log_var)
        # each pixel has a bin of width 2/n_bit -> half of the bin is 1/n_bit
        plus_in = inv_stdv * (centered_x + 1. / max_bit)
        cdf_plus = torch.sigmoid(plus_in)
        min_in = inv_stdv * (centered_x - 1. / max_bit)
        cdf_min = torch.sigmoid(min_in)
        # probability to be in the bin
        cdf_delta = torch.clamp(cdf_plus - cdf_min, min=1e-10)
        log_probs = torch.log(cdf_delta)
        # for pixel 0 we have -\inf instead of min_in
        log_cdf_plus = plus_in - F.softplus(plus_in)
        pix_0 = -1. + 1./max_bit
        log_probs = torch.where(x.repeat(1, 1, self.num_mix, 1, 1) <= pix_0,
                                log_cdf_plus,
                                log_probs)
        # for pixel 255 we have \inf instead of plus_in
        log_one_minus_cdf_min = -F.softplus(min_in)
        pix_255 = 1. - 1./max_bit
        log_probs = torch.where(x.repeat(1, 1, self.num_mix, 1, 1) >= pix_255,
                                log_one_minus_cdf_min,
                                log_probs)
        # MB x M x H x W
        log_probs = torch.sum(log_probs, dim=1) + F.log_softmax(self.logit_probs, dim=1)
        # now get rid of the mixtures with log sum exp
        log_probs = torch.logsumexp(log_probs, 1)  # MB x H x W
        log_probs = log_probs.sum(dim=[1, 2])  # MB
        return log_probs

    def sample(self, t=None):
        """Gumbel-max mixture selection, then inverse-CDF logistic sampling."""
        # sample mixture num
        eps = torch.empty_like(self.logit_probs).uniform_(1e-5, 1. - 1e-5)  # MB, M, H, W
        amax = torch.argmax(self.logit_probs - torch.log(-torch.log(eps)), dim=1)
        sel = one_hot(amax, self.logit_probs.size()[1], dim=1, device=self.means.device).unsqueeze(1)  # MB, 1, M, H, W
        # select logistic parameters -> MB, 3, H, W
        means = (self.means * sel).sum(2)
        log_scales = (self.log_var * sel).sum(2)
        if t is not None:
            # BUGFIX: referenced non-existent `self.log_scales`
            # (AttributeError); the local `log_scales` was intended.
            log_scales = log_scales + torch.ones_like(log_scales) * math.log(t)
        # sample from logistic & clip to interval
        u = torch.empty_like(means).uniform_(1e-5, 1. - 1e-5)
        x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
        return torch.clamp(x, -1, 1)

    def get_E(self):
        raise NotImplementedError
def one_hot(indices, depth, dim, device):
    """
    One-hot encode `indices` along a new axis `dim` of size `depth`.

    https://github.com/Rayhane-mamah/Efficient-VDVAE/blob/cfecc7b1776b85d09d9336f07a6b886c3ca8e486/efficient_vdvae_torch/utils/utils.py#L43
    """
    indices = indices.unsqueeze(dim)
    size = list(indices.size())
    size[dim] = depth
    # torch.zeros is already zero-filled; the redundant zero_() call from
    # the reference implementation was dropped.
    y_onehot = torch.zeros(size, device=device)
    y_onehot.scatter_(dim, indices, 1)
    return y_onehot
| 9,955 | 32.521886 | 139 | py |
dct_vae | dct_vae-main/utils/nn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.thirdparty.blurpool import BlurPool
class Siren(nn.Module):
    """Sine activation with a learnable frequency `w_0` (SIREN-style)."""

    def __init__(self):
        super(Siren, self).__init__()
        self.w_0 = nn.Parameter(torch.ones(1), requires_grad=True)

    def forward(self, x):
        """Apply sin(w_0 * x) element-wise."""
        return (self.w_0 * x).sin()
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    conv_classes = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_classes[dims](*args, **kwargs)
class ConvBlock(nn.Module):
    """
    if forward true:
        BatchNorm (if needed) + Activation + Convolution
    else:
        Convolution + BatchNorm (if needed) + Activation

    NOTE(review): the conv itself is always created with stride=1;
    `stride == 2` is emulated by appending an AvgPool2d, and any other
    stride value is silently ignored — confirm this is intended.
    """
    def __init__(self, in_ch, out_ch, kernel_size, stride=1, padding=0, dilation=1, groups=1,
                 act=nn.ReLU(), weight_norm=False, batch_norm=False, forward=True):
        super(ConvBlock, self).__init__()
        conv = nn.Conv2d(in_ch, out_ch, kernel_size, stride=1, padding=padding,
                         dilation=dilation, groups=groups)
        if weight_norm:
            conv = nn.utils.weight_norm(conv)
        net = []
        if forward:
            # pre-activation ordering: BN -> act -> conv (-> pool)
            if batch_norm:
                net.append(nn.BatchNorm2d(in_ch, momentum=0.05))
            if act is not None:
                net.append(act)
            net += [conv]
            if stride == 2:
                net += [nn.AvgPool2d(kernel_size=2, stride=2)]
        else:
            # post-activation ordering: conv (-> pool) -> BN -> act
            net.append(conv)
            if stride == 2:
                net += [nn.AvgPool2d(kernel_size=2, stride=2)]
            if batch_norm:
                net.append(nn.BatchNorm2d(out_ch, momentum=0.05))
            if act is not None:
                net.append(act)
        self.net = nn.Sequential(*net)
    def forward(self, x):
        # Run the configured sequence on an NCHW tensor.
        return self.net(x)
class ConvTransposeBlock(nn.Module):
    """Transposed convolution followed by optional BatchNorm and activation."""

    def __init__(self, in_ch, out_ch, kernel_size, stride=1, padding=0, dilation=1,
                 output_padding=0, act=nn.ReLU(), weight_norm=False, batch_norm=False):
        super(ConvTransposeBlock, self).__init__()
        self.conv = nn.ConvTranspose2d(in_ch, out_ch, kernel_size, stride=stride,
                                       padding=padding, dilation=dilation,
                                       output_padding=output_padding)
        self.activation = act
        self.bn = nn.BatchNorm2d(out_ch, momentum=0.05) if batch_norm else None
        if weight_norm:
            self.conv = nn.utils.weight_norm(self.conv)

    def forward(self, x):
        """Apply conv -> (batch norm) -> (activation)."""
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.activation is None:
            return out
        return self.activation(out)
class _ResBlock(nn.Module):
    """Base residual block: builds the skip connection for a given stride.

    Subclasses must assign `self.net` (the residual branch) after calling
    this constructor; `forward` combines skip(x) + beta * net(x), where
    `beta` is a learnable scalar gate initialized at 0.
    stride semantics: 1 = same size, 2 = downsample x2, -1 = upsample x2.
    """
    def __init__(self, in_channels, out_channels, stride, use_res=True):
        super(_ResBlock, self).__init__()
        self.use_res = use_res
        # residual branch gate, starts at 0 so the block is initially identity-like
        self.beta = nn.Parameter(torch.tensor([0.]), requires_grad=True)
        if stride == 1:
            if in_channels == out_channels:
                self.skip = nn.Identity()
            else:
                self.skip = nn.Conv2d(in_channels, out_channels, 1)
        elif stride == 2:
            # downsample skip: 3x3 conv then average pooling
            self.skip = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1),
                nn.AvgPool2d(kernel_size=2, stride=2)
            )
        elif stride == -1:
            # upsample skip: nearest upsampling then 1x1 conv
            self.skip = nn.Sequential(nn.Upsample(scale_factor=2),
                                      nn.Conv2d(in_channels, out_channels, 1))
        # NOTE(review): for any other stride value `self.skip` is undefined;
        # subclasses are expected to set `self.net` — confirm callers.
        self.net = None
    def forward(self, x):
        if self.use_res:
            return self.skip(x) + self.beta * self.net(x)
        else:
            return self.net(x)
class ConcatELU(nn.Module):
    """Two-sided ELU: concatenates elu(x) and elu(-x) along the channel
    axis, doubling the channel count while keeping strong gradients for
    inputs of either sign (useful before the final convolution)."""

    def forward(self, x):
        pos = F.elu(x)
        neg = F.elu(-x)
        return torch.cat([pos, neg], dim=1)
class Linear1d(nn.Module):
    """Per-channel affine transform y = gain * x + bias for NCHW tensors.

    Initialized as the identity (gain = 1, bias = 0).
    """

    def __init__(self, num_channels):
        super(Linear1d, self).__init__()
        self.gain = nn.Parameter(torch.ones(1, num_channels, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_channels, 1, 1))

    def forward(self, x):
        """Scale and shift each channel independently."""
        return self.gain * x + self.bias
| 4,520 | 33.25 | 105 | py |
dct_vae | dct_vae-main/utils/wandb.py | import wandb
import os
import torch
import omegaconf
from hydra.utils import instantiate
api = wandb.Api()
def get_checkpoint(wandb_args, idx, device='cpu'):
    """Fetch 'last_chpt.pth' for wandb run `idx` and load it onto `device`.

    `wandb_args` must provide `entity` and `project`.
    """
    # download the checkpoint from wandb to the local machine.
    file = wandb.restore('last_chpt.pth',
                         run_path=os.path.join(wandb_args.entity,
                                               wandb_args.project,
                                               idx),
                         replace=True)
    # load the checkpoint
    chpt = torch.load(file.name, map_location=device)
    return chpt
def load_model(idx, wandb_args):
    """Rebuild the VAE of wandb run `idx` and load its latest weights.

    Returns (vae, config), where `config` is the run's OmegaConf config.
    Prefers the EMA weights when the checkpoint contains them.
    """
    pth = os.path.join(wandb_args.entity, wandb_args.project, idx)
    run = api.run(pth)
    config = omegaconf.OmegaConf.create(run.config)
    # LOAD THE MODEL
    vae = instantiate(config.model)
    file = wandb.restore('last_chpt.pth', run_path=pth, replace=True)
    chpt = torch.load(file.name, map_location='cpu')
    if chpt['ema_model_state_dict'] is not None:
        vae.load_state_dict(chpt['ema_model_state_dict'])
    else:
        vae.load_state_dict(chpt['model_state_dict'])
    return vae, config
def load_data_module(idx, wandb_args, test_batch_size=None):
    """Re-create the data module used by wandb run `idx` from its config.

    `test_batch_size`, when given, overrides the stored test batch size.
    """
    pth = os.path.join(wandb_args.entity, wandb_args.project, idx)
    run = api.run(pth)
    config = omegaconf.OmegaConf.create(run.config)
    dset_params = {
        'root': 'datasets/'
    }
    # context models additionally need the context size from the model config
    if 'context' in config.model.name:
        dset_params['ctx_size'] = config.model.ctx_size
    if test_batch_size is not None:
        config.dataset.data_module.test_batch_size = test_batch_size
    data_module = instantiate(config.dataset.data_module, **dset_params)
    return data_module
| 1,704 | 31.169811 | 72 | py |
dct_vae | dct_vae-main/utils/vae_layers.py | import torch.nn as nn
from utils.nn import _ResBlock, ConvBlock
class EncoderResBlock(_ResBlock):
    """Encoder residual block: 1x1 -> num_blocks x 3x3 -> 1x1 ConvBlocks.

    Uses pre-activation ConvBlocks (forward=True). Downsampling (stride=2)
    is applied by the last 3x3 block; the skip path comes from _ResBlock.
    """
    def __init__(self, in_channels, hid_channels, out_channels, activation,
                 weight_norm, batch_norm, stride=1, num_blocks=2, use_res=True):
        super(EncoderResBlock, self).__init__(in_channels, out_channels, stride, use_res)
        conv_params = {
            'act': activation,
            'weight_norm': weight_norm,
            'batch_norm': batch_norm,
            'forward': True
        }
        # 1x1 projection into the hidden width
        h_blocks = [
            ConvBlock(in_ch=in_channels, out_ch=hid_channels, kernel_size=1, **conv_params)
        ]
        for i in range(num_blocks):
            h_blocks.append(
                ConvBlock(
                    in_ch=hid_channels,
                    out_ch=hid_channels,
                    kernel_size=3,
                    padding=1,
                    # only the last 3x3 block applies the stride
                    stride=stride if i + 1 == num_blocks else 1,
                    **conv_params
                ))
        # 1x1 projection out of the hidden width
        h_blocks.append(
            ConvBlock(in_ch=hid_channels, out_ch=out_channels, kernel_size=1,
                      **conv_params)
        )
        self.net = nn.Sequential(*h_blocks)
class DecoderResBlock(_ResBlock):
    """Decoder residual block with two middle-layer variants.

    mode '1x5': one depthwise 5x5 ConvBlock; mode '2x3': two 3x3 ConvBlocks.
    stride=-1 prepends a nearest-neighbor x2 upsampling. `zero_last`
    zero-initializes the final 1x1 conv so the residual branch starts as a
    no-op.
    """
    def __init__(self, in_channels, hid_channels, out_channels, activation,
                 weight_norm, batch_norm, stride=1, mode='1x5', use_res=True, zero_last=False):
        super(DecoderResBlock, self).__init__(in_channels, out_channels, stride, use_res)
        assert mode in ['1x5', '2x3']
        h_blocks = []
        if stride == -1:
            h_blocks.append(nn.Upsample(scale_factor=2, mode='nearest'))
        if batch_norm:
            h_blocks.append(nn.BatchNorm2d(in_channels, momentum=0.05))
        params = {
            'batch_norm': batch_norm,
            'weight_norm': weight_norm,
            'act': activation
        }
        # 1x1 projection into the hidden width (post-activation ordering)
        h_blocks += [
            ConvBlock(in_channels, hid_channels, 1, forward=False, **params)
        ]
        if mode == '1x5':
            # depthwise 5x5 (groups == channels)
            h_blocks += [
                ConvBlock(hid_channels, hid_channels, 5, forward=False, padding=2,
                          groups=hid_channels, stride=stride, **params)
            ]
        elif mode == '2x3':
            h_blocks += [
                ConvBlock(hid_channels, hid_channels, 3, forward=False, padding=1,
                          stride=stride, **params),
                ConvBlock(hid_channels, hid_channels, 3, forward=False, padding=1,
                          stride=1, **params),
            ]
        # plain 1x1 conv out of the hidden width (no act/norm attached)
        h_blocks += [
            nn.Conv2d(hid_channels, out_channels, 1)
        ]
        # remember its index so it can be zero-initialized below
        self.last_conv_id = len(h_blocks) - 1
        if batch_norm:
            h_blocks.append(nn.BatchNorm2d(out_channels, momentum=0.05))
        self.net = nn.Sequential(*h_blocks)
        if zero_last:
            nn.init.zeros_(self.net[self.last_conv_id].weight)
            nn.init.zeros_(self.net[self.last_conv_id].bias)
| 2,972 | 35.703704 | 95 | py |
dct_vae | dct_vae-main/utils/distributed_training.py | # """
# Code from https://github.com/openai/vdvae/blob/ea35b490313bc33e7f8ac63dd8132f3cc1a729b4/utils.py#L117
# """
import os
import socket
import torch
import torch.distributed as dist
import omegaconf
#
# # Change this to reflect your cluster layout.
# # The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 4
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# SETUP_RETRY_COUNT = 3
# GPU_ID = ""
def cleanup():
    # Tear down the default torch.distributed process group.
    dist.destroy_process_group()
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
def setup_mpi(args):
    """Initialize torch.distributed (NCCL backend) from an MPI launch.

    Fills `args` with `mpi_size`, `local_rank` and `rank`, broadcasts the
    master address/port from rank 0 via MPI, initializes the process group
    and binds this process to its local GPU. Returns the updated `args`
    (or None when the process group is already initialized).
    """
    if dist.is_initialized():
        return
    print(torch.cuda.device_count(), 'GPUs available')
    with omegaconf.open_dict(args):
        args.mpi_size = mpi_size()
        args.local_rank = local_mpi_rank()
        args.rank = mpi_rank()
    # BUGFIX: log message read "Worls size"
    print('World size:', args.mpi_size)
    print('RANK:', args.rank)
    hostname = "localhost"
    from mpi4py import MPI
    # rank 0 decides the rendezvous address and a free port; everyone
    # receives them through an MPI broadcast
    os.environ['MASTER_ADDR'] = MPI.COMM_WORLD.bcast(hostname, root=0)
    os.environ["RANK"] = str(args.rank)
    os.environ["WORLD_SIZE"] = str(args.mpi_size)
    port = MPI.COMM_WORLD.bcast(_find_free_port(), root=0)
    os.environ['MASTER_PORT'] = str(port)
    dist.init_process_group(
        backend="nccl", init_method="env://", world_size=args.mpi_size, rank=args.rank)
    torch.cuda.set_device(args.local_rank)
    return args
def mpi_size():
    # Total number of MPI processes in the job (world size).
    from mpi4py import MPI
    return MPI.COMM_WORLD.Get_size()
def mpi_rank():
    # Global rank of this process within MPI.COMM_WORLD.
    from mpi4py import MPI
    return MPI.COMM_WORLD.Get_rank()
def num_nodes():
    """Number of physical nodes, assuming GPUS_PER_NODE processes per node."""
    world = mpi_size()
    # ceil(world / GPUS_PER_NODE) without importing math
    return -(-world // GPUS_PER_NODE)
def gpus_per_node():
    # Processes per node; falls back to 1 for single-process runs.
    size = mpi_size()
    if size > 1:
        return max(size // num_nodes(), 1)
    return 1
def local_mpi_rank():
    # Rank of this process within its own node (used as the GPU index).
    return mpi_rank() % gpus_per_node()
def is_dist_avail_and_initialized():
    """True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_rank():
    """Global rank of this process, or 0 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True on the rank-0 process (the one doing logging/checkpointing)."""
    # (strips extraction garbage that was fused onto the return line)
    return get_rank() == 0
dct_vae | dct_vae-main/utils/trainer.py | import torch
import math
import time
import os
import numpy as np
import datasets.dct
import wandb
import torch
from torch.optim import Adamax, AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR, CyclicLR
import torch.nn.functional as F
import torch.distributed as dist
from hydra.utils import instantiate
from utils.distributed_training import is_main_process
def train(args, train_loader, val_loader, model, optimizer, scheduler, ema_model=None, scaler=None):
    """Main training loop with validation, checkpointing and early stopping.

    Logs metrics to wandb from the main process, saves the best checkpoint
    (by validation loss) into the wandb run directory, and synchronizes the
    early-stopping counter across workers when DDP is used.
    """
    # unwrap the DDP container once; run_epoch always receives the raw model
    raw_model = model.module if args.ddp else model
    # compute metrics on initialization
    with torch.no_grad():
        history_val = run_epoch(args=args,
                                epoch=args.start_epoch,
                                model=raw_model,
                                loader=val_loader,
                                optimizer=None,
                                mode='val',
                                scaler=scaler)
    if is_main_process():
        wandb.log({**history_val, 'epoch': 0})
    best_loss = history_val['val/loss']
    # early-stopping counter kept as a tensor so it can be all-reduced
    e = torch.zeros(1, device=args.device)
    for epoch in range(args.start_epoch, args.max_epochs):
        if args.ddp:
            train_loader.sampler.set_epoch(epoch)
            val_loader.sampler.set_epoch(epoch)
        time_start = time.time()
        print('Training')
        history_train = run_epoch(args,
                                  epoch=epoch,
                                  model=raw_model,
                                  loader=train_loader,
                                  optimizer=optimizer,
                                  mode='train',
                                  ema_model=ema_model,
                                  scaler=scaler)
        history_val = {}
        if is_main_process():
            with torch.no_grad():
                print('Validating')
                # validate with the EMA weights when they are tracked
                vae = ema_model if ema_model is not None else raw_model
                # BUGFIX: the epoch number was hard-coded to args.start_epoch,
                # so the epoch-dependent image logging inside run_epoch
                # (epoch % eval_freq) never advanced.
                history_val = run_epoch(args,
                                        epoch=epoch,
                                        model=vae,
                                        loader=val_loader,
                                        optimizer=None,
                                        mode='val',
                                        scaler=scaler)
            # NOTE(review): the scheduler only steps on the main process;
            # with DDP this desynchronizes learning rates across workers —
            # confirm whether that is intended.
            if scheduler is not None:
                if args.scheduler == 'plateau':
                    scheduler.step(history_val['val/loss'])
                else:
                    scheduler.step()
        time_elapsed = time.time() - time_start
        hist = {**history_train, **history_val, 'time': time_elapsed}
        # save stats to wandb
        if is_main_process():
            wandb.log(hist)
            if hist['val/loss'] < best_loss:
                e, best_loss = torch.zeros(1, device=args.device), hist['val/loss']
                # save checkpoint
                print('->model saved<-\n')
                chpt = {
                    'epoch': epoch,
                    'model_state_dict': model.module.state_dict() if args.ddp else model.state_dict(),
                    'ema_model_state_dict': None if ema_model is None else ema_model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'scheduler_state_dict': None if scheduler is None else scheduler.state_dict(),
                    'scaler_state_dict': None if scaler is None else scaler.state_dict(),
                    'loss': hist['val/loss'],
                }
                torch.save(chpt, os.path.join(wandb.run.dir, 'last_chpt.pth'))
                wandb.save(os.path.join(wandb.run.dir, 'last_chpt.pth'))
            else:
                e += 1
            print('Epoch: {}/{}, Time elapsed: {:.2f}s\n'
                  '* Train loss: {:.2f} || Val. loss: {:.2f} \n'
                  '\t Early stopping: {}/{} (BEST: {:.2f})\n'.format(
                epoch + 1, args.max_epochs, time_elapsed,
                hist['train/loss'], hist['val/loss'],
                int(e.item()), args.early_stopping_epochs, best_loss)
            )
            if math.isnan(hist['val/loss']):
                break
        # finish training if val loss is not improving anymore
        if args.ddp:
            dist.all_reduce(e)
        if e > args.early_stopping_epochs:
            break
        # only the main process keeps the authoritative counter; the others
        # reset so the next all_reduce does not double-count
        if not is_main_process():
            e *= 0
def run_epoch(args, epoch, model, loader, optimizer, mode='train', ema_model=None, scaler=None):
    """Run one epoch of training or validation and return a metrics dict.

    `mode` is 'train' (requires `optimizer`) or 'val'. Scalar log entries
    are averaged over batches; list entries are collected into histograms.
    Validation on the main process additionally logs sample images every
    `args.eval_freq` epochs.
    """
    if mode == 'train':
        model.train()
        model.current_epoch = epoch
        lr = optimizer.param_groups[0]["lr"]
        history = {"lr": lr, 'epoch': epoch+1, 'beta': model.get_beta()}
    elif mode == 'val':
        model.eval()
    # NOTE(review): this unconditional re-assignment discards the
    # lr/epoch/beta entries built in the 'train' branch above — confirm
    # whether these two lines were meant to live inside the 'val' branch.
    histograms = {}
    history = {}
    for batch_idx, batch in enumerate(loader):
        if 'cuda' in args.device:
            for i in range(len(batch)):
                batch[i] = batch[i].cuda(non_blocking=True)
        # calculate VAE Loss
        if scaler is not None:
            with torch.autocast(device_type=args.device, dtype=torch.float16):
                loss, logs = model.train_step(batch, mode=mode)
                if args.loss_per_pixel:
                    n_dim = np.prod(batch[0][0].shape)
                    loss = loss / n_dim
        else:
            loss, logs = model.train_step(batch, mode=mode)
            if args.loss_per_pixel:
                n_dim = np.prod(batch[0][0].shape)
                loss = loss / n_dim
        if mode == 'train':
            if scaler is not None:
                scaler.scale(loss).backward()
            else:
                loss.backward()
            nans = torch.isnan(loss).sum()
            # step only every `acc_grad` batches (gradient accumulation);
            # skip the step entirely when the loss is NaN
            if batch_idx % args.acc_grad == 0:
                if nans == 0:
                    optim_step(args, model, optimizer, scaler)
                    with torch.no_grad():
                        if ema_model is not None:
                            update_ema(model, ema_model, args.ema_rate)
                optimizer.zero_grad()
        # update running means
        for k in logs.keys():
            # namespace bare keys under the current mode, e.g. 'loss' -> 'val/loss'
            h_key = k
            if '/' not in k:
                h_key = f'{mode}/{k}'
            if isinstance(logs[k], list):
                if k not in histograms.keys():
                    histograms[k] = []
                histograms[k] += logs[k]
            else:
                if h_key not in history.keys():
                    history[h_key] = 0.
                history[h_key] += logs[k] / len(loader)
    if mode == 'val' and is_main_process():
        if epoch % args.eval_freq == 0:
            # add images
            history['pic/X'] = wandb.Image(batch[0][:8])
            fwd_output = model.forward(batch)
            plot_rec = fwd_output[0][:8]
            history['pic/Recon'] = wandb.Image(plot_rec)
            # samples at several temperatures
            for t in [0.6, 0.85, 1.]:
                sample = model.generate_x(8, t=t)
                history[f'pic/Samples (t={t})'] = wandb.Image(sample)
            # add histograms
            for k in histograms.keys():
                history[f'hist/{k}'] = wandb.Histogram(torch.cat(histograms[k]))
            history.update(model.val_pics(batch, fwd_output))
    return history
def optim_step(args, model, optimizer, scaler):
    """Take one optimizer step with optional gradient clipping/skipping.

    When `args.grad_clip > 0`, gradients are clipped and the step is
    skipped when the pre-clip norm exceeds `args.grad_skip_thr`
    (a threshold of 0 disables skipping). Stats are logged to wandb from
    the main process.
    """
    # BUGFIX: grad_norm was only assigned inside the grad_clip branch,
    # so the logging below raised NameError whenever grad_clip <= 0.
    grad_norm = 0.
    if args.grad_clip > 0:
        if scaler is not None:
            # gradients must be unscaled before measuring/clipping them
            scaler.unscale_(optimizer)
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip).item()
    logs = {
        'grad_norm': grad_norm,
        'skipped_steps': 1
    }
    # with clipping disabled grad_norm stays 0, so the step is never skipped
    if args.grad_skip_thr == 0 or grad_norm < args.grad_skip_thr:
        if scaler is not None:
            scaler.step(optimizer)
        else:
            optimizer.step()
        logs['skipped_steps'] = 0
    if scaler is not None:
        scaler.update()
    if is_main_process():
        wandb.log(logs)
def update_ema(model, ema_model, ema_rate):
    """In-place EMA update: ema = ema_rate * ema + (1 - ema_rate) * param."""
    for param, shadow in zip(model.parameters(), ema_model.parameters()):
        shadow.data.mul_(ema_rate).add_(param.data * (1 - ema_rate))
| 8,569 | 38.675926 | 102 | py |
dct_vae | dct_vae-main/utils/thirdparty/blurpool.py | # source: https://github.com/adobe/antialiased-cnns/blob/master/antialiased_cnns/blurpool.py
# Copyright (c) 2019, Adobe Inc. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike
# 4.0 International Public License. To view a copy of this license, visit
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode.
import torch
import torch.nn.parallel
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class BlurPool(nn.Module):
    """Anti-aliased 2D downsampling: pad, blur with a binomial filter, then
    subsample (Zhang, "Making Convolutional Networks Shift-Invariant Again").
    """
    def __init__(self, channels, pad_type='reflect', filt_size=4, stride=2, pad_off=0):
        super(BlurPool, self).__init__()
        self.filt_size = filt_size
        self.pad_off = pad_off
        # asymmetric padding so even-sized filters stay centered
        self.pad_sizes = [int(1.*(filt_size-1)/2), int(np.ceil(1.*(filt_size-1)/2)), int(1.*(filt_size-1)/2), int(np.ceil(1.*(filt_size-1)/2))]
        self.pad_sizes = [pad_size+pad_off for pad_size in self.pad_sizes]
        self.stride = stride
        self.off = int((self.stride-1)/2.)
        self.channels = channels
        # 1D binomial (Pascal's triangle) coefficients; approximate a Gaussian.
        # NOTE(review): filt_size outside 1..7 leaves `a` undefined (NameError).
        if(self.filt_size==1):
            a = np.array([1.,])
        elif(self.filt_size==2):
            a = np.array([1., 1.])
        elif(self.filt_size==3):
            a = np.array([1., 2., 1.])
        elif(self.filt_size==4):
            a = np.array([1., 3., 3., 1.])
        elif(self.filt_size==5):
            a = np.array([1., 4., 6., 4., 1.])
        elif(self.filt_size==6):
            a = np.array([1., 5., 10., 10., 5., 1.])
        elif(self.filt_size==7):
            a = np.array([1., 6., 15., 20., 15., 6., 1.])
        # outer product -> normalized 2D filter, one copy per channel (depthwise)
        filt = torch.Tensor(a[:,None]*a[None,:])
        filt = filt/torch.sum(filt)
        self.register_buffer('filt', filt[None,None,:,:].repeat((self.channels,1,1,1)))
        self.pad = get_pad_layer(pad_type)(self.pad_sizes)
    def forward(self, inp):
        if(self.filt_size==1):
            # filter of size 1 degenerates to plain strided subsampling
            if(self.pad_off==0):
                return inp[:,:,::self.stride,::self.stride]
            else:
                return self.pad(inp)[:,:,::self.stride,::self.stride]
        else:
            # depthwise (groups == channels) blur + strided subsampling
            return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
def get_pad_layer(pad_type):
    """Map a padding-type string to the matching 2D padding layer class.

    Raises:
        ValueError: for an unknown `pad_type`. (The original printed a
        message and then raised NameError on the undefined return value.)
    """
    if(pad_type in ['refl','reflect']):
        PadLayer = nn.ReflectionPad2d
    elif(pad_type in ['repl','replicate']):
        PadLayer = nn.ReplicationPad2d
    elif(pad_type=='zero'):
        PadLayer = nn.ZeroPad2d
    else:
        raise ValueError('Pad type [%s] not recognized' % pad_type)
    return PadLayer
class BlurPool1D(nn.Module):
    """1D variant of BlurPool: pad, blur with a binomial filter, subsample."""
    def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
        super(BlurPool1D, self).__init__()
        self.filt_size = filt_size
        self.pad_off = pad_off
        # asymmetric padding so even-sized filters stay centered
        self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
        self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
        self.stride = stride
        self.off = int((self.stride - 1) / 2.)
        self.channels = channels
        # print('Filter size [%i]' % filt_size)
        # 1D binomial coefficients; NOTE(review): filt_size outside 1..7
        # leaves `a` undefined (NameError).
        if(self.filt_size == 1):
            a = np.array([1., ])
        elif(self.filt_size == 2):
            a = np.array([1., 1.])
        elif(self.filt_size == 3):
            a = np.array([1., 2., 1.])
        elif(self.filt_size == 4):
            a = np.array([1., 3., 3., 1.])
        elif(self.filt_size == 5):
            a = np.array([1., 4., 6., 4., 1.])
        elif(self.filt_size == 6):
            a = np.array([1., 5., 10., 10., 5., 1.])
        elif(self.filt_size == 7):
            a = np.array([1., 6., 15., 20., 15., 6., 1.])
        # normalized filter, one copy per channel (depthwise conv)
        filt = torch.Tensor(a)
        filt = filt / torch.sum(filt)
        self.register_buffer('filt', filt[None, None, :].repeat((self.channels, 1, 1)))
        self.pad = get_pad_layer_1d(pad_type)(self.pad_sizes)
    def forward(self, inp):
        if(self.filt_size == 1):
            # filter of size 1 degenerates to plain strided subsampling
            if(self.pad_off == 0):
                return inp[:, :, ::self.stride]
            else:
                return self.pad(inp)[:, :, ::self.stride]
        else:
            # depthwise (groups == channels) blur + strided subsampling
            return F.conv1d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
def get_pad_layer_1d(pad_type):
    """Map a padding-type string to the matching 1D padding layer class.

    Raises:
        ValueError: for an unknown `pad_type`. (The original printed a
        message and then raised NameError on the undefined return value.)
    """
    if(pad_type in ['refl', 'reflect']):
        PadLayer = nn.ReflectionPad1d
    elif(pad_type in ['repl', 'replicate']):
        PadLayer = nn.ReplicationPad1d
    elif(pad_type == 'zero'):
        PadLayer = nn.ZeroPad1d
    else:
        raise ValueError('Pad type [%s] not recognized' % pad_type)
    return PadLayer
dct_vae | dct_vae-main/utils/thirdparty/pytorch_msssim.py | # https://github.com/AKuzina/defend_vae_mcmc/tree/main/thirdparty/pytorch_msssim
import torch
import torch.nn.functional as F
from math import exp
import numpy as np
def gaussian(window_size, sigma):
    """Normalized 1D Gaussian kernel of length `window_size`."""
    center = window_size // 2
    weights = [exp(-(i - center) ** 2 / float(2 * sigma ** 2))
               for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel=1):
    """Build a (channel, 1, ws, ws) Gaussian window for grouped conv2d."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return kernel_2d.expand(channel, 1, window_size, window_size).contiguous()
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
    """Structural similarity between two NCHW image batches.

    Returns a scalar (size_average) or per-image values; with `full=True`
    also returns the contrast-sensitivity term used by MS-SSIM.
    """
    # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
    if val_range is None:
        # heuristically infer the dynamic range L from the data itself
        if torch.max(img1) > 128:
            max_val = 255
        else:
            max_val = 1
        if torch.min(img1) < -0.5:
            min_val = -1
        else:
            min_val = 0
        L = max_val - min_val
    else:
        L = val_range
    padd = 0
    (_, channel, height, width) = img1.size()
    if window is None:
        # never use a window larger than the image
        real_size = min(window_size, height, width)
        window = create_window(real_size, channel=channel).to(img1.device)
    # local means via grouped (per-channel) Gaussian filtering
    mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
    mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2
    # local (co)variances: E[x^2] - E[x]^2 etc.
    sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
    # stabilizing constants from the SSIM paper, scaled by the dynamic range
    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2
    v1 = 2.0 * sigma12 + C2
    v2 = sigma1_sq + sigma2_sq + C2
    cs = v1 / v2  # contrast sensitivity
    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
    if size_average:
        ret = ssim_map.mean()
        cs = cs.mean()
    else:
        ret = ssim_map.mean(1).mean(1).mean(1)
        cs = cs.mean(1).mean(1).mean(1)
    if full:
        return ret, cs
    return ret
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=None):
    """Multi-scale SSIM over 5 dyadic scales with the standard weights.

    `normalize` in {'relu', 'simple', True, None} controls the (non-standard)
    normalizations applied to per-scale values.
    """
    device = img1.device
    # canonical MS-SSIM per-scale weights (Wang et al.)
    weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
    levels = weights.size()[0]
    ssims = []
    mcs = []
    for _ in range(levels):
        sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
        # Relu normalize (not compliant with original definition)
        if normalize == "relu":
            ssims.append(torch.relu(sim))
            mcs.append(torch.relu(cs))
        else:
            ssims.append(sim)
            mcs.append(cs)
        # NOTE(review): this guard checks only the last dimension (width)
        # before halving the resolution for the next scale — confirm it
        # should not also check the height.
        if img1.shape[-1] > 1:
            img1 = F.avg_pool2d(img1, (2, 2))
            img2 = F.avg_pool2d(img2, (2, 2))
    ssims = torch.stack(ssims, dim=-1)
    mcs = torch.stack(mcs, dim=-1)
    # Simple normalize (not compliant with original definition)
    # TODO: remove support for normalize == True (kept for backward support)
    if normalize == "simple" or normalize == True:
        ssims = (ssims + 1) / 2
        mcs = (mcs + 1) / 2
    if not size_average:
        weights = weights.unsqueeze(0)
    pow1 = mcs ** weights
    pow2 = ssims ** weights
    if not size_average:
        pow1 = pow1.transpose(1, 0)
        pow2 = pow2.transpose(1, 0)
    # From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
    # product of cs terms for coarse scales times the SSIM of the finest scale
    output = torch.prod(pow1[:-1] * pow2[-1], dim=0)
    return output
# Classes to re-use window
class SSIM(torch.nn.Module):
    """Module wrapper around the functional `ssim` that caches the window."""
    def __init__(self, window_size=11, size_average=True, val_range=None):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.val_range = val_range
        # Assume 1 channel for SSIM
        self.channel = 1
        self.window = create_window(window_size)
    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()
        # reuse the cached window when channel count and dtype still match
        if channel == self.channel and self.window.dtype == img1.dtype:
            window = self.window
        else:
            # rebuild (and re-cache) the window for the new channel count/dtype
            window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
            self.window = window
            self.channel = channel
        return ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
class MSSSIM(torch.nn.Module):
    """Module wrapper around the functional `msssim` (multi-scale SSIM).

    (Also strips extraction garbage fused onto the original return line.)
    """

    def __init__(self, window_size=11, size_average=True, channel=3):
        super(MSSSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = channel

    def forward(self, img1, img2):
        # TODO: store window between calls if possible
        return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
dct_vae | dct_vae-main/utils/thirdparty/unet.py | from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from utils.nn import conv_nd
# UNet implemenatation from https://github.com/openai/guided-diffusion
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    freqs = th.exp(
        -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
    ).to(device=timesteps.device)
    phase = timesteps[:, None].float() * freqs[None]
    embedding = th.cat([th.cos(phase), th.sin(phase)], dim=-1)
    if dim % 2:
        # odd target dimension: pad with a single zero column
        zero_col = th.zeros_like(embedding[:, :1])
        embedding = th.cat([embedding, zero_col], dim=-1)
    return embedding
def zero_module(module):
    """
    Zero out every parameter of `module` in place and return the module.
    """
    for param in module.parameters():
        # Zero through a detached view so autograd does not record the write.
        param.detach().zero_()
    return module
class AttentionPool2d(nn.Module):
    """
    Attention pooling over spatial positions, adapted from CLIP:
    https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # One learned position per spatial location plus one for the mean token.
        self.positional_embedding = nn.Parameter(
            th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
        )
        self.qkv_proj = nn.Conv1d(embed_dim, 3 * embed_dim, 1)
        self.c_proj = nn.Conv1d(embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        batch, ch = x.shape[0], x.shape[1]
        tokens = x.reshape(batch, ch, -1)  # NC(HW)
        # Prepend the spatial mean as an extra token.
        tokens = th.cat([tokens.mean(dim=-1, keepdim=True), tokens], dim=-1)  # NC(HW+1)
        tokens = tokens + self.positional_embedding[None, :, :].to(tokens.dtype)
        tokens = self.qkv_proj(tokens)
        tokens = self.attention(tokens)
        tokens = self.c_proj(tokens)
        # Return only the pooled (mean-token) features.
        return tokens[:, :, 0]
class TimestepBlock(nn.Module):
    """
    Interface for modules whose forward() additionally receives a timestep
    embedding tensor as its second argument.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x`, conditioned on the `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    nn.Sequential variant that forwards the timestep embedding to every
    child that implements the TimestepBlock interface.
    """

    def forward(self, x, emb):
        for child in self:
            x = child(x, emb) if isinstance(child, TimestepBlock) else child(x)
        return x
class Upsample(nn.Module):
    """
    Nearest-neighbour 2x upsampling with an optional trailing convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, only the
                 inner two dimensions are upsampled.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D: keep depth, double height and width only.
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: if True, downsample with a stride-2 convolution;
                     otherwise use average pooling.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D signals are only pooled over the inner two (H, W) dimensions.
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=1
            )
        else:
            assert self.channels == self.out_channels
            # BUG FIX: the original called `avg_pool_nd`, which is never
            # imported in this file (only `conv_nd` is), raising a NameError
            # whenever use_conv=False. Build the pooling layer directly.
            pool_cls = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}[dims]
            self.op = pool_cls(kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class GroupNorm16(nn.GroupNorm):
    """GroupNorm computed in float32 but returning the input's dtype."""

    def forward(self, x):
        out = super().forward(x.float())
        return out.type(x.dtype)
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param use_scale_shift_norm: use FiLM-like scale/shift conditioning
        instead of adding the embedding to the hidden state.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """
    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm
        # norm -> SiLU -> conv; GroupNorm16 runs the normalization in fp32.
        self.in_layers = nn.Sequential(
            # normalization(channels),
            GroupNorm16(16, channels),
            # nn.GroupNorm(16, channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )
        self.updown = up or down
        # Optional resolution change, applied separately to the hidden path
        # (h_upd) and the skip path (x_upd) inside _forward.
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()
        # Projects the timestep embedding; width is doubled when scale/shift
        # conditioning needs separate scale and shift tensors.
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        # The final conv is zero-initialized so the block starts out as
        # (approximately) an identity mapping.
        self.out_layers = nn.Sequential(
            GroupNorm16(16, self.out_channels),
            # nn.GroupNorm(16, self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )
        # Skip connection: identity when channels match, otherwise a 3x3
        # (use_conv) or 1x1 conv to change the channel count.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )
    def _forward(self, x, emb):
        if self.updown:
            # Run norm+activation first, resample both paths, then apply the
            # conv, so the resolution change happens before the convolution.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over all spatial dimensions.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: h = norm(h) * (1 + scale) + shift.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """
    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        # Either a fixed number of heads, or a fixed channel width per head.
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = GroupNorm16(16, channels)
        # self.norm = nn.GroupNorm(16, channels)
        # 1x1 conv producing concatenated Q, K, V.
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)
        # Zero-initialized output projection: the block starts as identity.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)
    def _forward(self, x):
        # Flatten all spatial dims into a single token axis, attend, then
        # restore the original spatial shape.
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        x_norm = self.norm(x)
        qkv = self.qkv(x_norm)
        h = self.attention(qkv)
        h = self.proj_out(h)
        # Residual connection around the attention.
        return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    b, c, *spatial = y[0].shape
    num_spatial = int(np.prod(spatial))
    # Two matmuls of identical cost: QK^T to form the attention weights,
    # then weights @ V to combine the value vectors.
    matmul_ops = 2 * b * (num_spatial ** 2) * c
    model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
    """
    QKV attention with the legacy layout: heads are split before the qkv
    channels are separated. Input/output shaping matches QKVAttention.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        # Scale q and k symmetrically by ch^-1/4 each: more stable in fp16
        # than dividing the logits afterwards.
        scale = 1 / math.sqrt(math.sqrt(ch))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        weight = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        attended = th.einsum("bts,bcs->bct", weight, v)
        return attended.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
    """
    QKV attention that splits the qkv channels before splitting heads.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        # Symmetric ch^-1/4 pre-scaling of q and k: more stable in fp16 than
        # dividing the logits afterwards.
        scale = 1 / math.sqrt(math.sqrt(ch))
        logits = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(bs * self.n_heads, ch, length),
            (k * scale).view(bs * self.n_heads, ch, length),
        )
        weight = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        attended = th.einsum(
            "bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)
        )
        return attended.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        # Checkpointing disabled: just call the function directly.
        return func(*inputs)
    args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *args)
class CheckpointFunction(th.autograd.Function):
    # Custom autograd Function implementing gradient checkpointing: the
    # forward pass runs under no_grad (no activations stored), and the
    # backward pass re-runs the function to recompute them.
    @staticmethod
    def forward(ctx, run_function, length, *args):
        # `args` packs the real inputs (first `length` entries) followed by
        # the parameters the function depends on.
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with th.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors
    @staticmethod
    def backward(ctx, *output_grads):
        # Recompute the forward pass with gradients enabled, then
        # backpropagate through the recomputed graph.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with th.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = th.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # The first two grads correspond to (run_function, length), which
        # are non-tensor arguments.
        return (None, None) + input_grads
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
                               a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.
    """
    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        # NOTE(review): `attention_resolutions` arrives as a string and is
        # eval()'d here — unsafe if the config can come from untrusted input.
        attention_resolutions = eval(attention_resolutions)
        if num_heads_upsample == -1:
            num_heads_upsample = num_heads
        # Stores every constructor argument (all locals) as attributes.
        self.__dict__.update(locals())
        self.dtype = th.float32
        self.num_heads_upsample = num_heads_upsample
        time_embed_dim = model_channels * 1
        # When class-conditional, reserve part of the embedding width for the
        # one-hot class vector concatenated in forward().
        if self.num_classes is not None:
            time_embed_out = time_embed_dim - self.num_classes
        else:
            time_embed_out = time_embed_dim
        self.time_embed = nn.Sequential(
            nn.Linear(model_channels, time_embed_dim),
            nn.SiLU(),
            nn.Linear(time_embed_dim, time_embed_out),
        )
        if self.num_classes is not None:
            # Labels are embedded as plain one-hot vectors.
            def emb_one_hot(x):
                return F.one_hot(x.long(), self.num_classes)
            self.label_emb = emb_one_hot
        ch = input_ch = int(channel_mult[0] * model_channels)
        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
        )
        self._feature_size = ch
        # Channel counts of every input block, consumed later to size the
        # skip connections of the output (upsampling) blocks.
        input_block_chans = [ch]
        ds = 1
        # Downsampling trunk: res blocks (+ attention at the configured
        # downsample rates), with a downsample between scales.
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=int(mult * model_channels),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(mult * model_channels)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (skipped after the last level).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch
        # Bottleneck: res block -> attention -> res block.
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        # Upsampling trunk, mirroring the downsampling path; each block also
        # consumes the matching skip connection (ich channels).
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=int(model_channels * mult),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(model_channels * mult)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                if level and i == num_res_blocks:
                    # End of this level (except the outermost): upsample.
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
        # NOTE(review): stray debug print left in the constructor.
        print('Out')
        # Final projection; zero-initialized so the model starts by
        # predicting zeros.
        self.out = nn.Sequential(
            GroupNorm16(16, ch),
            # nn.GroupNorm(16, ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
        )
    def forward(self, x, timesteps, y=None):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        # Stack of skip activations, popped in reverse by the output blocks.
        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            # Append the one-hot class embedding to the time embedding.
            emb = th.cat([emb, self.label_emb(y)], 1)
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
        h = self.middle_block(h, emb)
        for module in self.output_blocks:
            # Concatenate the matching skip connection along channels.
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb)
        h = h.type(x.dtype)
        return self.out(h)
| 25,741 | 35.154494 | 124 | py |
dct_vae | dct_vae-main/model/context_ladder_vae.py | import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import wandb
import torch.nn as nn
import torch.nn.functional as F
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from model.vae import LADDER_VAE, compute_sample_kl
from model.ddgm import DiffusionPrior
class CTX_LADDER_VAE(LADDER_VAE):
    """Ladder VAE whose top "latent" is an image context (e.g. DCT
    coefficients or a downsampled image) handled by the decoder's first
    block."""

    def __init__(self,
                 encoder,
                 decoder,
                 likelihood,
                 beta_start,
                 beta_end,
                 warmup,
                 is_k,
                 latent_scales,
                 free_bits_thr,
                 **kwargs):
        super().__init__(encoder,
                         decoder,
                         likelihood,
                         beta_start,
                         beta_end,
                         warmup,
                         is_k,
                         latent_scales,
                         free_bits_thr)

    def encode(self, batch):
        """Return the encoder states plus the image context appended last."""
        x = batch[0]
        # For DCT contexts the precomputed coefficients travel with the
        # batch; otherwise derive the context from the image itself.
        if self.decoder.ctx_type == 'dct':
            ctx = batch[-1]
        else:
            ctx = self.decoder.decoder_blocks[0].x_to_ctx(x)
        return self.encoder(x) + [ctx]

    def generate_x(self, N=25, t=None):
        """Sample N images from the prior chain (no encoder states)."""
        empty_states = [None] * (sum(self.latent_scales) + 1)
        p_xz_params, _, _, _ = self.decoder(empty_states, N=N, t=t)
        return self.get_x_from_px(self.likelihood(*p_xz_params))

    def process_z_L_samples(self, z_L):
        # Map context samples back to image space for visualisation.
        return self.decoder.decoder_blocks[0].ctx_to_x(z_L)
dct_vae | dct_vae-main/model/context_decoder.py | import math
from typing import Optional, Union
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
from utils.vae_layers import DecoderResBlock
from datasets.dct import DCT
from utils.distribution import Normal, Delta
from model.ddgm import DiffusionPrior, DiffusionDCTPrior
from utils.thirdparty.unet import UNetModel
from model.decoder import LadderDecoder, quantize
class _CtxDecoderBlock(nn.Module):
def __init__(self,
x_size: list,
ctx_size: list,
ctx_bits: Union[int, None],
ctx_posterior: str,
ctx_prior: nn.Module,
max_scale: int,
next_ch: int,
):
super().__init__()
self.__dict__.update(locals())
self.ctx_prior = ctx_prior
self.ctx_posterior = self.get_ctx_posterior(ctx_posterior)
self.posprocess_block = nn.Sequential(
nn.AvgPool2d(kernel_size=int(max_scale), stride=int(max_scale)),
nn.Conv2d(self.x_size[0], next_ch, kernel_size=3, padding=1)
)
def get_ctx_posterior(self, type, var_init=-10):
if 'fixed' in type:
self.ctx_logvar = nn.Parameter(var_init*torch.ones([1] + self.ctx_size), requires_grad=False)
return lambda x: Normal(x, self.ctx_logvar.repeat(x.shape[0], 1, 1, 1))
elif 'train' in type:
self.ctx_logvar = nn.Parameter(var_init*torch.ones([1] + self.ctx_size), requires_grad=True)
return lambda x: Normal(x, torch.clamp(self.ctx_logvar, -10, 10).repeat(x.shape[0], 1, 1, 1))
elif 'conditional' in type:
self.ctx_logvar = nn.Sequential(
nn.Conv2d(self.ctx_size[0], 100, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(100, self.ctx_size[0], kernel_size=3, padding=1),
nn.Softplus(0.7),
)
return lambda x: Normal(x, torch.clamp(torch.log(self.ctx_logvar(x)), -7, 7))
elif 'delta' in type:
return lambda x: Delta(x)
else:
raise ValueError(f'unknown ctx posterior: {type}')
def forward(self, ctx_val, s_dec, mode, t=None):
"""
:param ctx_val: Analog of s_enc in LadderVAE.
:param s_dec: Here for compatibility with decoder block interface
:param mode: train, test
:param t: temperature
:return: (p_dist, q_dist, z_sample, s_dec)
"""
if ctx_val is None:
q_dist = None
ctx_val = self.ctx_prior.sample(s_dec.shape[0], t=t)
ctx_val = quantize(ctx_val, self.ctx_bits)
else:
ctx_val = self.preprocess_ctx(ctx_val)
q_dist = self.ctx_posterior(ctx_val)
ctx_val = q_dist.sample()
if isinstance(self.ctx_prior, DiffusionPrior) or isinstance(self.ctx_prior, DiffusionDCTPrior):
if mode == 'test':
p_dist = self.ctx_prior.eval_is_ll(ctx_val, is_k=10)
else:
p_dist = self.ctx_prior.log_prob(ctx_val, mode=mode, reduce_dim=False)
else:
p_dist = self.ctx_prior
x_ctx_val = self.ctx_to_x(ctx_val)
x_ctx_val = self.posprocess_block(x_ctx_val)
s_dec = s_dec + x_ctx_val
return p_dist, q_dist, ctx_val, s_dec
def ctx_to_x(self, ctx):
assert NotImplementedError
def x_to_ctx(self, x):
assert NotImplementedError
def preprocess_ctx(self, ctx):
"""
In needed, precprocess context that was created on the dataset construction stage.
E.g. for DCT context we will do normalization and quantization on this step.
:param ctx:
:return:
"""
return ctx
class DCTDecoderBlock(_CtxDecoderBlock):
    # Context block whose context is the low-frequency corner of the image's
    # 2D DCT, normalized by dataset-level scale statistics.
    def __init__(self,
                 x_size: list,
                 ctx_size: list,
                 ctx_bits: Union[int, None],
                 ctx_posterior: str,
                 ctx_prior: nn.Module,
                 max_scale: int,
                 next_ch: int,
                 # mode='RGB'
                 ):
        super(DCTDecoderBlock, self).__init__(
            x_size=x_size,
            ctx_size=ctx_size,
            ctx_bits=ctx_bits,
            ctx_posterior=ctx_posterior,
            ctx_prior=ctx_prior,
            max_scale=max_scale,
            next_ch=next_ch,
        )
        self.dct = DCT(x_size[1], x_size[2])
        # DCT scaling parameters
        # (filled in from the dataset by ContextLadderDecoder.init_dct_normalization)
        self.dct_mean = nn.Parameter(torch.zeros(ctx_size), requires_grad=False)
        self.dct_std = nn.Parameter(torch.zeros(ctx_size), requires_grad=False)
        self.dct_scale = nn.Parameter(torch.zeros(ctx_size), requires_grad=False)
        self.std_mult = 4
    def ctx_to_x(self, ctx):
        # unnormalize
        ctx = ctx * self.dct_scale
        # pad with 0 and invert DCT
        pad = self.x_size[1] - ctx.shape[-1]
        x = self.dct.idct2(F.pad(ctx, (0, pad, 0, pad)))
        # Clamp to [0, 1] and rescale to the [-1, 1] image range.
        x = 2 * torch.clamp(x, 0, 1) - 1
        # if self.mode == 'YCbCr':
        #     x = YCBCR_to_RGB(x)
        return x
    def x_to_ctx(self, x, preprocess=True):
        # Keep only the top-left (low-frequency) ctx_size x ctx_size corner.
        dct = self.dct.dct2(x)[:, :, :self.ctx_size[1], :self.ctx_size[1]]
        if preprocess:
            dct = self.preprocess_ctx(dct)
        return dct
    def preprocess_ctx(self, y_dct):
        # normalize
        y_dct = y_dct / self.dct_scale
        # exactly [-1, 1]
        y_dct = torch.clamp(y_dct, -1, 1)
        y_dct = quantize(y_dct, self.ctx_bits)
        return y_dct
class DownsampleDecoderBlock(_CtxDecoderBlock):
    """Context block whose context is an average-pooled (downsampled) image;
    ``ctx_to_x`` restores image resolution by bilinear upsampling."""

    def __init__(self,
                 x_size: list,
                 ctx_size: list,
                 ctx_bits: Union[int, None],
                 ctx_posterior: str,
                 ctx_prior: nn.Module,
                 max_scale: int,
                 next_ch: int,
                 # mode='RGB'
                 ):
        super(DownsampleDecoderBlock, self).__init__(
            x_size=x_size,
            ctx_size=ctx_size,
            ctx_bits=ctx_bits,
            ctx_posterior=ctx_posterior,
            ctx_prior=ctx_prior,
            max_scale=max_scale,
            next_ch=next_ch,
        )
        # Pooling kernel mapping the image resolution onto the context
        # resolution, plus the symmetric padding needed to make it exact.
        self.kernel_size = int(np.ceil(self.x_size[1] / self.ctx_size[1]))
        self.pad_size = int(
            np.ceil((self.kernel_size * self.ctx_size[1] - self.x_size[1]) // 2))

    def ctx_to_x(self, ctx):
        # Bilinear upsample back to the padded image resolution, then crop
        # the padding away.
        upsampled = nn.Upsample(scale_factor=self.kernel_size, mode='bilinear')(ctx)
        hi = self.x_size[1] + self.pad_size
        return upsampled[:, :, self.pad_size:hi, self.pad_size:hi]

    def x_to_ctx(self, x, preprocess=True):
        pooled = nn.AvgPool2d(self.kernel_size)(F.pad(x, (self.pad_size,) * 4))
        return self.preprocess_ctx(pooled) if preprocess else pooled

    def preprocess_ctx(self, y_dct):
        # Optionally quantize the context to `ctx_bits` bits.
        return quantize(y_dct, self.ctx_bits)
class ContextLadderDecoder(LadderDecoder):
    # Ladder decoder whose top-most "latent" is a deterministic image context
    # (DCT coefficients or a downsampled image) produced by a context block.
    def __init__(self,
                 num_ch: int,
                 scale_ch_mult: float,
                 block_ch_mult: float,
                 data_ch: int,
                 num_postprocess_blocks: int,
                 likelihood: str,
                 num_mix: int,
                 data_dim: int,
                 weight_norm: bool,
                 batch_norm: bool,
                 latent_scales: list,
                 latent_width: list,
                 num_blocks_per_scale: int,
                 activation: str,
                 arch_mode: str,
                 var_mode: str,
                 softplus_beta: int,
                 z_L_prior: dict,
                 z_L_bits: Union[int, None],
                 disconnect: bool,
                 ctx_type: str,
                 # mode: str,
                 ctx_size: int,
                 ctx_posterior_var: str,
                 ctx_posterior_var_init: float,
                 condition_on_last: bool = False,
                 # add_y_last: bool=False,
                 start_scale_at_x: bool = False,
                 ):
        # Stores every constructor argument (all locals) as attributes.
        self.__dict__.update(locals())
        # Spatial downscaling factor between the image and the deepest scale.
        self.max_scale = 2 ** len(latent_scales)
        if self.start_scale_at_x:
            self.max_scale /= 2
        # NOTE: the parent is constructed with z_L_prior=None; the real
        # prior is attached to the context block below.
        super(ContextLadderDecoder, self).__init__(
            num_ch=num_ch,
            scale_ch_mult=scale_ch_mult,
            block_ch_mult=block_ch_mult,
            data_ch=data_ch,
            num_postprocess_blocks=num_postprocess_blocks,
            likelihood=likelihood,
            num_mix=num_mix,
            data_dim=data_dim,
            weight_norm=weight_norm,
            batch_norm=batch_norm,
            latent_scales=latent_scales,
            latent_width=latent_width,
            num_blocks_per_scale=num_blocks_per_scale,
            activation=activation,
            arch_mode=arch_mode,
            var_mode=var_mode,
            softplus_beta=softplus_beta,
            z_L_prior=None,
            z_L_bits=z_L_bits,
            disconnect=disconnect,
            condition_on_last=condition_on_last,
            start_scale_at_x=start_scale_at_x
        )
        self.init()
        self.z_L_prior = z_L_prior
        # init context decoder block
        self.ctx_size = [data_ch, ctx_size, ctx_size]
        if ctx_type == 'dct':
            ctx_decoder = DCTDecoderBlock(x_size=self.image_size,
                                          ctx_size=self.ctx_size,
                                          ctx_bits=self.z_L_bits,
                                          ctx_posterior=ctx_posterior_var,
                                          ctx_prior=z_L_prior,
                                          max_scale= self.max_scale,
                                          next_ch=self.num_ch[0],
                                          )
        elif ctx_type == 'downsample':
            ctx_decoder = DownsampleDecoderBlock(x_size=self.image_size,
                                                 ctx_size=self.ctx_size,
                                                 ctx_bits=self.z_L_bits,
                                                 ctx_posterior=ctx_posterior_var,
                                                 ctx_prior=z_L_prior,
                                                 max_scale=self.max_scale,
                                                 next_ch=self.num_ch[0],
                                                 )
        # add to the rest of the blocks
        self.decoder_blocks = nn.ModuleList([ctx_decoder, *self.decoder_blocks])
        if not self.disconnect:
            # Extra pathway feeding the image-space context into the top
            # deterministic decoder state.
            self.y_block = nn.Sequential(
                nn.AvgPool2d(kernel_size=int(self.max_scale), stride=int(self.max_scale)),
                nn.Conv2d(self.image_size[0], self.num_ch[0], kernel_size=3, padding=1)
            )
        # self.ctx_q = self.get_ctx_posterior(ctx_posterior_var, ctx_posterior_var_init)
        # if self.disconnect or self.condition_on_last: # or self.add_ctx_to_p:
        #     self.z_L_up = self.init_cond_blocks(data_ch, max_scale=self.max_scale)
    def init_cond_blocks(self, cond_width):
        # Per-scale conditioning paths that pool the context to each latent's
        # resolution and match its channel count.
        # NOTE(review): `cond_width` is unused here (self.data_ch is used
        # instead) — confirm this is intentional.
        z_L_up = nn.ModuleList()
        scale_sizes = [self.max_scale // (2 ** i) for i in range(self.num_scales)]
        for s_num, (s, w) in enumerate(zip(self.latent_scales, self.latent_width)):
            if s > 0 and s_num > 0:
                z_L_up.append(nn.Sequential(
                    # reshape to the latent's size
                    nn.AvgPool2d(kernel_size=int(scale_sizes[s_num]),
                                 stride=int(scale_sizes[s_num])),
                    # change num channels
                    nn.Conv2d(self.data_ch, self.num_ch[s_num], kernel_size=3, padding=1),
                ))
        return z_L_up
    def z_L_post_proc(self, z_L):
        # Map context samples back to image space via the context block.
        z_L = self.decoder_blocks[0].ctx_to_x(z_L)
        return z_L
    def init_dct_normalization(self, loader):
        # Copy the dataset's DCT statistics into the context block; handles
        # both plain datasets and torch Subset wrappers (.dataset.dataset).
        if self.ctx_type == 'dct':
            if hasattr(loader.dataset, 'dataset'):
                self.decoder_blocks[0].dct_mean.data = loader.dataset.dataset.mean
                self.decoder_blocks[0].dct_std.data = loader.dataset.dataset.std
                self.decoder_blocks[0].dct_scale.data = loader.dataset.dataset.scale
            else:
                self.decoder_blocks[0].dct_mean.data = loader.dataset.mean
                self.decoder_blocks[0].dct_std.data = loader.dataset.std
                self.decoder_blocks[0].dct_scale.data = loader.dataset.scale
        if isinstance(self.decoder_blocks[0].ctx_prior, DiffusionPrior):
            # Propagate the DCT scale into the diffusion prior's noise scale.
            S = self.decoder_blocks[0].dct_scale.data
            if self.decoder_blocks[0].ctx_prior.use_noise_scale:
                self.decoder_blocks[0].ctx_prior.noise_scale = nn.Parameter(S, requires_grad=False)
dct_vae | dct_vae-main/model/encoder.py | import torch.nn as nn
import torch
from utils.vae_layers import EncoderResBlock
from utils.thirdparty.blurpool import BlurPool
class _Encoder(nn.Module):
def __init__(self):
super(_Encoder, self).__init__()
def init(self) -> None:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.zeros_(m.bias)
def get_activation(self, name):
return {
'relu': nn.ReLU(),
'silu': nn.SiLU(),
'elu': nn.ELU(),
'gelu': nn.GELU()
}[name]
class PlainEncoder(_Encoder):
    """Convolutional encoder producing (mean, log-var) of a flat latent
    q(z|x); channels double at every strided residual block."""

    def __init__(self,
                 z_dim: int,
                 num_ch: int,
                 data_ch: int,
                 data_dim: int,
                 weight_norm: bool,
                 batch_norm: bool,
                 **kwargs,
                 ):
        super(PlainEncoder, self).__init__()
        # Two distribution parameters per latent: mean and log-variance.
        self.num_q_param = 2
        self.z_dim = z_dim
        strides = self.get_conv_params(data_dim)['strides']
        layers = [nn.Conv2d(data_ch, num_ch, kernel_size=1)]
        channels = num_ch
        for i, stride in enumerate(strides):
            # The last residual block has no activation.
            is_last = (i + 1 == len(strides))
            layers.append(EncoderResBlock(
                in_channels=channels,
                hid_channels=channels,
                out_channels=channels * 2,
                stride=stride,
                activation=None if is_last else nn.SiLU(),
                weight_norm=weight_norm,
                batch_norm=batch_norm,
                num_blocks=1,
            ))
            channels *= 2
        layers.append(nn.Conv2d(channels, 2 * z_dim, kernel_size=1, padding=0))
        layers.append(nn.Flatten())
        self.q_z = nn.Sequential(*layers)
        self.init()

    def forward(self, x) -> tuple:
        # Split the network output into (mean, log-var).
        return torch.chunk(self.q_z(x), self.num_q_param, dim=1)

    @staticmethod
    def get_conv_params(data_dim: int) -> dict:
        return {
            28: {'strides': [2] * 5},
            32: {'strides': [2] * 5},
            64: {'strides': [2] * 6},
        }[data_dim]
class LadderEncoder(_Encoder):
    # Hierarchical (ladder) encoder: a pre-processing stem followed by one
    # residual stack per latent variable, downsampling once per scale.
    def __init__(self,
                 num_ch: int,
                 scale_ch_mult: float,
                 block_ch_mult: float,
                 data_ch: int,
                 num_init_blocks: int,
                 data_dim: int,
                 weight_norm: bool,
                 batch_norm: bool,
                 latent_scales: list,
                 latent_width: list,
                 num_blocks_per_scale: int,
                 activation: str,
                 dset: str = 'mnist',
                 start_scale_at_x: bool = False,
                 ):
        super(LadderEncoder, self).__init__()
        self.dset = dset
        # Shared kwargs for every EncoderResBlock built below.
        self.conv_block_params = {
            'batch_norm': batch_norm,
            'weight_norm': weight_norm,
            'activation': self.get_activation(activation),
            'use_res': True
        }
        self.image_size = [data_ch, data_dim, data_dim]
        self.block_ch_mult = block_ch_mult
        assert len(latent_width) == len(latent_scales)
        # Channel count per scale, growing by scale_ch_mult each scale.
        self.num_ch = [num_ch]
        self.start_scale_at_x = start_scale_at_x
        for i in range(len(latent_scales)-1):
            self.num_ch += [int(self.num_ch[-1] * scale_ch_mult)]
        print('Encoder channels', self.num_ch)
        self.pre_process = self.init_pre_process(num_init_blocks)
        self.latent_scales = latent_scales
        self.latent_width = latent_width
        self.encoder_blocks = self.init_encoder_blocks(num_blocks_per_scale)
        self.init()
    def init_pre_process(self, num_init_blocks: int) -> nn.Module:
        # Stem: 1x1 conv to the base width, then stride-1 residual blocks.
        pre_process = [nn.Conv2d(self.image_size[0], self.num_ch[0], kernel_size=1)]
        for i in range(num_init_blocks):
            pre_process += [
                EncoderResBlock(self.num_ch[0],
                                int(self.num_ch[0] * self.block_ch_mult),
                                self.num_ch[0],
                                stride=1,
                                num_blocks=2,
                                **self.conv_block_params)
            ]
        return nn.Sequential(*pre_process)
    def init_encoder_blocks(self, num_blocks_per_scale: int) -> tuple:
        # One residual stack per latent; the first latent of each scale also
        # halves the spatial resolution (unless start_scale_at_x skips the
        # first pooling).
        backbone = nn.ModuleList()
        pool_k = 2
        if self.start_scale_at_x: # if first scale of latent == data dim
            pool_k = 1
        z_size = self.image_size[-1]
        for s_num, (s, w) in enumerate(zip(self.latent_scales, self.latent_width)):
            for latent in range(s):
                curr_net = []
                for i in range(num_blocks_per_scale):
                    in_ch, out_ch = self.num_ch[s_num], self.num_ch[s_num]
                    # First block of a new scale takes the previous scale's
                    # channel count as input.
                    if s_num > 0 and latent == 0 and i == 0:
                        in_ch = self.num_ch[s_num-1]
                    curr_net += [
                        EncoderResBlock(
                            in_ch,
                            int(in_ch * self.block_ch_mult),
                            out_ch,
                            stride=1,
                            num_blocks=2,
                            **self.conv_block_params)
                    ]
                if latent == 0:
                    z_size /= pool_k
                    print(f'Scale {s_num+1}, {s} latents, out shape: {int(z_size)}')
                    # if this is the first latent of the scale -> reduce size
                    curr_net += [nn.AvgPool2d(kernel_size=pool_k, stride=pool_k)]
                    pool_k = 1
                backbone.append(nn.Sequential(*curr_net))
            pool_k *= 2
        return backbone
    def forward(self, x) -> list:
        # NOTE(review): `prep_x` is not defined or imported anywhere in this
        # file's visible scope — confirm it is provided elsewhere.
        x = prep_x(self.dset, x)
        d = self.pre_process(x)
        s_enc = []
        # Collect the activation after each per-latent block; these become
        # the ladder's deterministic encoder states.
        for enc in self.encoder_blocks:
            d = enc(d)
            s_enc.append(d)
        return s_enc
class SmallLadderEncoder(LadderEncoder):
    """Parameter-sharing variant of LadderEncoder: one trunk block per *scale*
    (instead of per latent); all latents of a scale reuse the same features."""
    def __init__(self,
                 num_ch: int,
                 scale_ch_mult: float,
                 block_ch_mult: float,
                 data_ch: int,
                 num_init_blocks: int,
                 data_dim: int,
                 weight_norm: bool,
                 batch_norm: bool,
                 latent_scales: list,
                 latent_width: list,
                 num_blocks_per_scale: int,
                 activation: str,
                 dset: str = 'mnist',
                 start_scale_at_x: bool = False,
                 ):
        super(SmallLadderEncoder, self).__init__(
            num_ch=num_ch,
            scale_ch_mult=scale_ch_mult,
            block_ch_mult=block_ch_mult,
            data_ch=data_ch,
            num_init_blocks=num_init_blocks,
            data_dim=data_dim,
            weight_norm=weight_norm,
            batch_norm=batch_norm,
            latent_scales=latent_scales,
            latent_width=latent_width,
            num_blocks_per_scale=num_blocks_per_scale,
            activation=activation,
            dset=dset,
            start_scale_at_x=start_scale_at_x,
        )
    def init_encoder_blocks(self, num_blocks_per_scale: int) -> tuple:
        """One nn.Sequential per non-empty scale; scales with s == 0 get no
        block (but still double pool_k so the next scale downsamples more)."""
        backbone = nn.ModuleList()
        pool_k = 2
        if self.start_scale_at_x: # if first scale of latent == data dim
            pool_k = 1
        z_size = self.image_size[-1]
        for s_num, (s, w) in enumerate(zip(self.latent_scales, self.latent_width)):
            # for latent in range(s):
            if s > 0:
                curr_net = []
                for i in range(num_blocks_per_scale):
                    in_ch, out_ch = self.num_ch[s_num], self.num_ch[s_num]
                    if s_num > 0 and i == 0:
                        in_ch = self.num_ch[s_num - 1]
                    curr_net += [
                        EncoderResBlock(
                            in_ch,
                            int(in_ch * self.block_ch_mult),
                            out_ch,
                            stride=1,
                            num_blocks=2,
                            **self.conv_block_params)
                    ]
                z_size /= pool_k
                print(f'Scale {s_num + 1}, {s} latents, out shape: {int(z_size)}')
                # if this is the first latent of the scale -> reduce size
                curr_net += [nn.AvgPool2d(kernel_size=pool_k, stride=pool_k)]
                pool_k = 1
                backbone.append(nn.Sequential(*curr_net))
            pool_k *= 2
        return backbone
    def forward(self, x) -> list:
        """Run each per-scale block once and replicate its output for every
        latent of that scale, so the returned list still has one entry per
        latent (matching LadderEncoder.forward)."""
        x = prep_x(self.dset, x)
        d = self.pre_process(x)
        s_enc = []
        i = 0
        for enc in self.encoder_blocks:
            # skip scales that have no latents
            while self.latent_scales[i] == 0:
                i += 1
            num_latents = self.latent_scales[i]
            d = enc(d)
            for _ in range(num_latents):
                s_enc.append(d)
            i += 1
        return s_enc
def prep_x(dset, x):
    """Re-standardize an input that lives in [-1, 1] to per-dataset statistics.

    The input is first mapped back to the raw pixel range [0, 255] and then
    normalized with the dataset's (mean, std). For the datasets without
    recorded statistics the result equals the input unchanged.

    :param dset: dataset name key
    :param x: tensor (or scalar) scaled to [-1, 1]
    :return: normalized tensor
    :raises ValueError: for an unknown dataset name
    """
    # per-dataset (mean, std) in raw [0, 255] pixel units
    stats = {
        'cifar10': (120.63838, 64.16736),
        'imagnet32': (116.2373, 69.37404),
    }
    for name in ('mnist', 'omniglot', 'svhn', 'celeba'):
        stats[name] = (127.5, 127.5)  # identity: undoes the [-1, 1] mapping
    if dset not in stats:
        raise ValueError(f'Unknown dataset {dset}')
    mean, std = stats[dset]
    raw = x * 127.5 + 127.5  # [-1, 1] -> [0, 255]
    return (raw - mean) / std
| 9,663 | 34.399267 | 84 | py |
dct_vae | dct_vae-main/model/vae.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchmetrics
import torchvision
import wandb
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from utils.distribution import Normal, Bernoulli, Logistic256, MixtureLogistic256
from model.ddgm import DiffusionPrior
def compute_sample_kl(q, p, z, reduce_dim=True):
    """Single-sample Monte-Carlo KL estimate: log q(z) - log p(z).

    ``p`` may either be a distribution (has ``log_prob``) or a tensor of
    already-evaluated per-dimension log-densities.

    :param q: posterior distribution with log_prob(z, reduce_dim=...)
    :param p: prior distribution or tensor of log-densities
    :param z: sample from q, first dim is the mini-batch
    :param reduce_dim: if True, sum over latent dimensions (returns a vector
        of length MB); otherwise keep a MB x dim matrix
    """
    lq = q.log_prob(z, reduce_dim=reduce_dim)
    if not hasattr(p, 'log_prob'):
        # p is a tensor of per-dimension log-densities
        lp = p.reshape(p.shape[0], -1)
        if reduce_dim:
            lp = lp.sum(1)
    else:
        lp = p.log_prob(z, reduce_dim=reduce_dim)
    if not reduce_dim:
        assert lq.shape[1] == lp.shape[1]
    else:
        assert len(lq.shape) == 1, f'Log q should be a vector, got shape {lq.shape} instead'
        assert len(lp.shape) == 1, f'Log p should be a vector, got shape {lp.shape} instead'
    # compute sample kl (averaged over the mini-batch):
    sample_kl = lq - lp
    assert sample_kl.shape[0] == z.shape[0]
    return sample_kl
class LADDER_VAE(nn.Module):
    """Hierarchical (ladder) VAE wrapper: ties encoder + decoder together with
    a configurable observation likelihood, KL warm-up and free-bits."""
    def __init__(self,
                 encoder,
                 decoder,
                 likelihood,
                 beta_start,
                 beta_end,
                 warmup,
                 is_k,
                 latent_scales,
                 free_bits_thr,
                 **kwargs):
        super(LADDER_VAE, self).__init__()
        # stores ALL ctor args (incl. **kwargs dict) as attributes in one shot
        self.__dict__.update(locals())
        self.encoder = encoder
        self.decoder = decoder
        self.current_epoch = None
        # map likelihood name -> callable building the p(x|z) distribution
        self.likelihood = {
            'bernoulli': Bernoulli,
            # softplus(clamp(logvar), beta=0.7) keeps the variance positive and bounded
            'gaussian': lambda mu, logvar: Normal(mu, torch.log(F.softplus(torch.clamp(logvar, -5., 5), 0.7))),
            # fixed log-variance of -log(2*pi)
            'gaussian_zero': lambda mu: Normal(mu, -1 * math.log(2.0*math.pi) * torch.ones_like(mu, device=mu.device)),
            'logistic': Logistic256,
            'logistic_mixture': MixtureLogistic256
        }[likelihood]
        self.fid = None
    def encode(self, batch):
        """Run the encoder on batch[0]; returns the per-latent feature list."""
        x = batch[0]
        encoder_s = self.encoder(x)
        return encoder_s
    def forward(self, batch, mode='train'):
        """Full pass: encode, decode, build p(x|z) and draw a reconstruction."""
        encoder_s = self.encode(batch)
        p_xz_params, p_dist, q_dist, z_samples = self.decoder(encoder_s, mode = mode)
        p_xz = self.likelihood(*p_xz_params)
        x_sample = self.get_x_from_px(p_xz)
        return x_sample, p_xz, z_samples, q_dist, p_dist
    def generate_x(self, N=25, t=None):
        """Unconditional sampling: decode with all encoder features set to None."""
        enc_s = [None for _ in range(sum(self.latent_scales))]
        p_xz_params, _, _, _ = self.decoder(enc_s, N=N, t=t)
        p_xz = self.likelihood(*p_xz_params)
        return self.get_x_from_px(p_xz)
    def get_x_from_px(self, p_xz):
        """Turn the likelihood into an image in [-1, 1]."""
        # for binary output we get the mean and scale to [-1, 1]
        if isinstance(p_xz, Bernoulli):
            x_rec = p_xz.get_E()
            x_rec = x_rec * 2 - 1
        # for the rest sample from the likelihood
        else:
            x_rec = p_xz.sample()
        return x_rec
    def eval_log_p_x(self, p_xz, x):
        """log p(x|z); note the Bernoulli branch *samples* a binarization of x,
        so repeated calls are stochastic for binary likelihoods."""
        # for bernoulli we need to scale to [0, 1] and get a sample.
        if isinstance(p_xz, Bernoulli):
            x = 0.5 * (x + 1)
            x = torch.bernoulli(x)
        return p_xz.log_prob(x)
def kl(self, fwd_output, reduce_dim=True):
_, _, z_sample, q_dist, p_dist = fwd_output
logs = {}
kl_vals = []
l = 0
for z, q, p in zip(z_sample, q_dist, p_dist):
if hasattr(q, 'kl'):
try:
MB = z.shape[0]
kl = q.kl(p).reshape(MB, -1) # MB x dim
if self.free_bits_thr > 0:
kl = self.free_bits(kl)
if reduce_dim:
kl = kl.sum(1)
except:
kl = compute_sample_kl(q, p, z, reduce_dim=reduce_dim)
else:
kl = compute_sample_kl(q, p, z, reduce_dim=reduce_dim)
kl_vals.append(kl)
bpd_coef = 1. / np.log(2.) / np.prod(z.shape[1:])
n = '-' + str(l) if l > 0 else ''
logs[f'kl_L{n}'] = kl.mean() * bpd_coef
l += 1
# return tensor MB x L and the logs
# ddgm loss
if hasattr(p_dist[0], 'log_prob'):
nll_ddgm = - p_dist[0].log_prob(z_sample[0])
else:
nll_ddgm = - p_dist[0]
logs['nll_z_L_prior'] = nll_ddgm.reshape(nll_ddgm.shape[0], -1).sum(1).mean(0)
if reduce_dim:
kl_vals = torch.stack(kl_vals, 1)
return kl_vals, logs
def free_bits(self, kl):
min_KL_per_dim = self.free_bits_thr / kl.shape[1] * torch.ones_like(kl)
kl = torch.cat([kl.unsqueeze(-1), min_KL_per_dim.unsqueeze(-1)], -1)
return kl.max(dim=-1)[0]
    def calculate_active_units(self, fwd_output):
        """Active-unit statistics over the mini-batch: variance of E_q[z]
        across data points, per latent dimension (Burda et al., 2015)."""
        _, _, z_sample, q_dist, p_dist = fwd_output
        MB = z_sample[0].shape[0]
        logs = {}
        # AU as in Burda'15 (Cov_x E[z])
        E_q = [q.get_E().reshape(MB, -1).data.cpu() for q in q_dist]
        unit_cnt = sum([mu.shape[1] for mu in E_q])
        cov = [torch.var(mu, dim=0).float() for mu in E_q]
        # clamp avoids log(0) for completely inactive units
        logs['misc/Active units (log Cov)'] = [torch.log(torch.clamp(c, 1e-10)) for c in cov]
        logs['misc/% Active units (Cov > 0.01)'] = sum(torch.cat(cov) > 0.01)/unit_cnt
        return logs
    def compute_loss(self, batch, fwd_output, beta):
        """Negative ELBO with KL weight beta, reported per pixel (bits/dim
        scaling via bpd_coef).

        :return: (loss_per_pixel, logs)
        """
        x_sample = fwd_output[0]
        p_xz = fwd_output[1]
        # ndim = np.prod(x_sample.shape[1:])
        bpd_coef = 1. / np.log(2.) / np.prod(x_sample.shape[1:])
        logs = {}
        # data term
        re = - self.eval_log_p_x(p_xz, batch[0]).mean(0) # MB -> 1
        # KL-divergence (L values)
        kl, logs_kl = self.kl(fwd_output)
        kl = kl.sum(1).mean() # Mb x L -> 1
        logs.update(logs_kl)
        loss = re + beta * kl
        loss_per_pixel = loss * bpd_coef
        loss_logs = {
            'reconstruction': re.data,
            'kl': kl.sum().data,
            'loss': loss.data,
            'loss_per_pixel': loss_per_pixel.data,
        }
        logs.update(loss_logs)
        return loss_per_pixel, logs
def get_beta(self):
beta = self.beta_end
if self.current_epoch is not None:
if self.current_epoch < self.warmup:
dlt = (self.beta_end - self.beta_start) / self.warmup
beta = self.beta_start + self.current_epoch * dlt
return beta
    def train_step(self, batch, mode='train'):
        """One optimization/validation step; returns (loss, logs).

        During validation additionally logs active units and log-variance
        summaries of p(x|z) and the top-latent q/p distributions.
        """
        MB = batch[0].shape[0]
        fwd_output = self.forward(batch, mode=mode)
        x_sample, p_xz, z_samples, q_dist, p_dist = fwd_output
        beta = 1
        if mode == 'train':
            beta = self.get_beta()
        loss, logs = self.compute_loss(batch, fwd_output, beta)
        if mode == 'val':
            logs.update(self.calculate_active_units(fwd_output))
            if hasattr(p_xz, 'log_var'):
                lv = p_xz.log_var.reshape(MB, -1).mean(0).data.cpu()
                logs['misc/lnVar_pxz'] = [lv]
                logs['misc/lnVar_pxz_pp'] = lv.mean()
            L = len(z_samples) # number of latent variables
            # only the top latent (l == 0) is logged here
            for l in range(1):
                n = '-' + str(l) if l > 0 else ''
                if hasattr(p_dist[l], 'log_var'):
                    lv = p_dist[l].log_var
                    lv = lv.reshape(lv.shape[0], -1).mean(0).data.cpu()
                    # logs[f'misc/lnVar_pz_L{n}'] = [lv]
                    logs[f'misc/lnVar_pz_L{n}_pp'] = lv.mean()
                if hasattr(q_dist[l], 'log_var'):
                    lv = q_dist[l].log_var.reshape(MB, -1).mean(0).data.cpu()
                    # logs[f'misc/lnVar_qz_L{n}'] = [lv]
                    logs[f'misc/lnVar_qz_L{n}_pp'] = lv.mean()
        return loss, logs
def test_step(self, batch, compute_fid=True):
"""
Here we return all metrics sumed over the batch
:param batch:
:return: logs (dict)
"""
# remove free bits threshould, otherwise the loss is not correct
self.free_bits_thr = 0
# Forward
fwd_output = self.forward(batch, mode='test')
x_rec, p_xz = fwd_output[0], fwd_output[1]
x = batch[0]
MB = x.shape[0]
# Reconstructions and KL term
kl, _ = self.kl(fwd_output)
logs = {
're': - self.eval_log_p_x(p_xz, x).sum(0),
'kl': kl.sum(),
'elbo': - self.eval_log_p_x(p_xz, x).sum(0) + kl.sum(),
}
# MSE (max squared dist is 4)
mse = (x_rec - x) ** 2
mse = mse.reshape(MB, -1).mean(1).sum()
logs['mse'] = mse
# Compute IWAE
nll, nll_logs = self.estimate_nll(batch, self.is_k)
logs['nll'] = nll.sum(0)
logs.update(nll_logs)
if compute_fid:
# Compute FID
samples = self.generate_x(MB).data
# rescale to [0, 255]
# 0.5 * (x + 1)
samples = (255 * 0.5 * (samples + 1)).type(torch.uint8)
true_data = (255 * 0.5 * (x + 1)).type(torch.uint8)
if self.fid is None:
self.fid = torchmetrics.image.fid.FrechetInceptionDistance()
self.fid = self.fid.to(x.device)
# self.fid = self.fid.cuda(non_blocking=True)
if samples.shape[1] == 1:
samples = samples.repeat(1, 3, 1, 1)
true_data = true_data.repeat(1, 3, 1, 1)
self.fid.update(true_data, real=True)
self.fid.update(samples, real=False)
return logs
    def estimate_nll(self, batch, K=100):
        """
        Estimate NLL by importance sampling (IWAE bound with K samples).
        :param batch: mini-batch tuple; batch[0] is the input
        :param K: samples per observation
        :return: (per-point NLL estimates, logs)
        """
        x = None
        MB = batch[0].shape[0]
        # NOTE(review): elbo is allocated on CPU; presumably batch tensors are
        # moved/compatible by the caller — confirm on GPU runs.
        elbo = torch.zeros(K, MB)
        logs = {'nll_z_L': 0,}
        for k in range(K):
            fwd_output = self.forward(batch, mode='test')
            _, p_xz, z_sample, q_dist, p_dist = fwd_output
            if x is None:
                x = batch[0]
                # binarize once and reuse the same x for all K samples
                if isinstance(p_xz, Bernoulli):
                    x = torch.bernoulli(0.5 * (x + 1))
            re = - p_xz.log_prob(x) # MB
            # KL-divergence (L values)
            kl, kl_logs = self.kl(fwd_output)
            kl = kl.sum(1)
            assert re.shape == kl.shape
            elbo[k] = - re - kl
            logs['nll_z_L'] += kl_logs['nll_z_L_prior'] * MB / K
        # log-mean-exp over the K importance samples
        ll = torch.logsumexp(elbo, 0) - np.log(K)
        return -ll, logs
    def ladder_reconstructions(self, encoder_s, im):
        """Visualization grid for image index ``im``: progressively fix more
        top-level posteriors (bottom ones sampled from the prior) and decode
        n_sample variants per row."""
        N_z = len(encoder_s)
        n_sample = 5
        # for im in range(4):
        rows = []
        for z_num in range(0, N_z, self.get_freq(N_z)):
            q_curr = [s_e[im:im+1].repeat(n_sample, 1, 1, 1) for s_e in encoder_s.copy()]
            # create [0, 0, .., q_{N-z_num}, ..., q_L]
            for i in range(N_z - z_num - 1):
                q_curr[i] = None
            p_xz_params, _, _, z_samples = self.decoder(q_curr)
            p_xz = self.likelihood(*p_xz_params)
            x = self.get_x_from_px(p_xz)
            rows.append(x)
        return torchvision.utils.make_grid(torch.cat(rows, 0), nrow=n_sample,
                                           normalize=True, scale_each=True)
def get_freq(self, N_z):
if N_z < 10:
return 1
elif N_z <= 20:
return 2
elif N_z <= 40:
return 4
else:
return 10
    def val_pics(self, batch, fwd_output):
        """Build the validation wandb image logs: ladder reconstructions,
        ladder samples, and top-prior diagnostics."""
        logs = {}
        encoder_s = self.encode(batch)
        n_sample = 5
        freq = self.get_freq(len(encoder_s))
        for im in range(4):
            logs[f'ladder/h_{im}'] = wandb.Image(
                self.ladder_reconstructions(encoder_s, im)
            )
        # sampling
        for im in range(4):
            z_L, p_xz_params_rows = self.decoder.forward_sample(N=n_sample, freq=freq)
            if len(p_xz_params_rows) > 0:
                x_s = [self.get_x_from_px(self.likelihood(*params)) for params in p_xz_params_rows]
                # upsample z_L to image size and crop channels so it can be shown as first row
                z_L = nn.Upsample(size=x_s[0].shape[-1])(z_L)[:, :x_s[0].shape[1]]
                x_s = [z_L] + x_s
                logs[f'ladder_sample/h_{im}'] = wandb.Image(
                    torchvision.utils.make_grid(torch.cat(x_s, 0),
                                                nrow=n_sample,
                                                normalize=True,
                                                scale_each=True))
        logs.update(self.visualize_z_L_prior(batch, fwd_output))
        return logs
def process_z_L_samples(self, z_L):
return z_L[:, :3]
    def visualize_z_L_prior(self, batch, fwd_output):
        """Diagnostics for the top-latent prior. Always logs a prior sample;
        for a DiffusionPrior additionally logs per-step losses, noising
        ('q') samples, denoising reconstructions and z_T statistics."""
        logs = {}
        s_dec = self.decoder.s_L.repeat(8, 1, 1, 1)
        _, _, z_L_sample, _ = self.decoder.decoder_blocks[0](None, s_dec=s_dec, mode='val', t=1.0)
        logs[f'z_L_prior/sample'] = wandb.Image(self.process_z_L_samples(z_L_sample).data.cpu())
        if isinstance(self.decoder.z_L_prior, DiffusionPrior):
            ddgm = self.decoder.z_L_prior
            # compute loss for each step
            z_L = fwd_output[2][0]
            losses = []
            for t in range(ddgm.T):
                timestamps = torch.ones(z_L.shape[0], ).long().to(batch[0].device) * t
                losses.append(ddgm._step_loss(z_L, timestamps).cpu())
            losses = torch.stack(losses, 1)
            fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(8, 6))
            ax.errorbar(range(losses.shape[1]), losses.mean(0), yerr=losses.std(0),
                        label='Val. log_prob')
            ax.plot(ddgm._ll_hist, label='Running mean')
            ax.legend()
            ax.grid(True)
            logs[f'z_L_prior/ddgm_loss'] = wandb.Image(fig)
            plt.close()
            # plot 'reconstructions' for each step
            freq = self.get_freq(ddgm.T)
            q_samples = [self.process_z_L_samples(z_L).cpu()]
            reconstruction = [self.process_z_L_samples(z_L).cpu()]
            all_steps = range(ddgm.T)
            if ddgm.use_noise_scale:
                z_L = z_L * ddgm.noise_scale
                # z_L = z_L / ddgm.noise_scale.abs().mean()
            for t in all_steps:
                # noising step t, then (every freq steps) reverse back to t=0
                timestamps = torch.ones(z_L.shape[0], ).long().to(z_L.device) * t
                z_t = ddgm.q_sample(z_L, timestamps)
                z_inv = z_t.clone()
                if t % freq == 0:
                    for t_inv in all_steps[:t + 1][::-1]:
                        timestamps = torch.ones(z_L.shape[0], ).long().to(z_L.device) * t_inv
                        z_inv = ddgm.p_sample(z_inv, timestamps, temp=0.8)
                    if ddgm.use_noise_scale:
                        # z_inv = z_inv * ddgm.noise_scale.abs().mean()
                        z_inv = z_inv / ddgm.noise_scale
                    reconstruction.append(self.process_z_L_samples(z_inv).cpu())
                if ddgm.use_noise_scale:
                    # z_t = z_t * ddgm.noise_scale.abs().mean()
                    z_t = z_t / ddgm.noise_scale
                q_samples.append(
                    self.process_z_L_samples(z_t).cpu()
                )
            T = ddgm.T + 1
            res = []
            for i in range(4):
                res += [q_samples[t][i:i + 1] for t in range(0, T, freq)]
            zs_inv = torchvision.utils.make_grid(torch.cat(res, 0),
                                                 nrow=len(range(0, T, freq)),
                                                 normalize=True, scale_each=False)
            logs[f'z_L_prior/ddgm_q_samples'] = wandb.Image(zs_inv)
            timestamps = torch.ones(z_L.shape[0], ).long().to(z_L.device) * (ddgm.T - 1)
            z_T = ddgm.q_sample(z_L, timestamps)
            fig, ax = plt.subplots(ncols=5, figsize=(15, 3), sharey=False)
            cmap = cm.get_cmap('coolwarm')
            normalizer = Normalize(-1.5, 1.5)
            im = cm.ScalarMappable(norm=normalizer, cmap=cmap)
            for i in range(4):
                ax[i].plot(z_T[i].reshape(-1).cpu())
                n = z_T[i].reshape(-1).shape[0]
                ax[i].hlines(y=z_T[i].reshape(-1).mean().cpu(), xmin=0, xmax=n)
                ax[i].grid()
            # `n` intentionally reused from the last loop iteration above
            ax[-1].matshow(np.corrcoef(z_T.cpu().reshape(-1, n).t()), norm=normalizer,
                           cmap=cmap)
            fig.colorbar(im, ax=ax);
            logs[f'z_L_prior/z_T'] = wandb.Image(fig)
            plt.close()
            res = []
            for i in range(4):
                res += [reconstruction[t][i:i + 1] for t in range(len(reconstruction))]
            ys_inv = torchvision.utils.make_grid(torch.cat(res, 0),
                                                 nrow=len(reconstruction),
                                                 normalize=True, scale_each=False)
            logs[f'z_L_prior/ddgm_reconstructions'] = wandb.Image(ys_inv)
        return logs
| 17,187 | 38.512644 | 119 | py |
dct_vae | dct_vae-main/model/decoder.py | import math
from typing import Optional, Union
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
from hydra.utils import instantiate
from utils.vae_layers import DecoderResBlock
from datasets.dct import DCT, YCBCR_to_RGB
from utils.distribution import Normal, Delta
from model.ddgm import DiffusionPrior, DiffusionDCTPrior
from utils.thirdparty.unet import UNetModel
from model.plain_decoder import _Decoder, PlainDecoder
class _DecoderBlock(nn.Module):
    """
    Single block of the Deep Hierarchical VAE decoder.
    Input:  s_enc - features from the encoder, from the same level
            s_dec - feature from the previous decoder level
    Output: p(z_i | ... ) - prior for the current latent
            q(z_i | x, ...) - variational posterior for the current latent

    Subclasses implement :meth:`get_q_p_dist`.
    """
    def __init__(self,
                 in_channels: int,
                 ch_mult: float,
                 num_blocks_per_scale: int,
                 p_width: int,
                 out_channels: int,
                 z_width: int,
                 upsample: Union[int, None],
                 conv_block_params: dict,
                 var_mode: str,
                 softplus_beta: Union[float, None],
                 top_prior: Union[nn.Module, None] = None,
                 ):
        super().__init__()
        # stores all ctor args as attributes in one shot
        self.__dict__.update(locals())
        # how to model variance (in both q and p)
        assert var_mode in ['log', 'softplus'], 'Unknown variance type'
        # 1x1 conv lifting z back to the feature width
        self.z_up = nn.Conv2d(z_width, in_channels, 1, padding=0)
        # Define backbone NN
        self.resnet = nn.Sequential(*[
            DecoderResBlock(in_channels,
                            int(in_channels * ch_mult),
                            in_channels if b_num + 1 < num_blocks_per_scale else out_channels,
                            stride=1,
                            mode='2x3',
                            use_res=True,
                            zero_last=False,
                            **conv_block_params)
            for b_num in range(num_blocks_per_scale)
        ])
        # Upsample output of the block (if required)
        self.upsample = nn.Upsample(size=upsample, mode='nearest') \
            if upsample is not None else nn.Identity()
        # define NN to get parameters of the prior
        self.top_prior = top_prior
        self.init_p_net()
    def init_p_net(self):
        """Create the prior head; skipped when an explicit top prior is given."""
        if self.top_prior is None:
            self.s_to_p = DecoderResBlock(self.in_channels,
                                          int(self.in_channels*self.ch_mult),
                                          self.p_width,
                                          stride=1,
                                          use_res=False,
                                          zero_last=True,
                                          **self.conv_block_params)
    def get_logvar(self, lv):
        """
        Given the output of the NN, returns the log-variance of the distribution
        (softplus-transformed if var_mode == 'softplus'), clamped to [-5, 0].
        :param lv: output of the NN for the var
        :return: clamped log-variance tensor
        """
        if self.var_mode == 'softplus':
            sp = nn.Softplus(self.softplus_beta)
            lv = torch.log(sp(lv))
        return torch.clamp(lv, -5, 0)
    def get_q_p_dist(self, s_enc, s_dec, mode):
        raise NotImplementedError
    def forward(self, s_enc, s_dec, mode, t=None):
        """
        :param s_enc: encoder features [MB, z_width, scale, scale]; None -> sample from the prior
        :param s_dec: (s_{i+1}) [MB, in_ch, sc, sc]
        :param mode: 'train' | 'val' | 'test'
        :param t: sampling temperature
        :return: (p, q, z, s_out) - prior (or its log-prob for a diffusion
            prior), posterior, latent sample, features for the next block
        """
        p_dist, q_dist, s_dec = self.get_q_p_dist(s_enc, s_dec, mode)
        assert mode in ['train', 'val', 'test']
        # if mode == 'decode':
        if s_enc is not None: # -> decoding
            z = q_dist.sample(t=t)
        else: # -> sampling
            if self.top_prior is None:
                z = p_dist.sample(t=t)
            else:
                N = s_dec.shape[0]
                z = p_dist.sample(N, t=t)
        s_dec = self.upsample(self.resnet(s_dec + self.z_up(z)))
        if isinstance(p_dist, DiffusionPrior):
            # for a diffusion prior return its log-likelihood of z instead of a distribution
            if mode == 'test':
                p_dist = p_dist.eval_is_ll(z, is_k=2)
            else:
                p_dist = p_dist.log_prob(z, mode=mode)
        return p_dist, q_dist, z, s_dec
class DecoderBlock(_DecoderBlock):
    def __init__(self,
                 in_channels: int,
                 z_width: int,
                 ch_mult: float,
                 out_channels: int,
                 num_blocks_per_scale: int,
                 conv_block_params: dict,
                 upsample: Union[int, None],
                 var_mode: str,
                 softplus_beta: Union[float, None],
                 disconnect: bool=False,
                 top_prior: Union[nn.Module, None] = None,
                 ):
        """
        -------------- s_dec---------------
        |              |                  |
        s_enc -> q     |                 p, h
                 |
                 z ~ q if decoding else p
                 |
        z + s_dec + h
              | (resnet)
            s_out

        Implements the decoder block from the vdvae paper.
        s_enc and s_dec are inputs from the previous blocks (encoder and
        decoder correspondingly).
        """
        super(DecoderBlock, self).__init__(
            in_channels=in_channels,
            ch_mult=ch_mult,
            num_blocks_per_scale=num_blocks_per_scale,
            p_width=2 * z_width + in_channels,
            out_channels=out_channels,
            z_width=z_width,
            upsample=upsample,
            conv_block_params=conv_block_params,
            var_mode=var_mode,
            softplus_beta=softplus_beta,
            top_prior=top_prior,
        )
        # if disconnect is True, q will only depend on the s_enc
        self.disconnect = disconnect
        self.s_to_q = DecoderResBlock(in_channels,
                                      max(int(in_channels*ch_mult), 1),
                                      2*z_width,
                                      stride=1,
                                      mode='2x3',
                                      use_res=False,
                                      zero_last=False,
                                      **conv_block_params)
        self.init_q()
    def init_q(self):
        """Re-initialize the last conv of the q head with uniform(-1, 1) weights."""
        i = -2
        if isinstance(self.s_to_q.net[-1], nn.Conv2d):
            i = -1
        nn.init.uniform_(self.s_to_q.net[i].weight, -1, 1)
    def get_q_p_dist(self, s_enc, s_dec, mode):
        """Build the prior p(z_i|.) and posterior q(z_i|x, .) for this level.

        Returns (p_dist, q_dist, s_dec) where s_dec has the prior's extra
        features h added (when this block owns a learned prior head).
        """
        if self.top_prior is None:
            # get parameters of the prior
            p_out = self.s_to_p(s_dec)
            p_params, h = p_out[:, :2 * self.z_width], p_out[:, 2 * self.z_width:]
            p_mu, p_logvar = torch.chunk(p_params, 2, dim=1)
            p_logvar = self.get_logvar(p_logvar)
            # if t is not None:
            #     p_logvar += torch.ones_like(p_logvar) * math.log(t)
            p_dist = Normal(p_mu, p_logvar)
        else:
            p_dist = self.top_prior
            if isinstance(p_dist, Normal):
                # in-place clamp of the shared top prior's log-variance
                with torch.no_grad():
                    p_dist.log_var.clamp_(-4, 0)
        # get parameters of the variational posterior
        if s_enc is not None:
            if self.disconnect:
                s = s_enc
            else:
                s = s_enc + s_dec
            q_mu, q_logvar = torch.chunk(self.s_to_q(s), 2, dim=1)
            if isinstance(p_dist, DiffusionPrior):
                # diffusion prior expects means in (-1, 1)
                q_mu = torch.tanh(q_mu)
            q_logvar = self.get_logvar(q_logvar)
            q_dist = Normal(q_mu, q_logvar)
        else:
            q_dist = None
        # add prior features to the output
        if self.top_prior is None:
            s_dec = s_dec + h
        return p_dist, q_dist, s_dec
class LadderDecoder(_Decoder):
    """Top-down decoder of the ladder VAE: a chain of DecoderBlocks from the
    top latent z_L down to z_1, followed by a post-processing head that emits
    the parameters of p(x|z)."""
    def __init__(self,
                 num_ch: int,
                 scale_ch_mult: float,
                 block_ch_mult: float,
                 data_ch: int,
                 num_postprocess_blocks: int,
                 likelihood: str,
                 num_mix: int,
                 data_dim: int,
                 weight_norm: bool,
                 batch_norm: bool,
                 latent_scales: list,
                 latent_width: list,
                 num_blocks_per_scale: int,
                 activation: str,
                 arch_mode: str,
                 var_mode: str,
                 softplus_beta: int,
                 z_L_prior: dict,
                 z_L_bits: Union[int, None],
                 disconnect: bool,
                 condition_on_last: bool = False,
                 start_scale_at_x: bool = False,
                 ):
        super(LadderDecoder, self).__init__()
        # stores all ctor args as attributes in one shot
        self.__dict__.update(locals())
        assert len(latent_width) == len(latent_scales)
        self.conv_block_params = {
            'batch_norm': batch_norm,
            'weight_norm': weight_norm,
            'activation': self.get_activation(activation),
        }
        # factory building one decoder block from per-block params
        self._decoder_block = lambda args: {
            # 'delta': DecoderBlockDelta,
            'separate': DecoderBlock
        }[arch_mode](**args,
                     ch_mult=block_ch_mult,
                     num_blocks_per_scale=num_blocks_per_scale,
                     conv_block_params=self.conv_block_params,
                     var_mode=var_mode,
                     softplus_beta=softplus_beta,
                     disconnect=disconnect,
                     )
        self.num_scales = len(latent_scales)
        self.num_latents = sum(latent_scales)
        self.image_size = [data_ch, data_dim, data_dim]
        self.num_ch = [num_ch]
        for i in range(self.num_scales-1):
            self.num_ch += [int(self.num_ch[-1] * scale_ch_mult)]
        # reverse the order of latents: from top (z_L) to bottom (z_1)
        self.num_ch.reverse()
        self.latent_scales.reverse()
        self.latent_width.reverse()
        print('Decoder channels', self.num_ch)
        self.num_p_param = _Decoder.get_num_lik_params(likelihood)
        # create dummy s_L input (learnable-shaped constant seed for the top block)
        L_dim = data_dim // (2 ** self.num_scales)
        if start_scale_at_x:
            L_dim = (2 * data_dim) // (2 ** self.num_scales)
        self.s_L = nn.Parameter(torch.zeros(1, self.num_ch[0], L_dim, L_dim),
                                requires_grad=False)
        # init the NNs; a plain Normal top prior is handled inside the blocks
        if isinstance(z_L_prior, Normal):
            z_L_prior = None
        self.decoder_blocks = self.init_decoder_blocks(z_L_prior)
        self.post_process = self.init_post_process()
        if self.disconnect or self.condition_on_last:
            # if we use "disconnected" decoder we need conditioning on z_L for each latent
            # init the NN which will reshape z_L to the size of s_enc (they will be summed up)
            self.z_L_up = self.init_cond_blocks(self.latent_width[0])
        self.init()
    def forward(self,
                encoder_s: list,
                N: Optional[int] = None,
                t: Optional[float] = None,
                mode: str = 'train',
                ):
        """
        Decoder of the ladder VAE.
        :param encoder_s: list of deterministic features from encoder
                          in the bottom-up order [q_1, ..., q_L]; entries may
                          be None to sample those latents from the prior.
                          NOTE: the list is reversed in place.
        :param N: number of samples from p(z_L) (ignored when encoder
                  features are present)
        :param t: temperature
        :return: tuple(p_xz_parameters, p_dist, q_dist, z_samples):
                 parameters of the conditional generative distribution p(x|{z}),
                 list of prior distributions,
                 list of posterior distributions (or None),
                 list of latent variables z
        """
        encoder_s.reverse() # [s_enc_L, s_enc_{L-1}, ..., s_enc_1]
        # init s_dec
        if encoder_s[0] is not None: # -> reconstruction
            N = encoder_s[0].shape[0]
        s_dec = self.s_L.repeat(N, 1, 1, 1)
        p_dist = []
        q_dist = []
        z_list = []
        scale, s = s_dec.shape[-1], 0
        N_blocks = len(self.decoder_blocks)
        for i, dec_block in enumerate(self.decoder_blocks):
            s_enc = encoder_s[i]
            if i > 0:
                if self.disconnect and s_enc is not None:
                    # inject the (processed) top latent into the encoder features
                    s_enc = s_enc + self.z_L_up[i](z_L)
                # print('s_enc', s_enc.shape, 's_dec', s_dec.shape)
            p, q, z, s_dec = dec_block(s_enc, s_dec, mode, t=t)
            p_dist.append(p)
            q_dist.append(q)
            z_list.append(z)
            if i == 0:
                z_L = self.z_L_post_proc(z_list[0])
            if self.condition_on_last:
                # add z_L conditioning once per scale change (first scale handled by block 0)
                if scale != s_dec.shape[-1] and i < (N_blocks - 1):
                    s_dec = s_dec + self.z_L_up[s](z_L)
                    s += 1
                    scale = s_dec.shape[-1]
        if self.disconnect:
            s_dec = s_dec + self.z_L_up[-1](z_L)
        p_xz_params = self.get_p_xz_params(self.post_process(s_dec))
        return p_xz_params, p_dist, q_dist, z_list
    def forward_sample(self, N: int = 1, freq: int =1):
        """Visualization sampler: run one full top-down pass caching the
        deterministic state after each block, then for every freq-th prefix
        re-sample the remaining blocks N times.

        :return: (z_L repeated N times, list of p(x|z) parameter tuples)
        """
        # init s_dec
        s_dec = self.s_L
        s_saved = [s_dec]
        s_ind = [0]
        z_L = None
        scale, s = self.s_L.shape[-1], 0
        N_blocks = len(self.decoder_blocks)
        for i, dec_block in enumerate(self.decoder_blocks):
            p, q, z, s_dec = dec_block(None, s_dec, 'train')
            if i == 0:
                z_L = self.z_L_post_proc(z)
            if self.condition_on_last:
                if scale != s_dec.shape[-1] and i < (N_blocks - 1):
                    s_dec = s_dec + self.z_L_up[s](z_L)
                    s += 1
                    scale = s_dec.shape[-1]
            s_ind.append(s)
            s_saved.append(s_dec)
        rows = []
        z_L = z_L.data.repeat(N, 1, 1, 1)
        for n_fixed in range(1, len(s_saved), freq):
            s_dec = s_saved[n_fixed].repeat(N, 1, 1, 1)
            scale = s_dec.shape[-1]
            s = s_ind[n_fixed]
            for j in range(n_fixed, len(self.decoder_blocks)):
                p, q, z, s_dec = self.decoder_blocks[j](None, s_dec, 'train')
                if self.condition_on_last:
                    # NOTE(review): this condition reuses `i` left over from the
                    # first loop (always N_blocks - 1 here) instead of `j` — looks
                    # like a latent bug; confirm intended behavior.
                    if scale != s_dec.shape[-1] and i < (N_blocks - 1):
                        s_dec = s_dec + self.z_L_up[s](z_L)
                        s += 1
                        scale = s_dec.shape[-1]
            if self.disconnect:
                s_dec = s_dec + self.z_L_up[-1](z_L)
            p_xz_params = self.get_p_xz_params(self.post_process(s_dec))
            rows.append(p_xz_params)
        return z_L, rows
    def init_cond_blocks(self, cond_width):
        """Per-scale adapters (upsample + 1x1 conv) that reshape the top latent
        z_L (cond_width channels) to each scale's feature size/width so it can
        be added to s_enc/s_dec. Only non-empty, non-top scales get one."""
        z_L_up = nn.ModuleList()
        z_L_dim = self.s_L.shape[-1]
        scale_sizes = [z_L_dim * (2 ** i) for i in range(self.num_scales+1)]
        # for s_num in range(self.num_scales):
        for s_num, (s, w) in enumerate(zip(self.latent_scales, self.latent_width)):
            if s > 0 and s_num > 0:
                # reshape to the latent's size and change num channels
                z_L_up.append(nn.Sequential(
                    nn.Upsample(size=scale_sizes[s_num], mode='nearest'),
                    nn.Conv2d(cond_width, self.num_ch[s_num], kernel_size=1, padding=0),
                ))
        return z_L_up
    def z_L_post_proc(self, z_L):
        """Identity hook; subclasses may transform the top latent before it is
        used for conditioning."""
        return z_L
    def init_decoder_blocks(self, top_prior) -> nn.ModuleList:
        """Build the top-down chain of decoder blocks (one per latent).

        The last latent of each scale upsamples to the next scale's size and
        switches to the next scale's channel width; only the very first block
        receives the explicit top prior. Blocks get a 1/sqrt(num_latents)
        weight rescale on their residual output for stable deep-VAE training.
        """
        decoder_backbone = nn.ModuleList()
        # S_L = self.image_size[1] // (2 ** self.num_scales)
        z_L_dim = self.s_L.shape[-1]
        scale_sizes = [z_L_dim * (2 ** i) for i in range(1, self.num_scales + 1)]
        scale_sizes[-1] = self.image_size[1]
        # empty scales collapse: propagate the following scale's size backwards
        for i in range(self.num_scales)[::-1]:
            if self.latent_scales[i] == 0:
                scale_sizes[i-1] = scale_sizes[i]
        for s_num, (s, w) in enumerate(zip(self.latent_scales, self.latent_width)):
            ss = scale_sizes[s_num-1] if s_num > 0 else z_L_dim
            print(f'Scale {self.num_scales-s_num}, {s} latents, out shape: {int(ss)}')
            for latent in range(s):
                out_ch = self.num_ch[s_num]
                is_last = latent+1 == s
                if is_last and s_num+1 < len(self.latent_scales):
                    out_ch = self.num_ch[s_num+1]
                block_params = {
                    'in_channels': self.num_ch[s_num],
                    'z_width': w,
                    'out_channels': out_ch,
                    'upsample': scale_sizes[s_num] if is_last else None,
                    'top_prior': top_prior if (s_num + latent) == 0 else None,
                }
                decoder_backbone.append(self._decoder_block(block_params))
                # stable init for the resnet
                res_nn = decoder_backbone[-1].resnet[-1]
                res_nn.net[res_nn.last_conv_id].weight.data *= math.sqrt(1. / self.num_latents)
                decoder_backbone[-1].z_up.weight.data *= math.sqrt(1. / self.num_latents)
        return decoder_backbone
    def init_post_process(self) -> nn.Sequential:
        """Final head mapping decoder features to p(x|z) parameters: res
        blocks, a 3x3 conv (zero-initialized bias) and, for single-parameter
        likelihoods, a Sigmoid."""
        act_out = nn.Sigmoid() if self.num_p_param == 1 else nn.Identity()
        post_net = []
        for i in range(self.num_postprocess_blocks):
            is_last = i+1 == self.num_postprocess_blocks
            post_net.append(
                DecoderResBlock(self.num_ch[-1],
                                int(self.num_ch[-1]*self.block_ch_mult),
                                self.num_ch[-1],
                                stride=1,
                                mode='2x3',
                                use_res=True,
                                zero_last=True if is_last else False,
                                **self.conv_block_params)
            )
        out_ch = self.num_p_param * self.image_size[0]
        if self.likelihood == 'logistic_mixture':
            # per mixture component: parameters for each channel + 1 mixing logit
            out_ch = self.num_mix * (out_ch + 1)
        post_net += [
            nn.Conv2d(self.num_ch[-1], out_ch, kernel_size=3, padding=1),
            act_out
        ]
        post_net[-2].bias.data *= 0.
        return nn.Sequential(*post_net)
    def get_p_xz_params(self, out_feature) -> tuple:
        """Split the head output into the likelihood's parameter tensors.

        For 'logistic_mixture': (mixing logits, param_1, ..., param_k); the
        remaining channels are reshaped to (MB, C, num_mix * num_p_param, H, W)
        and chunked along dim 2. Otherwise: chunk along channels.
        """
        if self.likelihood == 'logistic_mixture':
            log_probs, ll = out_feature[:, :self.num_mix], out_feature[:, self.num_mix:]
            ll = ll.reshape(-1, self.image_size[0], self.num_mix*self.num_p_param, self.image_size[1], self.image_size[2])
            return (log_probs, ) + torch.chunk(ll, self.num_p_param, dim=2)
        else:
            return torch.chunk(out_feature, self.num_p_param, dim=1)
def quantize(x, n_bits=6):
    """Stochastically quantize x from [-1, 1] to 2**n_bits levels.

    A uniform jitter in (-0.25, 0.25) is added before flooring, and values
    land on bin centers. Note the result may overshoot [-1, 1] by up to half
    a jittered bin.

    :param x: tensor in [-1, 1]
    :param n_bits: number of quantization bits
    :return: quantized tensor on the [-1, 1] scale
    """
    levels = 2. ** n_bits
    z = (x + 1) / 2.  # [-1, 1] -> [0, 1]
    z[z >= 1.] = 0.999  # keep the top value inside the last bin
    jitter = 0.25 * (torch.rand_like(z) * 2 - 1)
    # [0, 1] -> bin index (+ jitter) -> bin center in [0, 1]
    z = (torch.floor(z * levels + jitter) + 0.5) / levels
    return z * 2. - 1  # back to [-1, 1]
def dequantize(x, n_bits=6):
    """Dequantize by adding uniform noise of one quantization-bin width.

    :param x: quantized tensor
    :param n_bits: number of quantization bits (bin width is 2**-n_bits on
        each side)
    :return: x plus uniform noise in (-2**-n_bits, 2**-n_bits)
    """
    bin_width = 2 ** n_bits
    noise = (torch.rand_like(x) * 2 - 1) / bin_width
    return x + noise
dct_vae | dct_vae-main/model/plain_decoder.py | import math
from typing import Optional, Union
import torch.nn as nn
import torch
from utils.vae_layers import DecoderResBlock
class _Decoder(nn.Module):
def __init__(self):
super(_Decoder, self).__init__()
def init(self) -> None:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.zeros_(m.bias)
def get_activation(self, name):
return {
'relu': nn.ReLU(),
'silu': nn.SiLU(),
'elu': nn.ELU(),
'gelu': nn.GELU()
}[name]
@staticmethod
def get_num_lik_params(likelihood: str) -> int:
return {
'bernoulli': 1,
'gaussian': 2,
'gaussian_zero': 1,
'logistic': 2,
'logistic_mixture': 2,
}[likelihood]
class PlainDecoder(_Decoder):
    """Non-hierarchical convolutional decoder: upsample a flat latent to a
    small feature map, then a stack of residual blocks (stride -1 entries in
    the plan upsample by 2x) down to the likelihood's parameter maps."""
    def __init__(self,
                 z_dim: int,
                 num_ch: int,
                 data_ch: int,
                 data_dim: int,
                 likelihood: str,
                 weight_norm: bool,
                 batch_norm: bool,
                 **kwargs,
                 ):
        super(PlainDecoder, self).__init__()
        self.num_p_param = _Decoder.get_num_lik_params(likelihood)
        self.data_ch = data_ch
        # single-parameter likelihoods (e.g. bernoulli) get sigmoid outputs
        if self.num_p_param == 1:
            act_out = nn.Sigmoid()
        else:
            act_out = nn.Identity()
        conv_param = self.get_conv_params(data_dim)
        n_layers = len(conv_param['strides'])
        channels = num_ch * (2 ** n_layers)
        layers = [
            nn.Upsample(conv_param['upsample'], mode='nearest'),
            nn.Conv2d(z_dim, channels, kernel_size=3, padding=1)
        ]
        for i in range(n_layers+1):
            stride = conv_param['strides'][i] if i < n_layers else 1
            # halve the channel count whenever the spatial size changes
            out_ch = int(channels/2) if stride != 1 else channels
            params = {
                'in_channels': channels,
                'hid_channels': channels*6,
                'out_channels': out_ch,
                'stride': stride,
                'activation': nn.SiLU(),
                'weight_norm': weight_norm,
                'batch_norm': batch_norm
            }
            channels = out_ch
            layers.append(
                DecoderResBlock(**params)
            )
        layers += [
            nn.ELU(),
            nn.Conv2d(channels, self.num_p_param*self.data_ch, 3, padding=1),
            act_out
        ]
        self.p_x = nn.Sequential(*layers)
        self.init()
    def forward(self, z):
        """Decode a flat latent z (MB, z_dim) into likelihood parameter chunks."""
        z = z.reshape(z.shape[0], -1, 1, 1)
        pxz_param = self.p_x(z)
        return torch.chunk(pxz_param, self.num_p_param, dim=1)
    @staticmethod
    def get_conv_params(data_dim):
        """Upsampling plan per output resolution (stride -1 == 2x upsample)."""
        return {
            28: { # 1 - 7 - 7 - 14 - 14 - 28
                'upsample': 7,
                'strides': [1, -1, 1, -1],
            },
            32: { # 1 - 4 - 8 - 8 - 16 - 16 - 32
                'upsample': 4,
                'strides': [-1, 1, -1, 1, -1],
            },
            64: { # 1 - 4 - 8 - 8 - 16 - 16 - 32 - 32 - 64
                'upsample': 4,
                'strides': [-1, 1, -1, 1, -1, 1, -1],
            }
        }[data_dim]
| 3,274 | 29.045872 | 77 | py |
dct_vae | dct_vae-main/model/ddgm.py | import torch
import torch.nn as nn
import numpy as np
import math
from utils.distribution import Normal
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = torch.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
class DiffusionPrior(nn.Module):
    """Denoising-diffusion prior p(z) over a latent tensor z.

    The forward (noising) process q(z_t | z_0) follows a fixed beta schedule;
    the reverse process p(z_{t-1} | z_t) is parameterized by ``model`` (a
    U-Net style network taking (z_t, t)).

    :param model: network mapping (z_t, t) to distribution parameters; must
        expose ``in_channels`` and ``image_size`` when sampling or when
        ``use_noise_scale`` is set.
    :param T: number of diffusion steps.
    :param beta_schedule: 'cosine' or 'linear'.
    :param t_sample: how t is drawn during training ('uniform' or 'loss_aware').
    :param parametrization: 'x' (model predicts z_0 mean + log-var),
        'x_var' (predicts z_0 mean, fixed posterior variance) or
        'eps' (predicts the noise).
    :param num_bits: bit depth used by the discretized reconstruction term.
    :param ll: t=0 reconstruction likelihood, 'discretized_gaussian' or 'vdm'.
    :param use_noise_scale: learn a per-element scale applied to z_0.
    """
    def __init__(self,
                 model,
                 T,
                 beta_schedule,
                 t_sample='uniform',
                 parametrization='x',
                 num_bits=5,
                 ll='discretized_gaussian',
                 use_noise_scale=False,
                 ):
        super(DiffusionPrior, self).__init__()
        # A NeuralNet (unet), which takes as input z and the timestamp
        self.parametrization = parametrization
        self.num_bits = num_bits
        self.ll = ll
        self.use_noise_scale = use_noise_scale
        if self.use_noise_scale:
            shape = [model.in_channels, model.image_size, model.image_size]
            self.noise_scale = nn.Parameter(torch.zeros(shape), requires_grad=True)
        assert parametrization in ['x', 'eps', 'x_var'], \
            f'unknown parametrization {parametrization}. Expect to be x0 or eps'
        self.model = model
        self.device = None  # lazily set on the first log_prob call
        # num steps
        self.T = T
        # how to sample t during training: uniform or loss-aware
        self.t_sample = t_sample
        # running average of per-step log-likelihood, used by the
        # 'loss_aware' sampler (initialized pessimistically at -1)
        self._ll_hist = -1.*torch.ones(self.T)
        # create beta schedule
        self.beta_schedule = beta_schedule
        self.beta = self.get_beta_schedule(beta_schedule)
        self.alphas_cumprod = np.cumprod(1.0 - self.beta, axis=0)
        # calculations for posterior q(x_{t-1} | x_t, x_0)
        self.posterior_variance = (
            self.beta * (1.0 - np.append(1.0, self.alphas_cumprod[:-1])) / (1.0 - self.alphas_cumprod)
        )
        # log calculation clipped because the posterior variance is 0 at the
        # beginning of the diffusion chain.
        self.posterior_log_variance_clipped = np.log(
            np.append(self.posterior_variance[1], self.posterior_variance[1:])
        )
        self.posterior_mean_coef1 = (
            self.beta * np.sqrt(np.append(1.0, self.alphas_cumprod[:-1])) / (1.0 - self.alphas_cumprod)
        )
        self.posterior_mean_coef2 = (
            (1.0 - np.append(1.0, self.alphas_cumprod[:-1]))
            * np.sqrt(1 - self.beta)
            / (1.0 - self.alphas_cumprod)
        )
    def get_beta_schedule(self, name):
        """Return the per-step betas (numpy, length T) for schedule *name*."""
        s = 0.008
        if name == "cosine":
            betas = []
            max_beta = 0.999
            fn = lambda t: math.cos((t + s) / (1 + s) * math.pi / 2) ** 2
            for i in range(self.T):
                t1 = i / self.T
                t2 = (i + 1) / self.T
                betas.append(min(1 - fn(t2) / fn(t1), max_beta))
            return np.array(betas)
        elif name == 'linear':
            # rescale endpoints so short chains still get sensible betas
            if self.T < 21:
                scale = 100 / self.T
                multiply = 0.01
            else:
                scale = 1000 / self.T
                multiply = 0.001
            beta_start = scale * multiply
            beta_end = scale * 0.02
            return np.linspace(beta_start, beta_end, self.T, dtype=np.float64)
        else:
            raise NotImplementedError(f"unknown beta schedule: {name}")
    def forward(self, x):
        # intentionally unused: log_prob / sample are the public entry points
        pass
    def sample(self, N, t=1.):
        '''
        Ancestral sampling of N latents; t stands for temperature.
        '''
        shape = [N, self.model.in_channels, self.model.image_size, self.model.image_size]
        img = torch.randn(*shape, device=self.device)
        indices = list(range(self.T))[::-1]
        for i in indices:
            t_step = torch.tensor([i] * shape[0], device=self.device)
            with torch.no_grad():
                img = self.p_sample(img, t_step, temp=t)
        if self.use_noise_scale:
            # undo the learned scaling applied during training
            img = img / self.noise_scale
        return img
    def q_sample(self, z_0, t):
        """Sample z_t ~ q(z_t | z_0) for a batch of timesteps t."""
        noise = torch.randn_like(z_0)
        out = (
            _extract_into_tensor(np.sqrt(self.alphas_cumprod), t, z_0.shape) * z_0
            + _extract_into_tensor(np.sqrt(1.0 - self.alphas_cumprod), t, z_0.shape)
            * noise
        )
        return out
    def p_sample(self, z_t, t, temp=1.):
        '''
        Sample from p(z_{t-1}|z_t)
        '''
        p_dist = self.get_p(z_t, t)
        p_sample = p_dist.sample(t=temp)
        # no sampling for the step 0: return the mean instead
        p_sample[t == 0] = p_dist.mu[t==0]
        return p_sample
    def get_p(self, z_t, t):
        """Build the reverse distribution p(z_{t-1} | z_t) per parametrization."""
        if self.parametrization == 'x':
            p_mean, p_logvar_coef = torch.chunk(self.model(z_t, t), 2, dim=1)
            if self.num_bits is None:
                # continuous targets: keep the predicted log-var in a sane range
                p_logvar_coef = torch.clamp(p_logvar_coef, -5, 0)
            p_dist = Normal(p_mean, p_logvar_coef)
        elif self.parametrization == 'x_var':
            p_mean, p_logvar_coef = torch.chunk(self.model(z_t, t), 2, dim=1)
            # predicted variance is ignored; use the fixed posterior variance
            min_log = _extract_into_tensor(
                self.posterior_log_variance_clipped, t, z_t.shape
            )
            p_dist = Normal(p_mean, min_log)
        elif self.parametrization == 'eps':
            eps = self.model(z_t, t)
            z0_pred = self._predict_z0_from_eps(z_t, t, eps)
            p_dist = self.get_q_posterior(z0_pred, z_t, t)
        return p_dist
    def get_q_posterior(self, z_0, z_t, t):
        """Gaussian posterior q(z_{t-1} | z_t, z_0)."""
        q_mean = (
            _extract_into_tensor(self.posterior_mean_coef1, t, z_t.shape) * z_0
            + _extract_into_tensor(self.posterior_mean_coef2, t, z_t.shape) * z_t
        )
        q_log_variance_clipped = _extract_into_tensor(
            self.posterior_log_variance_clipped, t, z_t.shape
        )
        return Normal(q_mean, q_log_variance_clipped)
    def sample_t(self, batch_size, mode):
        """Draw a batch of timesteps according to the configured sampler."""
        if self.t_sample == 'uniform' or mode == 'val':
            indices_np = np.random.choice(range(self.T), size=(batch_size,))
        elif self.t_sample == 'loss_aware':
            # sample with the weights proportional to the loss
            loss = -1 * self._ll_hist.numpy()
            weights = loss/np.sum(loss)
            indices_np = np.random.choice(range(self.T), size=(batch_size,), p=weights)
        else:
            # BUGFIX: the exception was previously constructed but never
            # raised, falling through to an UnboundLocalError on return
            raise NotImplementedError(f"unknown t sampling schedule: {self.t_sample}")
        return indices_np
    def eval_is_ll(self, z_0, is_k=1):
        """
        Importance sampling estimation of the NLL
        :param z_0: batch of data points
        :param is_k: number of importance samples
        :return:
        """
        elbo = torch.zeros(is_k, z_0.shape[0], device=z_0.device)
        for k in range(is_k):
            elbo[k] = self.log_prob(z_0, mode='test')
        ll = torch.logsumexp(elbo, 0) - np.log(is_k)
        return ll
    def log_prob(self, z_0, mode='train', reduce_dim=True):
        """
        :param z_0: (MB, ch, h, w)
        :param mode: 'train', 'test' or 'val'
        :return: per-sample ELBO (summed over all T steps in test/val mode,
            unbiased single-step estimate scaled by T in train mode)
        """
        batch_size = z_0.shape[0]
        if self.device is None:
            self.device = z_0.device
        # loop over t or sample t uniformly
        if mode in ['test', 'val']:
            t_to_loop = [torch.ones(batch_size, ).long().to(z_0.device)*i for i in range(self.T)]
        else:
            indices_np = self.sample_t(batch_size, mode=mode)
            t_to_loop = [
                torch.from_numpy(indices_np).long().to(z_0.device)
            ]
        log_lik = 0.
        for t in t_to_loop:
            log_lik += self._step_loss(z_0, t, reduce_dim=reduce_dim)
        if mode == 'val' and self.t_sample == 'loss_aware':
            t = t_to_loop[0]
            with torch.no_grad():
                # update running average of the loss for the sampler
                for i in range(self.T):
                    step_ll = torch.clamp(log_lik[t == i], max=0)
                    if len(step_ll) > 0:
                        gamma = 0.99
                        self._ll_hist[i] = gamma*self._ll_hist[i] + (1 - gamma)*step_ll.mean().cpu()
        # multiply by the number of steps if sampling is used
        log_lik *= self.T / len(t_to_loop)
        return log_lik
    def _step_loss(self, z_0, t, reduce_dim=True):
        """Per-step term: reconstruction log-lik at t==0, -KL otherwise."""
        batch_size = z_0.shape[0]
        # sample z_t from q(z_t | z_0)
        if self.use_noise_scale:
            z_0 = z_0 * self.noise_scale
            # z_0 = z_0 / self.noise_scale.abs().mean()
        z_t = self.q_sample(z_0, t)
        # get p(z_{t-1} | z_t) params
        p_dist = self.get_p(z_t, t)
        # get q posterior params
        q_posterior_dist = self.get_q_posterior(z_0, z_t, t)
        if self.ll == 'discretized_gaussian':
            rec_ll = discretized_gaussian_log_likelihood(z_0, means=p_dist.mu,
                                                         log_scales=0.5 * p_dist.log_var,
                                                         n_bits=self.num_bits)
            rec_ll = rec_ll.reshape(batch_size, -1)
            if reduce_dim:
                rec_ll = rec_ll.sum(1)
        elif self.ll == 'vdm':
            # BUGFIX: was `elif 'vdm':`, which is always truthy, so every
            # non-'discretized_gaussian' likelihood silently ran this branch.
            vocab_size = 2 ** self.num_bits
            exp_half_g_0 = _extract_into_tensor(np.sqrt( (1 - self.alphas_cumprod) / self.alphas_cumprod), torch.zeros_like(t), z_0.shape)
            # var_0 = torch.sigmoid(g_0)
            # NOTE(review): uniform noise (rand_like) is used here; VDM's
            # derivation assumes Gaussian noise (randn_like) -- confirm intended.
            eps_0 = torch.rand_like(z_0)
            z_noisy_rescaled = z_0 + exp_half_g_0 * eps_0  # = z_0/sqrt(1-var)
            x = (torch.clamp(z_0, -1., 1.) + 1.)/ 2. * (vocab_size - 1)  # convert to uint
            # create x OHE
            x = x.round().long()
            x_onehot = torch.nn.functional.one_hot(x, num_classes=vocab_size)
            # decoding: bin centers of the discretized vocabulary in [-1, 1]
            x_vals = torch.arange(0, vocab_size)[:, None]
            x_vals = x_vals.repeat(1, self.model.in_channels)
            x_vals = 2 * ((x_vals + .5) / vocab_size) - 1
            x_vals = x_vals.transpose(1, 0)[None, None, None, :, :]
            x_vals = x_vals.swapaxes(2, 3).swapaxes(1, 2)
            x_vals = x_vals.to(z_0.device)
            inv_stdev = 1 / exp_half_g_0[..., None]
            logits = -0.5 * torch.square((z_noisy_rescaled[..., None] - x_vals) * inv_stdev)
            # calculate log prob of the observed bin under the softmax decoder
            logprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            rec_ll = torch.sum(x_onehot * logprobs, axis=-1).reshape(batch_size, -1)
            if reduce_dim:
                rec_ll = rec_ll.sum(1)
        else:
            raise NotImplementedError(f"unknown likelihood: {self.ll}")
        kl = q_posterior_dist.kl(p_dist).reshape(batch_size, -1)
        if reduce_dim:
            kl = kl.sum(1)
        out = -1 * kl
        out[t == 0] = rec_ll[t==0]
        return out
    def _predict_z0_from_eps(self, z_t, t, eps):
        """Invert the forward process: recover z_0 from z_t and predicted noise."""
        return (
            _extract_into_tensor( np.sqrt(1.0 / self.alphas_cumprod), t, z_t.shape) * z_t
            - _extract_into_tensor(np.sqrt(1.0 / self.alphas_cumprod - 1), t, z_t.shape) * eps
        )
class DiffusionDCTPrior(DiffusionPrior):
    """Diffusion prior variant for DCT-space latents.

    Uses a scalar per-step schedule for the mean (``BETAS_mu``) and a
    per-coefficient schedule for the noise std (``BETAS_sigma``), so
    different DCT coefficients can be noised at different rates.
    """
    def __init__(self,
                 model,
                 T,
                 beta_schedule,
                 dct_scale,
                 t_sample='uniform',
                 parametrization='x',
                 num_bits=5,
                 ):
        super(DiffusionDCTPrior, self).__init__(model, T, beta_schedule, t_sample, parametrization, num_bits)
        """
        dct_scale - tensor of the size [ch, h, w] which was used to scale DCT to the range [-1, 1]. Will be used to scale gaussian noise accordingly
        """
        self.dct_scale = dct_scale
        self.init_schedules()
    def get_beta_schedule(self, name, s=0.008):
        """Cosine betas; *s* may be a tensor, yielding per-coefficient betas."""
        if name == "cosine":
            betas = []
            max_beta = 0.999
            min_beta = 0.001
            fn = lambda t: np.cos((t + s) / (1 + s) * math.pi / 2) ** 2
            for i in range(self.T):
                t1 = i / self.T
                t2 = (i + 1) / self.T
                betas.append(np.clip(1 - fn(t2) / fn(t1), min_beta, max_beta))
            return np.stack(betas)
        else:
            raise NotImplementedError(f"unknown beta schedule: {name}")
    def init_schedules(self):
        """Precompute mean/sigma schedules and the q-posterior coefficients."""
        self.BETAS_mu = self.get_beta_schedule(self.beta_schedule)
        self.alphas_cumprod_mu = np.cumprod(1.0 - self.BETAS_mu, axis=0)
        # beta_scale = self.dct_scale / self.dct_scale.min() #(self.dct_scale ** 0.5)
        # beta_scale = (self.dct_scale ** 0.5)
        # self.BETAS_sigma = self.BETAS_mu[:, None, None, None] / beta_scale
        # self.BETAS_sigma[-1] = self.BETAS_sigma[-1] * beta_scale #self.BETAS_mu[-1] #torch.ones_like(self.BETAS_sigma[-1]) * 0.9
        # per-coefficient cosine offset; the tensor `s` makes BETAS_sigma
        # a [T, ch, h, w] schedule
        s = - 0.2 * torch.ones_like(self.dct_scale)
        self.BETAS_sigma = self.get_beta_schedule(self.beta_schedule, s)
        self.BETAS_sigma[0] = self.BETAS_mu[0]
        # NOTE(review): get_beta_schedule was called with a tensor `s`, so the
        # stacked result is built from torch ops; torch.from_numpy below
        # assumes a numpy array -- verify this conversion path actually runs.
        self.BETAS_sigma = torch.from_numpy(self.BETAS_sigma).float()
        self.alphas_cumprod_sigma = np.cumprod(1.0 - self.BETAS_sigma, axis=0)
        alphas_cumprod_sigma_prev = torch.cat(
            [torch.ones((1,) + self.dct_scale.shape), self.alphas_cumprod_sigma[:-1]])
        alpha_mu = torch.from_numpy(1.0 - self.BETAS_mu)[:, None, None, None]
        denominator = alpha_mu * (1.0 - alphas_cumprod_sigma_prev) + self.BETAS_sigma
        self.posterior_variance = self.BETAS_sigma * (
            1.0 - alphas_cumprod_sigma_prev) / denominator
        alphas_cumprod_mu_prev = torch.cat(
            [torch.ones(1),
             torch.from_numpy(self.alphas_cumprod_mu[:-1])])[:, None, None, None]
        self.posterior_mean_coef1 = torch.sqrt(
            alphas_cumprod_mu_prev) * self.BETAS_sigma / denominator
        self.posterior_mean_coef2 = (1.0 - alphas_cumprod_sigma_prev) * alpha_mu.sqrt() / denominator
    # def sample(self, N, t=None):
    #     '''
    #     t stand for temperature. Is not used.
    #     '''
    #     shape = [N, self.model.in_channels, self.model.image_size, self.model.image_size]
    #     std_coef = torch.sqrt(1.0 - self.alphas_cumprod_sigma)[-1].float().to(self.device)
    #     img = torch.randn(*shape, device=self.device) * std_coef
    #     indices = list(range(self.T))[::-1]
    #     for i in indices:
    #         t = torch.tensor([i] * shape[0], device=self.device)
    #         with torch.no_grad():
    #             img = self.p_sample(img, t)
    #     return img
    def q_sample(self, z_0, t):
        """Sample z_t with the scalar mean schedule and per-coefficient std."""
        noise = torch.randn_like(z_0)
        mu_coef = _extract_into_tensor(np.sqrt(self.alphas_cumprod_mu), t, z_0.shape)
        std_coef = torch.sqrt(1.0 - self.alphas_cumprod_sigma)[t].float().to(z_0.device)
        return mu_coef * z_0 + std_coef * noise
    def get_q_posterior(self, z_0, z_t, t):
        """Gaussian posterior q(z_{t-1} | z_t, z_0) using the DCT schedules."""
        q_mean = self.posterior_mean_coef1[t].to(z_0.device) * z_0 + \
                 self.posterior_mean_coef2[t].to(z_0.device) * z_t
        q_log_variance = torch.log(self.posterior_variance[t]).float().to(z_0.device)
        return Normal(q_mean.float(), q_log_variance)
    def get_p(self, z_t, t):
        """Build p(z_{t-1} | z_t); mirrors the parent but uses DCT schedules."""
        if self.parametrization == 'x':
            p_mean, p_logvar_coef = torch.chunk(self.model(z_t, t), 2, dim=1)
            p_dist = Normal(p_mean, p_logvar_coef)
        elif self.parametrization == 'x_var':
            p_mean, p_logvar_coef = torch.chunk(self.model(z_t, t), 2, dim=1)
            # fixed posterior variance; the predicted log-var is ignored
            min_log = torch.log(self.posterior_variance[t]).float().to(z_t.device)
            p_dist = Normal(p_mean, min_log)
        elif self.parametrization == 'eps':
            eps = self.model(z_t, t)
            z0_pred = self._predict_z0_from_eps(z_t, t, eps)
            p_dist = self.get_q_posterior(z0_pred, z_t, t)
        return p_dist
    def _predict_z0_from_eps(self, z_t, t, eps):
        """Recover z_0 from z_t and predicted noise under the DCT schedules."""
        eps_coef = torch.sqrt(1.0 - self.alphas_cumprod_sigma)[t].float().to(z_t.device)
        coef = _extract_into_tensor(1.0 / np.sqrt(self.alphas_cumprod_mu), t, z_t.shape)
        return coef * (z_t - eps_coef * eps)
def discretized_gaussian_log_likelihood(x, *, means, log_scales, n_bits = 5):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint values of
        ``n_bits`` depth, rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor (same shape as ``x``).
    :param log_scales: the Gaussian log stddev Tensor (same shape as ``x``).
    :param n_bits: bit depth of the discretization (2**n_bits - 1 bins).
    :return: a tensor like x of log probabilities
    """
    bins = 2 ** n_bits - 1
    assert x.shape == means.shape == log_scales.shape
    centered_x = x - means
    inv_stdv = torch.exp(-log_scales)
    # CDF evaluated at the upper and lower edge of each pixel's bin
    plus_in = inv_stdv * (centered_x + 1.0 / bins )
    cdf_plus = approx_standard_normal_cdf(plus_in)
    min_in = inv_stdv * (centered_x - 1.0 / bins)
    cdf_min = approx_standard_normal_cdf(min_in)
    # clamp before log to avoid -inf for bins far in the tails
    log_cdf_plus = torch.log(cdf_plus.clamp(min=1e-10))
    log_one_minus_cdf_min = torch.log((1.0 - cdf_min).clamp(min=1e-10))
    log_cdf_delta = torch.log((cdf_plus - cdf_min).clamp(min=1e-10))
    # edge bins absorb all probability mass beyond the [-1, 1] range
    log_probs = torch.where(
        x <= -1. + 1./bins,
        log_cdf_plus,
        torch.where(x >= 1. - 1./bins,
                    log_one_minus_cdf_min,
                    log_cdf_delta),
    )
    assert log_probs.shape == x.shape
    return log_probs
def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal (tanh-based, as used by PixelCNN++-style likelihoods).
    """
    coeff = math.sqrt(2.0 / math.pi)
    inner = coeff * (x + 0.044715 * x ** 3)
    return 0.5 * (1.0 + torch.tanh(inner))
| 18,457 | 40.478652 | 148 | py |
ATISE | ATISE-master/model.py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 13:27:48 2019
@author: 86187
"""
import torch
import numpy as np
import torch.nn as nn
from torch.nn.init import xavier_normal_
from torch.nn import functional as F
from torch.autograd import Variable
from numpy.random import RandomState
class TeRo(nn.Module):
    """TeRo temporal KG embedding.

    Entities are complex vectors rotated by a time-dependent phase (taken
    from ``emb_Time``); the rotated head/tail are scored TransE-style
    against the relation. Relations are stored twice (``n_relation*2``),
    presumably forward + inverse ids for interval facts -- see rank_left.
    """
    def __init__(self, kg, embedding_dim, batch_size, learning_rate, L, gran, gamma, n_day, gpu=True):
        super(TeRo, self).__init__()
        self.gpu = gpu
        self.kg = kg
        self.embedding_dim = embedding_dim
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.n_day = n_day
        # time granularity: raw timestamps are integer-divided by `gran`
        self.gran = gran
        # norm used for scoring: 'L1' or anything else for the L2 branch
        self.L = L
        # Nets
        self.emb_E_real = torch.nn.Embedding(self.kg.n_entity, self.embedding_dim, padding_idx=0)
        self.emb_E_img = torch.nn.Embedding(self.kg.n_entity, self.embedding_dim, padding_idx=0)
        self.emb_R_real = torch.nn.Embedding(self.kg.n_relation*2, self.embedding_dim, padding_idx=0)
        self.emb_R_img = torch.nn.Embedding(self.kg.n_relation*2, self.embedding_dim, padding_idx=0)
        self.emb_Time = torch.nn.Embedding(n_day, self.embedding_dim, padding_idx=0)
        # Initialization
        r = 6 / np.sqrt(self.embedding_dim)
        self.emb_E_real.weight.data.uniform_(-r, r)
        self.emb_E_img.weight.data.uniform_(-r, r)
        self.emb_R_real.weight.data.uniform_(-r, r)
        self.emb_R_img.weight.data.uniform_(-r, r)
        self.emb_Time.weight.data.uniform_(-r, r)
        # self.emb_T_img.weight.data.uniform_(-r, r)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()
        if self.gpu:
            self.cuda()
    def forward(self, X):
        """Score a batch of quadruples X = [head, tail, relation, time] (lower = better)."""
        h_i, t_i, r_i, d_i = X[:, 0].astype(np.int64), X[:, 1].astype(np.int64), X[:, 2].astype(np.int64), X[:, 3].astype(np.int64)//self.gran
        if self.gpu:
            h_i = Variable(torch.from_numpy(h_i).cuda())
            t_i = Variable(torch.from_numpy(t_i).cuda())
            r_i = Variable(torch.from_numpy(r_i).cuda())
            d_i = Variable(torch.from_numpy(d_i).cuda())
        else:
            h_i = Variable(torch.from_numpy(h_i))
            t_i = Variable(torch.from_numpy(t_i))
            r_i = Variable(torch.from_numpy(r_i))
            d_i = Variable(torch.from_numpy(d_i))
        pi = 3.14159265358979323846
        # time-dependent rotation: the time embedding is used as a phase angle
        d_img = torch.sin(self.emb_Time(d_i).view(-1, self.embedding_dim))#/(6 / np.sqrt(self.embedding_dim)/pi))
        d_real = torch.cos(
            self.emb_Time(d_i).view(-1, self.embedding_dim))#/(6 / np.sqrt(self.embedding_dim)/pi))
        # complex multiplication of entity embeddings by the unit rotation
        h_real = self.emb_E_real(h_i).view(-1, self.embedding_dim) *d_real-\
                 self.emb_E_img(h_i).view(-1,self.embedding_dim) *d_img
        t_real = self.emb_E_real(t_i).view(-1, self.embedding_dim) *d_real-\
                 self.emb_E_img(t_i).view(-1,self.embedding_dim)*d_img
        r_real = self.emb_R_real(r_i).view(-1, self.embedding_dim)
        h_img = self.emb_E_real(h_i).view(-1, self.embedding_dim) *d_img+\
                self.emb_E_img(h_i).view(-1,self.embedding_dim) *d_real
        t_img = self.emb_E_real(t_i).view(-1, self.embedding_dim) *d_img+\
                self.emb_E_img(t_i).view(-1,self.embedding_dim) *d_real
        r_img = self.emb_R_img(r_i).view(-1, self.embedding_dim)
        if self.L == 'L1':
            out_real = torch.sum(torch.abs(h_real + r_real - t_real), 1)
            # imaginary part of the tail enters with `+` (conjugate trick)
            out_img = torch.sum(torch.abs(h_img + r_img + t_img), 1)
            out = out_real + out_img
        else:
            # NOTE(review): this L2 branch adds the raw time index `d_i` to
            # the embedding sums and uses `t_real` (not `t_img`) in the
            # imaginary term; both look inconsistent with the L1 branch --
            # confirm this is intended before relying on L2 scoring.
            out_real = torch.sum((h_real + r_real + d_i - t_real) ** 2, 1)
            out_img = torch.sum((h_img + r_img + d_i + t_real) ** 2, 1)
            out = torch.sqrt(out_img + out_real)
        return out
    def normalize_embeddings(self):
        """Renorm entity embeddings so each row has L2 norm <= 1."""
        self.emb_E_real.weight.data.renorm_(p=2, dim=0, maxnorm=1)
        self.emb_E_img.weight.data.renorm_(p=2, dim=0, maxnorm=1)
    def log_rank_loss(self, y_pos, y_neg, temp=0):
        """Softplus loss with softmax-weighted negatives (temp=0 -> uniform weights)."""
        M = y_pos.size(0)
        N = y_neg.size(0)
        y_pos = self.gamma-y_pos
        y_neg = self.gamma-y_neg
        C = int(N / M)
        y_neg = y_neg.view(C, -1).transpose(0, 1)
        # NOTE(review): implicit softmax dim (deprecated); for this 2-D input
        # the implicit choice is dim=1
        p = F.softmax(temp * y_neg)
        loss_pos = torch.sum(F.softplus(-1 * y_pos))
        loss_neg = torch.sum(p * F.softplus(y_neg))
        loss = (loss_pos + loss_neg) / 2 / M
        if self.gpu:
            loss = loss.cuda()
        return loss
    def rank_loss(self, y_pos, y_neg):
        """Margin ranking loss; positives are tiled to match the negatives."""
        M = y_pos.size(0)
        N = y_neg.size(0)
        C = int(N / M)
        y_pos = y_pos.repeat(C)
        if self.gpu:
            target = Variable(torch.from_numpy(-np.ones(N, dtype=np.float32))).cuda()
        else:
            target = Variable(torch.from_numpy(-np.ones(N, dtype=np.float32))).cpu()
        loss = nn.MarginRankingLoss(margin=self.gamma)
        loss = loss(y_pos, y_neg, target)
        return loss
    def rank_left(self, X, facts, kg, timedisc, rev_set=0):
        """Filtered rank of the true head against all candidate head entities.

        :param X: encoded quadruples; :param facts: raw facts used as keys into
            ``kg.to_skip_final`` for filtering known true answers.
        :param timedisc: if truthy, accumulate scores over both interval
            endpoints using forward/inverse relation ids (negative times
            select the other endpoint).
        :param rev_set: additionally score reversed triples (head/tail
            swapped, relation shifted by n_relation//2).
        :return: list of filtered 1-based ranks.
        """
        rank = []
        with torch.no_grad():
            if timedisc:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    Xe_i = np.ones([self.kg.n_entity, 4])
                    for i in range(0, self.kg.n_entity):
                        X_i[i, 0] = i
                        X_i[i, 1] = triple[1]
                        X_i[i, 2] = triple[2] if triple[3]>=0 else triple[2]+self.kg.n_relation
                        X_i[i, 3] = triple[3] if triple[3]>=0 else triple[4]
                        Xe_i[i, 0] = i
                        Xe_i[i, 1] = triple[1]
                        Xe_i[i, 2] = triple[2]+self.kg.n_relation if triple[4]>=0 else triple[2]
                        Xe_i[i, 3] = triple[4] if triple[4]>=0 else triple[3]
                    i_score = self.forward(X_i)+self.forward(Xe_i)
                    if rev_set>0:
                        X_rev = np.ones([self.kg.n_entity,4])
                        Xe_rev = np.ones([self.kg.n_entity,4])
                        for i in range(0, self.kg.n_entity):
                            X_rev[i, 0] = triple[1]
                            X_rev[i, 1] = i
                            X_rev[i, 2] = triple[2]+self.kg.n_relation//2 if triple[3]>=0 else triple[2]+self.kg.n_relation+self.kg.n_relation//2
                            X_rev[i, 3] = triple[3] if triple[3]>=0 else triple[4]
                            Xe_rev[i, 0] = triple[1]
                            Xe_rev[i, 1] = i
                            Xe_rev[i, 2] = triple[2]+self.kg.n_relation//2+self.kg.n_relation if triple[4]>=0 else triple[2]+self.kg.n_relation//2
                            Xe_rev[i, 3] = triple[4] if triple[4]>=0 else triple[3]
                        i_score = i_score + self.forward(X_rev).view(-1)+self.forward(Xe_rev).view(-1)
                    if self.gpu:
                        i_score = i_score.cuda()
                    # filter out other known-true heads before ranking
                    filter_out = kg.to_skip_final['lhs'][(fact[1], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[0])].clone()
                    i_score[filter_out]=1e6
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
            else:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    for i in range(0, self.kg.n_entity):
                        X_i[i, 0] = i
                        X_i[i, 1] = triple[1]
                        X_i[i, 2] = triple[2]
                        X_i[i, 3] = triple[3]
                    i_score = self.forward(X_i)
                    if rev_set>0:
                        X_rev = np.ones([self.kg.n_entity,4])
                        for i in range(0, self.kg.n_entity):
                            X_rev[i, 0] = triple[1]
                            X_rev[i, 1] = i
                            X_rev[i, 2] = triple[2]+self.kg.n_relation//2
                            X_rev[i, 3] = triple[3]
                        i_score = i_score + self.forward(X_rev).view(-1)
                    if self.gpu:
                        i_score = i_score.cuda()
                    filter_out = kg.to_skip_final['lhs'][(fact[1], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[0])].clone()
                    i_score[filter_out]=1e6
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
        return rank
    def rank_right(self, X, facts, kg, timedisc, rev_set=0):
        """Filtered rank of the true tail against all candidate tail entities.

        Mirror image of :meth:`rank_left`; see that method for parameters.
        """
        rank = []
        with torch.no_grad():
            if timedisc:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    Xe_i = np.ones([self.kg.n_entity, 4])
                    for i in range(0, self.kg.n_entity):
                        X_i[i, 0] = triple[0]
                        X_i[i, 1] = i
                        X_i[i, 2] = triple[2] if triple[3]>=0 else triple[2]+self.kg.n_relation
                        X_i[i, 3] = triple[3] if triple[3]>=0 else triple[4]
                        Xe_i[i, 0] = triple[0]
                        Xe_i[i, 1] = i
                        Xe_i[i, 2] = triple[2]+self.kg.n_relation if triple[4]>=0 else triple[2]
                        Xe_i[i, 3] = triple[4] if triple[4]>=0 else triple[3]
                    i_score = self.forward(X_i)+self.forward(Xe_i)
                    if rev_set>0:
                        X_rev = np.ones([self.kg.n_entity,4])
                        Xe_rev = np.ones([self.kg.n_entity,4])
                        for i in range(0, self.kg.n_entity):
                            X_rev[i, 0] = i
                            X_rev[i, 1] = triple[0]
                            X_rev[i, 2] = triple[2]+self.kg.n_relation//2 if triple[3]>=0 else triple[2]+self.kg.n_relation+self.kg.n_relation//2
                            X_rev[i, 3] = triple[3] if triple[3]>=0 else triple[4]
                            Xe_rev[i, 0] = i
                            Xe_rev[i, 1] = triple[0]
                            Xe_rev[i, 2] = triple[2]+self.kg.n_relation//2+self.kg.n_relation if triple[4]>=0 else triple[2]+self.kg.n_relation//2
                            Xe_rev[i, 3] = triple[4] if triple[4]>=0 else triple[3]
                        i_score = i_score + self.forward(X_rev).view(-1)+ self.forward(Xe_rev).view(-1)
                    if self.gpu:
                        i_score = i_score.cuda()
                    filter_out = kg.to_skip_final['rhs'][(fact[0], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[1])].clone()
                    i_score[filter_out]=1e6
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
            else:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    for i in range(0, self.kg.n_entity):
                        X_i[i, 0] = triple[0]
                        X_i[i, 1] = i
                        X_i[i, 2] = triple[2]
                        X_i[i, 3] = triple[3]
                    i_score = self.forward(X_i)
                    if rev_set>0:
                        X_rev = np.ones([self.kg.n_entity,4])
                        for i in range(0, self.kg.n_entity):
                            X_rev[i, 0] = i
                            X_rev[i, 1] = triple[0]
                            X_rev[i, 2] = triple[2]+self.kg.n_relation//2
                            X_rev[i, 3] = triple[3]
                        i_score = i_score + self.forward(X_rev).view(-1)
                    if self.gpu:
                        i_score = i_score.cuda()
                    filter_out = kg.to_skip_final['rhs'][(fact[0], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[1])].clone()
                    i_score[filter_out]=1e6
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
        return rank
    def timepred(self, X):
        """Rank the true timestamp of each triple against all candidate days."""
        rank = []
        with torch.no_grad():
            for triple in X:
                X_i = np.ones([self.kg.n_day, len(triple)])
                for i in range(self.kg.n_day):
                    X_i[i, 0] = triple[0]
                    X_i[i, 1] = triple[1]
                    X_i[i, 2] = triple[2]
                    X_i[i, 3:] = self.kg.time_dict[i]
                i_score = self.forward(X_i)
                if self.gpu:
                    i_score = i_score.cuda()
                target = i_score[triple[3]]
                rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                rank.append(rank_triple)
        return rank
class ATISE(nn.Module):
    def __init__(self, kg, embedding_dim, batch_size, learning_rate, gamma, cmin, cmax, gpu=True):
        """ATISE: entities/relations are time-dependent Gaussians.

        Each entity/relation mean evolves over time via a linear trend
        (``alpha_* * t * emb_T*``) plus a seasonal term
        (``beta_* * sin(2*pi*omega_* * t)``); ``emb_*_var`` hold diagonal
        covariances bounded to [cmin, cmax]. ``gamma`` is the loss margin.
        """
        super(ATISE, self).__init__()
        self.gpu = gpu
        self.kg = kg
        self.embedding_dim = embedding_dim
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.cmin = cmin
        self.cmax = cmax
        # Nets
        # static means and diagonal covariances
        self.emb_E = torch.nn.Embedding(self.kg.n_entity, self.embedding_dim, padding_idx=0)
        self.emb_E_var = torch.nn.Embedding(self.kg.n_entity, self.embedding_dim, padding_idx=0)
        self.emb_R = torch.nn.Embedding(self.kg.n_relation, self.embedding_dim, padding_idx=0)
        self.emb_R_var = torch.nn.Embedding(self.kg.n_relation, self.embedding_dim, padding_idx=0)
        # entity trend direction (emb_TE), trend rate (alpha_E) and
        # seasonal amplitude/frequency (beta_E, omega_E)
        self.emb_TE = torch.nn.Embedding(self.kg.n_entity, self.embedding_dim, padding_idx=0)
        self.alpha_E = torch.nn.Embedding(self.kg.n_entity, 1, padding_idx=0)
        self.beta_E = torch.nn.Embedding(self.kg.n_entity, self.embedding_dim, padding_idx=0)
        self.omega_E = torch.nn.Embedding(self.kg.n_entity, self.embedding_dim, padding_idx=0)
        # same temporal components for relations
        self.emb_TR = torch.nn.Embedding(self.kg.n_relation, self.embedding_dim, padding_idx=0)
        self.alpha_R = torch.nn.Embedding(self.kg.n_relation, 1, padding_idx=0)
        self.beta_R = torch.nn.Embedding(self.kg.n_relation, self.embedding_dim, padding_idx=0)
        self.omega_R = torch.nn.Embedding(self.kg.n_relation, self.embedding_dim, padding_idx=0)
        # Initialization
        r = 6 / np.sqrt(self.embedding_dim)
        self.emb_E.weight.data.uniform_(-r, r)
        self.emb_E_var.weight.data.uniform_(self.cmin, self.cmax)
        self.emb_R.weight.data.uniform_(-r, r)
        self.emb_R_var.weight.data.uniform_(self.cmin, self.cmax)
        self.emb_TE.weight.data.uniform_(-r, r)
        # trend and seasonal amplitudes start at zero (purely static model)
        self.alpha_E.weight.data.uniform_(0, 0)
        self.beta_E.weight.data.uniform_(0, 0)
        self.omega_E.weight.data.uniform_(-r, r)
        self.emb_TR.weight.data.uniform_(-r, r)
        self.alpha_R.weight.data.uniform_(0, 0)
        self.beta_R.weight.data.uniform_(0, 0)
        self.omega_R.weight.data.uniform_(-r, r)
        # Regularization
        self.normalize_embeddings()
        if self.gpu:
            self.cuda()
    def forward(self, X):
        """Score quadruples X = [head, tail, relation, time] (lower = better).

        Builds the time-dependent Gaussian means for head/tail/relation and
        combines two quadratic divergence terms between the relation Gaussian
        and the head-minus-tail Gaussian; the structure matches a symmetric
        KL without log-det terms -- confirm against the ATISE paper.
        """
        h_i, t_i, r_i, d_i = X[:, 0].astype(np.int64), X[:, 1].astype(np.int64), X[:, 2].astype(np.int64), X[:, 3].astype(np.float32)
        if self.gpu:
            h_i = Variable(torch.from_numpy(h_i).cuda())
            t_i = Variable(torch.from_numpy(t_i).cuda())
            r_i = Variable(torch.from_numpy(r_i).cuda())
            d_i = Variable(torch.from_numpy(d_i).cuda())
        else:
            h_i = Variable(torch.from_numpy(h_i))
            t_i = Variable(torch.from_numpy(t_i))
            r_i = Variable(torch.from_numpy(r_i))
            d_i = Variable(torch.from_numpy(d_i))
        pi = 3.14159265358979323846
        # time-dependent mean = static + linear trend + seasonal component
        h_mean = self.emb_E(h_i).view(-1, self.embedding_dim) + \
            d_i.view(-1, 1) * self.alpha_E(h_i).view(-1, 1) * self.emb_TE(h_i).view(-1, self.embedding_dim) \
            + self.beta_E(h_i).view(-1, self.embedding_dim) * torch.sin(
                2 * pi * self.omega_E(h_i).view(-1, self.embedding_dim) * d_i.view(-1, 1))
        t_mean = self.emb_E(t_i).view(-1, self.embedding_dim) + \
            d_i.view(-1, 1) * self.alpha_E(t_i).view(-1, 1) * self.emb_TE(t_i).view(-1, self.embedding_dim) \
            + self.beta_E(t_i).view(-1, self.embedding_dim) * torch.sin(
                2 * pi * self.omega_E(t_i).view(-1, self.embedding_dim) * d_i.view(-1, 1))
        r_mean = self.emb_R(r_i).view(-1, self.embedding_dim) + \
            d_i.view(-1, 1) * self.alpha_R(r_i).view(-1, 1) * self.emb_TR(r_i).view(-1, self.embedding_dim) \
            + self.beta_R(r_i).view(-1, self.embedding_dim) * torch.sin(
                2 * pi * self.omega_R(r_i).view(-1, self.embedding_dim) * d_i.view(-1, 1))
        h_var = self.emb_E_var(h_i).view(-1, self.embedding_dim)
        t_var = self.emb_E_var(t_i).view(-1, self.embedding_dim)
        r_var = self.emb_R_var(r_i).view(-1, self.embedding_dim)
        # the two directed divergence terms, averaged below
        out1 = torch.sum((h_var+t_var)/r_var, 1)+torch.sum(((r_mean-h_mean+t_mean)**2)/r_var, 1)-self.embedding_dim
        out2 = torch.sum(r_var/(h_var+t_var), 1)+torch.sum(((h_mean-t_mean-r_mean)**2)/(h_var+t_var), 1)-self.embedding_dim
        out = (out1+out2)/4
        return out
def log_rank_loss(self, y_pos, y_neg, temp=0):
M = y_pos.size(0)
N = y_neg.size(0)
y_pos = self.gamma-y_pos
y_neg = self.gamma-y_neg
C = int(N / M)
y_neg = y_neg.view(C, -1).transpose(0, 1)
p = F.softmax(temp * y_neg)
loss_pos = torch.sum(F.softplus(-1 * y_pos))
loss_neg = torch.sum(p * F.softplus(y_neg))
loss = (loss_pos + loss_neg) / 2 / M
if self.gpu:
loss = loss.cuda()
return loss
def rank_loss(self, y_pos, y_neg):
M = y_pos.size(0)
N = y_neg.size(0)
C = int(N / M)
y_pos = y_pos.repeat(C)
if self.gpu:
target = Variable(torch.from_numpy(-np.ones(N, dtype=np.float32))).cuda()
else:
target = Variable(torch.from_numpy(-np.ones(N, dtype=np.float32))).cpu()
loss = nn.MarginRankingLoss(margin=self.gamma)
loss = loss(y_pos, y_neg, target)
return loss
def normalize_embeddings(self):
self.emb_E.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_E_var.weight.data.uniform_(self.cmin, self.cmax)
self.emb_R.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_R_var.weight.data.uniform_(self.cmin, self.cmax)
self.emb_TE.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_TR.weight.data.renorm_(p=2, dim=0, maxnorm=1)
def regularization_embeddings(self):
lower = torch.tensor(self.cmin).float()
upper = torch.tensor(self.cmax).float()
if self.gpu:
lower = lower.cuda()
upper = upper.cuda()
self.emb_E_var.weight.data=torch.where(self.emb_E_var.weight.data<self.cmin,lower,self.emb_E_var.weight.data)
self.emb_E_var.weight.data=torch.where(self.emb_E_var.weight.data>self.cmax,upper,self.emb_E_var.weight.data)
self.emb_R_var.weight.data=torch.where(self.emb_R_var.weight.data < self.cmin,lower, self.emb_R_var.weight.data)
self.emb_R_var.weight.data=torch.where(self.emb_R_var.weight.data > self.cmax,upper, self.emb_R_var.weight.data)
self.emb_E.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_R.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_TE.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_TR.weight.data.renorm_(p=2, dim=0, maxnorm=1)
    def rank_left(self, X, facts, kg, timedisc, rev_set=0):
        """Filtered rank of the true head against all candidate head entities.

        :param X: encoded quadruples; :param facts: raw facts used as keys
            into ``kg.to_skip_final`` for filtering known true answers.
        :param timedisc: if truthy, scores are summed over both interval
            endpoint timestamps (triple[3] and triple[4]).
        :param rev_set: additionally score reversed triples (head/tail
            swapped, relation shifted by n_relation//2).
        :return: list of filtered 1-based ranks.
        """
        rank = []
        with torch.no_grad():
            if timedisc:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    i_score = torch.zeros(self.kg.n_entity)
                    if self.gpu:
                        i_score = i_score.cuda()
                    # accumulate scores over both endpoints of the interval
                    for time_index in [triple[3],triple[4]]:
                        for i in range(0, self.kg.n_entity):
                            X_i[i, 0] = i
                            X_i[i, 1] = triple[1]
                            X_i[i, 2] = triple[2]
                            X_i[i, 3] = time_index
                        i_score = i_score + self.forward(X_i).view(-1)
                        if rev_set>0:
                            X_rev = np.ones([self.kg.n_entity,4])
                            for i in range(0, self.kg.n_entity):
                                X_rev[i, 0] = triple[1]
                                X_rev[i, 1] = i
                                X_rev[i, 2] = triple[2]+self.kg.n_relation//2
                                X_rev[i, 3] = time_index
                            i_score = i_score + self.forward(X_rev).view(-1)
                    # mask other known-true heads before ranking
                    filter_out = kg.to_skip_final['lhs'][(fact[1], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[0])].clone()
                    i_score[filter_out]=1e6
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
            else:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    for i in range(0, self.kg.n_entity):
                        X_i[i, 0] = i
                        X_i[i, 1] = triple[1]
                        X_i[i, 2] = triple[2]
                        X_i[i, 3] = triple[3]
                    i_score = self.forward(X_i)
                    if rev_set>0:
                        X_rev = np.ones([self.kg.n_entity,4])
                        for i in range(0, self.kg.n_entity):
                            X_rev[i, 0] = triple[1]
                            X_rev[i, 1] = i
                            X_rev[i, 2] = triple[2]+self.kg.n_relation//2
                            X_rev[i, 3] = triple[3]
                        i_score = i_score + self.forward(X_rev).view(-1)
                    if self.gpu:
                        i_score = i_score.cuda()
                    filter_out = kg.to_skip_final['lhs'][(fact[1], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[0])].clone()
                    i_score[filter_out]=1e6
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
        return rank
    def rank_right(self, X, facts, kg,timedisc, rev_set=0):
        """Filtered rank of the true tail entity for every quadruple in X.

        Mirror of rank_left: candidate tails fill column 1 of the scoring
        batch while head (column 0), relation (column 2) and time columns are
        copied from the test quadruple.  Lower forward() scores are better.

        Args:
            X: array of test quadruples (entity/relation/time indices).
            facts: raw facts aligned with X; used only to key the filter sets.
            kg: knowledge graph providing n_entity and to_skip_final filters.
            timedisc: when truthy, score each candidate at both the start time
                (column 3) and end time (column 4) and sum the two scores.
            rev_set: when > 0, additionally add the score of the reversed fact,
                whose relation id is offset by kg.n_relation // 2.

        Returns:
            List of 1-based filtered ranks, one per row of X.
        """
        rank = []
        with torch.no_grad():
            if timedisc:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    i_score = torch.zeros(self.kg.n_entity)
                    if self.gpu:
                        i_score = i_score.cuda()
                    # accumulate scores at the fact's start and end time indices
                    for time_index in [triple[3],triple[4]]:
                        for i in range(0, self.kg.n_entity):
                            X_i[i, 0] = triple[0]
                            X_i[i, 1] = i
                            X_i[i, 2] = triple[2]
                            X_i[i, 3] = time_index
                        i_score = i_score + self.forward(X_i).view(-1)
                        if rev_set>0:
                            # also score the reverse form: (candidate, head, r + n_relation//2)
                            X_rev = np.ones([self.kg.n_entity,4])
                            for i in range(0, self.kg.n_entity):
                                X_rev[i, 0] = i
                                X_rev[i, 1] = triple[0]
                                X_rev[i, 2] = triple[2]+self.kg.n_relation//2
                                X_rev[i, 3] = time_index
                            i_score = i_score + self.forward(X_rev).view(-1)
                    # filtered setting: push every other known-true tail out of reach
                    filter_out = kg.to_skip_final['rhs'][(fact[0], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[1])].clone()
                    i_score[filter_out]=1e6
                    # rank = 1 + number of candidates scoring strictly better (smaller)
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
            else:
                for triple, fact in zip(X, facts):
                    X_i = np.ones([self.kg.n_entity, 4])
                    for i in range(0, self.kg.n_entity):
                        X_i[i, 0] = triple[0]
                        X_i[i, 1] = i
                        X_i[i, 2] = triple[2]
                        X_i[i, 3] = triple[3]
                    i_score = self.forward(X_i)
                    if rev_set>0:
                        X_rev = np.ones([self.kg.n_entity,4])
                        for i in range(0, self.kg.n_entity):
                            X_rev[i, 0] = i
                            X_rev[i, 1] = triple[0]
                            X_rev[i, 2] = triple[2]+self.kg.n_relation//2
                            X_rev[i, 3] = triple[3]
                        i_score = i_score + self.forward(X_rev).view(-1)
                    if self.gpu:
                        i_score = i_score.cuda()
                    filter_out = kg.to_skip_final['rhs'][(fact[0], fact[2],fact[3], fact[4])]
                    target = i_score[int(triple[1])].clone()
                    i_score[filter_out]=1e6
                    rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
                    rank.append(rank_triple)
        return rank
def timepred(self, X):
rank = []
with torch.no_grad():
for triple in X:
X_i = np.ones([self.kg.n_day, len(triple)])
for i in range(self.kg.n_day):
X_i[i, 0] = triple[0]
X_i[i, 1] = triple[1]
X_i[i, 2] = triple[2]
X_i[i, 3:] = self.kg.time_dict[i]
i_score = self.forward(X_i)
if self.gpu:
i_score = i_score.cuda()
target = i_score[triple[3]]
rank_triple=torch.sum((i_score < target).float()).cpu().item()+1
rank.append(rank_triple)
return rank
| 26,618 | 44.580479 | 146 | py |
ATISE | ATISE-master/Train.py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 16:11:52 2019
@author: 86187
"""
import model as KGE
from Dataset import KnowledgeGraph
from Dataset_YG import KnowledgeGraphYG
import torch
import numpy as np
from time import time
from sklearn.utils import shuffle as skshuffle
import os
def mean_rank(rank):
    """Arithmetic mean of a list of ranks (0 for an empty list)."""
    n = len(rank)
    return sum(r / n for r in rank)
def mrr(rank):
    """Mean reciprocal rank of a list of ranks (0 for an empty list)."""
    n = len(rank)
    return sum(1 / r / n for r in rank)
def hit_N(rank, N):
    """Fraction of ranks that are <= N (the Hits@N metric)."""
    hits = sum(1 for r in rank if r <= N)
    return hits / len(rank)
def get_minibatches(X, mb_size, shuffle=True):
    """Yield successive row-minibatches of the dataset X.

    Params
    ------
    X: np.array of M x 3
        Triplets with entities/relations already translated to indices.
    mb_size: int
        Number of rows per minibatch (the last batch may be smaller).
    shuffle: bool, default True
        Shuffle a copy of the dataset before slicing it into minibatches.

    Returns
    -------
    Generator over row slices of (a copy of) X.
    """
    data = X.copy()
    if shuffle:
        data = skshuffle(data)
    total = data.shape[0]
    for offset in range(0, total, mb_size):
        yield data[offset:offset + mb_size]
def sample_negatives(X, C, kg):
    """Generate C corrupted copies of each positive quadruple.

    The batch is replicated C times; the first half of the replicated rows
    gets a uniformly random head (column 0), the second half a uniformly
    random tail (column 1).  Relation and time columns are left untouched.

    Params
    ------
    X: int matrix of M x 4, where M is the (mini)batch size
        Columns are [head, tail, relation, time].
    C: int
        Number of negative samples per positive.
    kg: knowledge graph
        Provides n_entity for uniform entity sampling.

    Returns
    -------
    X_corr: int matrix of (M*C) x 4 corrupted quadruples.
    """
    M = X.shape[0]
    # Copy before corrupting: with C == 1 the original code aliased X itself,
    # so the in-place assignments below clobbered the caller's positive batch
    # (train() scores the positives *after* calling this function).
    X_corr = X.copy()
    for i in range(C-1):
        X_corr = np.concatenate((X_corr,X),0)
    X_corr[:int(M*C/2),0]=torch.randint(kg.n_entity,[int(M*C/2)])
    X_corr[int(M*C/2):,1]=torch.randint(kg.n_entity,[int(M*C/2)])
    return X_corr
def sample_negatives_t(X, C, n_day):
    """Generate C time-corrupted copies of each positive quadruple.

    The batch is replicated C times and every replicated row receives a
    uniformly random time index in [0, n_day) in column 3; the head, tail and
    relation columns are left untouched.

    Params
    ------
    X: int matrix of M x 4, where M is the (mini)batch size
        Columns are [head, tail, relation, time].
    C: int
        Number of negative samples per positive.
    n_day: int
        Number of distinct time indices to sample from.

    Returns
    -------
    X_corr: int matrix of (M*C) x 4 time-corrupted quadruples.
    """
    M = X.shape[0]
    X_corr = X
    for i in range(C-1):
        X_corr = torch.cat((X_corr,X),0)
    if X_corr is X:
        # C == 1: no concatenation happened, so X_corr still aliases X and the
        # in-place corruption below would clobber the caller's positive batch.
        X_corr = X.clone() if torch.is_tensor(X) else X.copy()
    X_corr[:,3]=torch.randint(n_day,[int(M*C)])
    return X_corr
def train(task ='LinkPrediction',
          modelname='ATISE',
          data_dir='yago',
          dim=500,
          batch=512,
          lr=0.1,
          max_epoch=5000,
          min_epoch=250,
          gamma=1,
          L = 'L1',
          negsample_num=10,
          timedisc = 0,
          lossname = 'logloss',
          cmin = 0.001,
          cuda_able = True,
          rev_set = 1,
          temp = 0.5,
          gran = 7,
          count = 300
          ):
    """Train a temporal KG embedding model (ATISE or TeRo) with early stopping.

    Args:
        task: 'LinkPrediction' (rank heads/tails) or 'TimePrediction' (rank times).
        modelname: 'ATISE' (trained with Adam) or 'TERO' (Adagrad).
        data_dir: dataset folder; 'yago'/'wikidata' load KnowledgeGraphYG,
            'icews14'/'icews05-15' load KnowledgeGraph.
        dim, batch, lr: embedding dimension, minibatch size, learning rate.
        max_epoch, min_epoch: training budget; validation runs every min_epoch
            epochs (min_epoch is forced to 50 for yago/wikidata).
        gamma, L: margin and norm order (passed to TeRo; gamma also to ATISE).
        negsample_num: negatives generated per positive (C).
        timedisc: 0 = plain quadruples; 1 = expand each training fact over its
            [start, end] interval; 2 = jointly score start- and end-time copies
            (end-time copies use relation ids offset by kg.n_relation).
        lossname: 'logloss' uses log_rank_loss, anything else uses rank_loss
            with embedding normalization after each step.
        cmin: lower variance bound for ATISE (cmax is fixed at 100 * cmin).
        cuda_able: run the model on GPU.
        rev_set: include reversed relations when ranking.
        temp: temperature of the log loss.
        gran: time granularity in days for the ICEWS datasets.
        count: fact-count filter for the YAGO/Wikidata loaders.

    Side effects:
        Creates a hyperparameter-derived results directory, writes validation
        ('result<epoch>.txt') and test ('test_result<epoch>.txt') metrics plus
        the loss history there, and checkpoints the best model as 'params.pkl'.
        Returns early (training is skipped) if the directory already exists.
    """
    # fixed seed so runs with identical hyperparameters are reproducible
    randseed = 9999
    np.random.seed(randseed)
    torch.manual_seed(randseed)
    """
    Data Loading
    """
    if data_dir == 'yago' or data_dir == 'wikidata':
        kg = KnowledgeGraphYG(data_dir=data_dir, count = count,rev_set = rev_set)
        n_day = kg.n_time
        # these datasets validate (and can therefore stop) far more frequently
        min_epoch=50
    elif data_dir=='icews14':
        n_day = 365
        kg = KnowledgeGraph(data_dir=data_dir,gran=gran,rev_set = rev_set)
    elif data_dir == 'icews05-15':
        n_day = 4017
        kg = KnowledgeGraph(data_dir=data_dir,gran=gran,rev_set = rev_set)
    """
    Create a model
    """
    if modelname== 'TERO':
        model = KGE.TeRo(kg, embedding_dim=dim, batch_size=batch, learning_rate=lr, gamma=gamma, L=L, gran=gran, n_day=kg.n_time,gpu=cuda_able)
    if modelname=='ATISE':
        model = KGE.ATISE(kg, embedding_dim=dim, batch_size=batch, learning_rate=lr, gamma=gamma, cmin=cmin, cmax=100*cmin, gpu=cuda_able)
    # Adam for ATISE, Adagrad otherwise.
    # NOTE(review): the `optimizer` name string is never read afterwards.
    if modelname == 'ATISE':
        solver = torch.optim.Adam(model.parameters(), model.learning_rate)
        optimizer = 'Adam'
    else:
        solver = torch.optim.Adagrad(model.parameters(), model.learning_rate)
        optimizer = 'Adagrad'
    # timedisc == 1 expands every training fact into one quadruple per time
    # step of its validity interval; validation/test keep the raw facts
    # (the commented-out expansion of those splits was deliberately disabled).
    if timedisc == 0 or timedisc ==2:
        train_pos = np.array(kg.training_triples)
        validation_pos = np.array(kg.validation_triples)
        test_pos = np.array(kg.test_triples)
    elif timedisc == 1:
        train_pos = []
        validation_pos = []
        test_pos = []
        for fact in kg.training_triples:
            for time_index in range(fact[3],fact[4]+1):
                train_pos.append([fact[0], fact[1], fact[2], time_index])
        train_pos = np.array(train_pos)
        # for fact in kg.validation_triples:
        #     for time_index in range(fact[3],fact[4]+1):
        #         validation_pos.append([fact[0], fact[1], fact[2], time_index])
        validation_pos = np.array(kg.validation_triples)
        # for fact in kg.test_triples:
        #     for time_index in range(fact[3],fact[4]+1):
        #         test_pos.append([fact[0], fact[1], fact[2], time_index])
        # test_pos = np.array(test_pos)
        test_pos = np.array(kg.test_triples)
    losses = []
    mrr_std = 0          # best validation MRR seen so far
    C = negsample_num
    patience = 0         # consecutive non-improving validations (early stopping)
    # results directory encodes the full hyperparameter configuration
    path = os.path.join(data_dir,modelname,'timediscrete{:.0f}/dim{:.0f}/lr{:.4f}/neg_num{:.0f}/{:.0f}day/gamma{:.0f}/cmin{:.4f}'
                        .format(timedisc,dim,lr,negsample_num,gran,gamma,cmin))
    if timedisc: path = os.path.join(path,'{:.0f}count'.format(count))
    try:
        os.makedirs(path)
    except:
        # this configuration was already run: skip retraining.
        # NOTE(review): the bare except also hides genuine makedirs failures.
        print('path existed')
        return
    """
    Training Process
    """
    for epoch in range(max_epoch):
        print('Epoch-{}'.format(epoch + 1))
        print('————————————————')
        it = 0
        train_triple = list(get_minibatches(train_pos, batch, shuffle=True))
        for iter_triple in train_triple:
            # drop the last, smaller batch
            if iter_triple.shape[0] < batch:
                break
            start = time()
            if task=='TimePrediction':
                iter_neg = sample_negatives_t(iter_triple, C, n_day)
            else:
                iter_neg = sample_negatives(iter_triple, C, kg)
            if timedisc == 2:
                # build start-time and end-time variants: the end-time copy
                # shifts relation ids by kg.n_relation, and facts missing one
                # endpoint (negative index) fall back to the other variant
                end_miss = np.where(iter_triple[:,4:5]<0)[0]
                start_miss = np.where(iter_triple[:,3:4]<0)[0]
                neg_end_miss = np.where(iter_neg[:,4:5]<0)[0]
                neg_start_miss = np.where(iter_neg[:,3:4]<0)[0]
                iter_triple_e = np.delete(iter_triple,3,1)
                iter_triple = np.delete(iter_triple,4,1)
                iter_triple_e[:,2:3] += kg.n_relation
                iter_triple_e[end_miss,:]=iter_triple[end_miss,:]
                iter_triple[start_miss,:]=iter_triple_e[start_miss,:]
                iter_neg_e = np.delete(iter_neg,3,1)
                iter_neg = np.delete(iter_neg,4,1)
                iter_neg_e[:,2:3] += kg.n_relation
                iter_neg_e[neg_end_miss,:]=iter_neg[neg_end_miss,:]
                iter_neg[neg_start_miss,:]=iter_neg_e[neg_start_miss,:]
            pos_score = model.forward(iter_triple)
            neg_score = model.forward(iter_neg)
            if timedisc ==2:
                # joint score = start-time score + end-time score
                pos_score += model.forward(iter_triple_e)
                neg_score += model.forward(iter_neg_e)
            if lossname == 'logloss':
                loss = model.log_rank_loss(pos_score, neg_score,temp=temp)
            else:
                loss = model.rank_loss(pos_score, neg_score)
            losses.append(loss.item())
            solver.zero_grad()
            loss.backward()
            solver.step()
            # margin loss requires renormalized embeddings after every step
            if lossname == 'marginloss':
                model.normalize_embeddings()
            if modelname == 'ATISE':
                # clamp ATISE variance embeddings into [cmin, cmax]
                model.regularization_embeddings()
            end = time()
            if it % 33 == 0:
                print('Iter-{}; loss: {:.4f};time per batch:{:.4f}s'.format(it, loss.item(), end - start))
            it += 1
        """
        Evaluation for Link Prediction
        """
        # validate once every `min_epoch` epochs
        if ((epoch+1)//min_epoch>epoch//min_epoch and epoch < max_epoch) :
            if task == 'LinkPrediction':
                rank = model.rank_left(validation_pos,kg.validation_facts,kg,timedisc,rev_set=rev_set)
                rank_right = model.rank_right(validation_pos,kg.validation_facts,kg,timedisc,rev_set=rev_set)
                rank = rank + rank_right
            else:
                rank = model.timepred(validation_pos)
            m_rank = mean_rank(rank)
            mean_rr = mrr(rank)
            hit_1 = hit_N(rank, 1)
            hit_3 = hit_N(rank, 3)
            hit_5 = hit_N(rank, 5)
            hit_10 = hit_N(rank, 10)
            print('validation results:')
            print('Mean Rank: {:.0f}'.format(m_rank))
            print('Mean RR: {:.4f}'.format(mean_rr))
            print('Hit@1: {:.4f}'.format(hit_1))
            print('Hit@3: {:.4f}'.format(hit_3))
            print('Hit@5: {:.4f}'.format(hit_5))
            print('Hit@10: {:.4f}'.format(hit_10))
            # persist validation metrics plus the full loss history so far
            f = open(os.path.join(path, 'result{:.0f}.txt'.format(epoch)), 'w')
            f.write('Mean Rank: {:.0f}\n'.format(m_rank))
            f.write('Mean RR: {:.4f}\n'.format(mean_rr))
            f.write('Hit@1: {:.4f}\n'.format(hit_1))
            f.write('Hit@3: {:.4f}\n'.format(hit_3))
            f.write('Hit@5: {:.4f}\n'.format(hit_5))
            f.write('Hit@10: {:.4f}\n'.format(hit_10))
            for loss in losses:
                f.write(str(loss))
                f.write('\n')
            f.close()
            # early stopping: tolerate up to 3 non-improving validations, then
            # reload the best checkpoint and evaluate once on the test split
            if mean_rr < mrr_std and patience<3:
                patience+=1
            elif (mean_rr < mrr_std and patience>=3) or epoch==max_epoch-1:
                if epoch == max_epoch-1:
                    # budget exhausted: checkpoint the current (final) weights
                    torch.save(model.state_dict(), os.path.join(path, 'params.pkl'))
                model.load_state_dict(torch.load(os.path.join(path,'params.pkl')))
                if task == 'LinkPrediction':
                    rank = model.rank_left(test_pos,kg.test_facts,kg,timedisc,rev_set=rev_set)
                    rank_right = model.rank_right(test_pos,kg.test_facts,kg,timedisc,rev_set=rev_set)
                    rank = rank + rank_right
                else:
                    rank = model.timepred(test_pos)
                m_rank = mean_rank(rank)
                mean_rr = mrr(rank)
                hit_1 = hit_N(rank, 1)
                hit_3 = hit_N(rank, 3)
                hit_5 = hit_N(rank, 5)
                hit_10 = hit_N(rank, 10)
                print('test result:')
                print('Mean Rank: {:.0f}'.format(m_rank))
                print('Mean RR: {:.4f}'.format(mean_rr))
                print('Hit@1: {:.4f}'.format(hit_1))
                print('Hit@3: {:.4f}'.format(hit_3))
                print('Hit@5: {:.4f}'.format(hit_5))
                print('Hit@10: {:.4f}'.format(hit_10))
                # NOTE(review): both branches open the same file — the if/else
                # is redundant as written.
                if epoch == max_epoch-1:
                    f = open(os.path.join(path, 'test_result{:.0f}.txt'.format(epoch)), 'w')
                else:
                    f = open(os.path.join(path, 'test_result{:.0f}.txt'.format(epoch)), 'w')
                f.write('Mean Rank: {:.0f}\n'.format(m_rank))
                f.write('Mean RR: {:.4f}\n'.format(mean_rr))
                f.write('Hit@1: {:.4f}\n'.format(hit_1))
                f.write('Hit@3: {:.4f}\n'.format(hit_3))
                f.write('Hit@5: {:.4f}\n'.format(hit_5))
                f.write('Hit@10: {:.4f}\n'.format(hit_10))
                for loss in losses:
                    f.write(str(loss))
                    f.write('\n')
                f.close()
                break
            # new best validation MRR: checkpoint the model and reset patience
            if mean_rr>=mrr_std:
                torch.save(model.state_dict(), os.path.join(path, 'params.pkl'))
                mrr_std = mean_rr
                patience = 0
| 12,810 | 31.35101 | 143 | py |
pyslam | pyslam-master/feature_superpoint.py | """
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import cv2
import torch
import config
config.cfg.set_lib('superpoint')
from demo_superpoint import SuperPointFrontend
from threading import RLock
from utils_sys import Printer, is_opencv_version_greater_equal
kVerbose = True
class SuperPointOptions:
    """Configuration for the SuperPoint front-end.

    Attributes mirror the defaults of the original demo_superpoint script:
        weights_path: path to the pretrained SuperPoint weights file.
        nms_dist: non-maximum-suppression radius in pixels.
        conf_thresh: detector confidence threshold.
        nn_thresh: descriptor nearest-neighbour matching threshold.
        cuda: True only when CUDA is both requested and available.
    """
    def __init__(self, do_cuda=True):
        # default options from demo_superpoints
        self.weights_path = config.cfg.root_folder + '/thirdparty/superpoint/superpoint_v1.pth'
        self.nms_dist = 4
        self.conf_thresh = 0.015
        self.nn_thresh = 0.7
        # logical `and` (not bitwise &): with &, a truthy non-bool request such
        # as do_cuda=2 would yield True & 2 == 0 and silently disable CUDA.
        use_cuda = torch.cuda.is_available() and do_cuda
        device = torch.device('cuda' if use_cuda else 'cpu')
        print('SuperPoint using ', device)
        self.cuda = use_cuda
# convert matrix of pts into list of keypoints
# N.B.: pts are - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
def convert_superpts_to_keypoints(pts, size=1):
    """Convert an Nx3 point matrix [x, y, confidence] into cv2.KeyPoint objects.

    Returns an empty list when pts is None.  The keyword names of the
    cv2.KeyPoint constructor changed in OpenCV 4.5.3, so the appropriate
    spelling is selected at runtime.
    """
    if pts is None:
        return []
    if is_opencv_version_greater_equal(4, 5, 3):
        return [cv2.KeyPoint(p[0], p[1], size=size, response=p[2]) for p in pts]
    return [cv2.KeyPoint(p[0], p[1], _size=size, _response=p[2]) for p in pts]
def transpose_des(des):
    """Return the transpose of a descriptor matrix, or None when there is none."""
    return None if des is None else des.T
# interface for pySLAM
class SuperPointFeature2D:
    """SuperPoint detector/descriptor wrapped in a pySLAM Feature2D interface.

    Wraps SuperPointFrontend and exposes OpenCV-style detect / compute /
    detectAndCompute entry points.  All entry points share one re-entrant
    lock, and the results for the last processed frame (pts, kps, des,
    heatmap) are cached on the instance.
    """
    def __init__(self, do_cuda=True):
        self.lock = RLock()
        self.opts = SuperPointOptions(do_cuda)
        print(self.opts)
        print('SuperPointFeature2D')
        print('==> Loading pre-trained network.')
        # This class runs the SuperPoint network and processes its outputs.
        self.fe = SuperPointFrontend(weights_path=self.opts.weights_path,
                            nms_dist=self.opts.nms_dist,
                            conf_thresh=self.opts.conf_thresh,
                            nn_thresh=self.opts.nn_thresh,
                            cuda=self.opts.cuda)
        print('==> Successfully loaded pre-trained network.')
        # cached results for the last processed frame
        self.pts = []
        self.kps = []
        self.des = []
        self.heatmap = []
        self.frame = None
        self.frameFloat = None
        self.keypoint_size = 20  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint
    # compute both keypoints and descriptors
    def detectAndCompute(self, frame, mask=None): # mask is a fake input
        """Run SuperPoint on `frame`; return (keypoints, descriptors-or-None)."""
        with self.lock:
            self.frame = frame
            # the network expects a float image scaled to [0, 1]
            self.frameFloat = (frame.astype('float32') / 255.)
            self.pts, self.des, self.heatmap = self.fe.run(self.frameFloat)
            # N.B.: pts are - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
            #print('pts: ', self.pts.T)
            self.kps = convert_superpts_to_keypoints(self.pts.T, size=self.keypoint_size)
            if kVerbose:
                print('detector: SUPERPOINT, #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
            return self.kps, transpose_des(self.des)
    # return keypoints if available otherwise call detectAndCompute()
    def detect(self, frame, mask=None): # mask is a fake input
        """Return the keypoints of `frame` (always recomputes — see disabled check)."""
        with self.lock:
            #if self.frame is not frame:
            self.detectAndCompute(frame)
            return self.kps
    # return descriptors if available otherwise call detectAndCompute()
    def compute(self, frame, kps=None, mask=None): # kps is a fake input, mask is a fake input
        """Return (keypoints, descriptors); recomputes if `frame` is a new object."""
        with self.lock:
            if self.frame is not frame:
                Printer.orange('WARNING: SUPERPOINT is recomputing both kps and des on last input frame', frame.shape)
                self.detectAndCompute(frame)
            return self.kps, transpose_des(self.des)
| 4,782 | 37.572581 | 137 | py |
pyslam | pyslam-master/feature_d2net.py | """
* This file is part of PYSLAM
* Adapted from https://github.com/mihaidusmanu/d2-net/blob/master/extract_features.py, see the license therein.
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
# adapted from https://github.com/mihaidusmanu/d2-net/blob/master/extract_features.py
import config
config.cfg.set_lib('d2net')
import os
import argparse
import cv2
import numpy as np
import imageio
from threading import RLock
import torch
from tqdm import tqdm
import scipy
import scipy.io
import scipy.misc
from utils_sys import Printer
from lib.model_test import D2Net
from lib.utils import preprocess_image
from lib.pyramid import process_multiscale
from utils_sys import Printer, is_opencv_version_greater_equal
kVerbose = True
# convert matrix of pts into list of keypoints
def convert_pts_to_keypoints(pts, scores, size=1):
    """Pair an Nx2(+) point matrix with per-point scores as cv2.KeyPoint objects.

    `pts` and `scores` must have equal length; an empty list is returned when
    pts is None.  The keyword names of the cv2.KeyPoint constructor changed in
    OpenCV 4.5.3, so the appropriate spelling is selected at runtime.
    """
    assert(len(pts) == len(scores))
    if pts is None:
        return []
    if is_opencv_version_greater_equal(4, 5, 3):
        return [cv2.KeyPoint(p[0], p[1], size=size, response=scores[i])
                for i, p in enumerate(pts)]
    return [cv2.KeyPoint(p[0], p[1], _size=size, _response=scores[i])
            for i, p in enumerate(pts)]
# interface for pySLAM
# from https://github.com/mihaidusmanu/d2-net
# N.B.: The singlescale features require less than 6GB of VRAM for 1200x1600 images.
# The multiscale flag can be used to extract multiscale features - for this, we recommend at least 12GB of VRAM.
class D2NetFeature2D:
    """D2-Net joint detector/descriptor wrapped in a pySLAM Feature2D interface.

    From https://github.com/mihaidusmanu/d2-net.
    N.B.: singlescale features require less than 6GB of VRAM for 1200x1600
    images; the multiscale flag extracts multiscale features, for which at
    least 12GB of VRAM is recommended.  All entry points share one re-entrant
    lock, and the results for the last processed frame are cached.
    """
    def __init__(self,
                 use_relu=True,            # remove ReLU after the dense feature extraction module
                 multiscale=False,         # extract multiscale features (read the note above)
                 max_edge=1600,            # maximum image size at network input
                 max_sum_edges=2800,       # maximum sum of image sizes at network input
                 preprocessing='torch',    # image preprocessing (caffe or torch)
                 do_cuda=True):
        print('Using D2NetFeature2D')
        self.lock = RLock()
        self.model_base_path = config.cfg.root_folder + '/thirdparty/d2net/'
        self.models_path = self.model_base_path + 'models/d2_ots.pth'  # best performances obtained with 'd2_ots.pth'
        self.use_relu = use_relu
        self.multiscale = multiscale
        self.max_edge = max_edge
        self.max_sum_edges = max_sum_edges
        self.preprocessing = preprocessing
        # cached results for the last processed frame
        self.pts = []
        self.kps = []
        self.des = []
        self.frame = None
        self.keypoint_size = 20  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint
        # logical `and` (not bitwise &) so a truthy non-bool request cannot
        # silently zero the flag (True & 2 == 0).
        self.do_cuda = do_cuda and torch.cuda.is_available()
        print('cuda:', self.do_cuda)
        self.device = torch.device("cuda:0" if self.do_cuda else "cpu")
        torch.set_grad_enabled(False)
        print('==> Loading pre-trained network.')
        # Creating CNN model.  Pass the *effective* CUDA flag (request AND
        # availability): the original passed the raw `do_cuda` request, which
        # made D2Net try to use CUDA on machines without it.
        self.model = D2Net(
            model_file=self.models_path,
            use_relu=use_relu,
            use_cuda=self.do_cuda)
        if self.do_cuda:
            print('Extracting on GPU')
        else:
            print('Extracting on CPU')
        print('==> Successfully loaded pre-trained network.')

    def compute_kps_des(self, image):
        """Run D2-Net on `image` and return (keypoints, descriptors).

        The image is replicated to 3 channels if grayscale, downscaled so the
        longest edge and the H+W sum stay within the configured budgets, fed
        through the network, and the resulting points are mapped back to the
        original resolution.
        """
        with self.lock:
            print('D2Net image shape:', image.shape)
            if len(image.shape) == 2:
                # the network expects a 3-channel image: replicate the gray channel
                image = image[:, :, np.newaxis]
                image = np.repeat(image, 3, -1)
            # scipy.misc.imresize was removed in SciPy >= 1.3 (the original
            # code carried a TODO about this); cv2.resize is used instead.
            # NOTE(review): imresize also rescaled intensities to uint8 for
            # float inputs; frames here are expected to be uint8, where the
            # two calls agree — confirm if float images are ever passed.
            resized_image = image
            if max(resized_image.shape) > self.max_edge:
                scale = self.max_edge / max(resized_image.shape)
                resized_image = cv2.resize(
                    resized_image, None, fx=scale, fy=scale,
                    interpolation=cv2.INTER_AREA
                ).astype('float')
            if sum(resized_image.shape[: 2]) > self.max_sum_edges:
                scale = self.max_sum_edges / sum(resized_image.shape[: 2])
                resized_image = cv2.resize(
                    resized_image, None, fx=scale, fy=scale,
                    interpolation=cv2.INTER_AREA
                ).astype('float')
            # per-axis factors to map keypoints back to the original resolution
            fact_i = image.shape[0] / resized_image.shape[0]
            fact_j = image.shape[1] / resized_image.shape[1]
            print('scale factors: {}, {}'.format(fact_i, fact_j))
            input_image = preprocess_image(
                resized_image,
                preprocessing=self.preprocessing
            )
            with torch.no_grad():
                image_tensor = torch.tensor(
                    input_image[np.newaxis, :, :, :].astype(np.float32),
                    device=self.device
                )
                if self.multiscale:
                    self.pts, scores, descriptors = process_multiscale(
                        image_tensor,
                        self.model
                    )
                else:
                    # singlescale extraction: restrict the pyramid to scale 1
                    self.pts, scores, descriptors = process_multiscale(
                        image_tensor,
                        self.model,
                        scales=[1]
                    )
            # map network coordinates back to input-image coordinates
            self.pts[:, 0] *= fact_i
            self.pts[:, 1] *= fact_j
            # i, j -> u, v (row/col order to x/y order, keeping the score column)
            self.pts = self.pts[:, [1, 0, 2]]
            self.kps = convert_pts_to_keypoints(self.pts, scores, self.keypoint_size)
            self.des = descriptors
            return self.kps, self.des

    def detectAndCompute(self, frame, mask=None):  # mask is a fake input
        """Run D2-Net on `frame`; return (keypoints, descriptors)."""
        with self.lock:
            self.frame = frame
            self.kps, self.des = self.compute_kps_des(frame)
            if kVerbose:
                print('detector: D2NET, descriptor: D2NET, #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
            return self.kps, self.des

    # return keypoints if available otherwise call detectAndCompute()
    def detect(self, frame, mask=None):  # mask is a fake input
        """Return the keypoints of `frame`, recomputing if it is a new object."""
        with self.lock:
            if self.frame is not frame:
                self.detectAndCompute(frame)
            return self.kps

    # return descriptors if available otherwise call detectAndCompute()
    def compute(self, frame, kps=None, mask=None):  # kps is a fake input, mask is a fake input
        """Return (keypoints, descriptors), recomputing if `frame` is a new object."""
        with self.lock:
            if self.frame is not frame:
                Printer.orange('WARNING: D2NET is recomputing both kps and des on last input frame', frame.shape)
                self.detectAndCompute(frame)
            return self.kps, self.des
| 7,800 | 38.005 | 144 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.