Dataset columns (type, value or length range):

entry_point                   string    length 1 to 65
original_triton_python_code   string    length 208 to 619k
optimised_triton_code         string    length 1.15k to 275k
repo_name                     string    length 7 to 115
module_name                   string    length 1 to 65
synthetic                     bool      1 class
uuid                          int64     0 to 18.5k
licenses                      list      length 1 to 6
stars                         int64     0 to 19.8k
sha                           string    length 40 to 40
repo_link                     string    length 72 to 180
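The column summary above matches a Hugging Face dataset viewer export. As a minimal sketch, assuming the dump was produced from a dataset hosted on the Hub (the dataset path below is a placeholder, not the real identifier), one record could be inspected like this:

```python
# Minimal sketch of reading one record with the schema above.
# Assumption: this dump was exported from a Hugging Face dataset; the dataset
# path below is a placeholder, not the real identifier.
from datasets import load_dataset

ds = load_dataset("org/pytorch-triton-pairs", split="train")  # hypothetical path
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["original_triton_python_code"][:200])  # source of the reference nn.Module
print(row["optimised_triton_code"][:200])        # Inductor-generated Triton code
```

The records that follow are shown one field per line, in the column order listed above.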
TorchMod
import torch class TorchMod(torch.nn.Module): def __init__(self): super(TorchMod, self).__init__() def forward(self, x, y): return torch.fmod(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_fmod_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.fmod(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_fmod_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchModNew(torch.nn.Module): def __init__(self): super(TorchModNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchMod
false
10,536
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
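Each record pairs a reference PyTorch module with the Triton code that Inductor generated for it. Below is a minimal sketch, assuming a CUDA device is available and that both code strings from the TorchMod record above have already been executed in the current namespace (defining TorchMod, TorchModNew, and get_inputs), of how such a pair could be compared numerically; it is an illustration, not part of the dataset.

```python
# Minimal sketch: compare the reference module against the Triton-backed module
# from the TorchMod record above. Assumptions: a CUDA device is available and the
# record's two code strings have been exec'd, so TorchMod, TorchModNew and
# get_inputs are defined.
import torch

inputs = [t.cuda() for t in get_inputs()]   # two contiguous (4, 4, 4, 4) tensors
expected = TorchMod()(*inputs)              # eager torch.fmod
actual = TorchModNew()(*inputs)             # Triton kernel via call()
print(torch.allclose(expected, actual), (expected - actual).abs().max().item())
```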
TorchDiv
import torch class TorchDiv(torch.nn.Module): def __init__(self): super(TorchDiv, self).__init__() def forward(self, x, y): return torch.div(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchDivNew(torch.nn.Module): def __init__(self): super(TorchDivNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchDiv
false
10,537
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TensorClampMin
import torch class TensorClampMin(torch.nn.Module): def forward(self, x): return x.clamp_min(-0.1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -0.1 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TensorClampMinNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TensorClampMin
false
10,538
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
RpowInt
import torch class RpowInt(torch.nn.Module): def __init__(self): super(RpowInt, self).__init__() def forward(self, x): return 2 ** x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.exp2(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class RpowIntNew(torch.nn.Module): def __init__(self): super(RpowIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
RpowInt
false
10,539
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
NotEqual
import torch class NotEqual(torch.nn.Module): def __init__(self): super(NotEqual, self).__init__() def forward(self, x, y): return x != y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_ne_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 != tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_ne_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class NotEqualNew(torch.nn.Module): def __init__(self): super(NotEqualNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
NotEqual
false
10,540
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
Sub
import torch class Sub(torch.nn.Module): def __init__(self): super(Sub, self).__init__() def forward(self, x, y): return x - y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SubNew(torch.nn.Module): def __init__(self): super(SubNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
Sub
false
10,541
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TorchFloorDiv
import torch class TorchFloorDiv(torch.nn.Module): def __init__(self): super(TorchFloorDiv, self).__init__() def forward(self, x, y): return torch.floor_divide(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_floor_divide_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 / tmp1 tmp3 = libdevice.floor(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_floor_divide_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchFloorDivNew(torch.nn.Module): def __init__(self): super(TorchFloorDivNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchFloorDiv
false
10,542
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
RAddInt
import torch class RAddInt(torch.nn.Module): def __init__(self): super(RAddInt, self).__init__() def forward(self, x): return 1 + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class RAddIntNew(torch.nn.Module): def __init__(self): super(RAddIntNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
RAddInt
false
10,543
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TorchClampOptionMax
import torch class TorchClampOptionMax(torch.nn.Module): def forward(self, x): return torch.clamp(x, max=0.1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.1 tmp2 = triton_helpers.minimum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TorchClampOptionMaxNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchClampOptionMax
false
10,544
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TorchNotEqual
import torch class TorchNotEqual(torch.nn.Module): def __init__(self): super(TorchNotEqual, self).__init__() def forward(self, x, y): return torch.ne(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_ne_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 != tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_ne_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchNotEqualNew(torch.nn.Module): def __init__(self): super(TorchNotEqualNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchNotEqual
false
10,545
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
Pow
import torch class Pow(torch.nn.Module): def __init__(self): super(Pow, self).__init__() def forward(self, x, y): return x ** y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.pow(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class PowNew(torch.nn.Module): def __init__(self): super(PowNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
Pow
false
10,546
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TorchMul
import torch class TorchMul(torch.nn.Module): def __init__(self): super(TorchMul, self).__init__() def forward(self, x, y): return torch.mul(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchMulNew(torch.nn.Module): def __init__(self): super(TorchMulNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchMul
false
10,547
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TorchPow
import torch class TorchPow(torch.nn.Module): def __init__(self): super(TorchPow, self).__init__() def forward(self, x, y): return torch.pow(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.pow(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchPowNew(torch.nn.Module): def __init__(self): super(TorchPowNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchPow
false
10,548
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
RDivFloat
import torch class RDivFloat(torch.nn.Module): def __init__(self): super(RDivFloat, self).__init__() def forward(self, x): return 100.0 / x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 100.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class RDivFloatNew(torch.nn.Module): def __init__(self): super(RDivFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
RDivFloat
false
10,549
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TorchSub
import torch class TorchSub(torch.nn.Module): def __init__(self): super(TorchSub, self).__init__() def forward(self, x, y): return torch.sub(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchSubNew(torch.nn.Module): def __init__(self): super(TorchSubNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchSub
false
10,550
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
TorchAdd
import torch class TorchAdd(torch.nn.Module): def __init__(self): super(TorchAdd, self).__init__() def forward(self, x, y): return torch.add(x, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class TorchAddNew(torch.nn.Module): def __init__(self): super(TorchAddNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NVIDIA-AI-IOT-private/torch2trt
TorchAdd
false
10,551
[ "MIT" ]
0
953d60039e0c81e90eea467c3df2e6e3f7040242
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
WassersteinGeneratorLoss
import torch import torch.nn as nn import torch.autograd import torch.utils.data def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returning without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_generator_loss(fgz, reduction='mean'): return reduce(-1.0 * fgz, reduction) class GeneratorLoss(nn.Module): """Base class for all generator losses. .. note:: All Losses meant to be minimized for optimizing the Generator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(GeneratorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_generator, device, batch_size, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``generator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value = discriminator(fake)` 3. :math:`loss = loss\\_function(value)` 4. Backpropagate by computing :math:`\\nabla loss` 5. Run a step of the optimizer for generator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``generator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. 
""" if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_generator, device, batch_size, labels) else: if labels is None and generator.label_type == 'required': raise Exception('GAN model requires labels for training') noise = torch.randn(batch_size, generator.encoding_dims, device =device) optimizer_generator.zero_grad() if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) elif generator.label_type == 'generated': fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake) elif generator.label_type == 'generated': dgz = discriminator(fake, label_gen) else: dgz = discriminator(fake, labels) loss = self.forward(dgz) loss.backward() optimizer_generator.step() return loss.item() class WassersteinGeneratorLoss(GeneratorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(G) = -f(G(z)) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def forward(self, fgz): """Computes the loss for the given input. Args: dgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. Returns: scalar if reduction is applied else Tensor with dimensions (N, \\*). """ return wasserstein_generator_loss(fgz, self.reduction) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.autograd import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = -1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returning without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_generator_loss(fgz, reduction='mean'): return reduce(-1.0 * fgz, reduction) class GeneratorLoss(nn.Module): """Base class for all generator losses. .. note:: All Losses meant to be minimized for optimizing the Generator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(GeneratorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_generator, device, batch_size, labels=None): """Defines the standard ``train_ops`` used by most losses. 
Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``generator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value = discriminator(fake)` 3. :math:`loss = loss\\_function(value)` 4. Backpropagate by computing :math:`\\nabla loss` 5. Run a step of the optimizer for generator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``generator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_generator, device, batch_size, labels) else: if labels is None and generator.label_type == 'required': raise Exception('GAN model requires labels for training') noise = torch.randn(batch_size, generator.encoding_dims, device =device) optimizer_generator.zero_grad() if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) elif generator.label_type == 'generated': fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake) elif generator.label_type == 'generated': dgz = discriminator(fake, label_gen) else: dgz = discriminator(fake, labels) loss = self.forward(dgz) loss.backward() optimizer_generator.step() return loss.item() class WassersteinGeneratorLossNew(GeneratorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(G) = -f(G(z)) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
kayuksel/torchgan
WassersteinGeneratorLoss
false
10,552
[ "MIT" ]
0
739d97cef4c49fb80155de84e609471efafab107
https://github.com/kayuksel/torchgan/tree/739d97cef4c49fb80155de84e609471efafab107
MinimaxDiscriminatorLoss
import torch import torch.nn as nn import torch.autograd import torch.utils.data import torch.nn.functional as F def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'): target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing) target_zeros = torch.zeros_like(dx) loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction= reduction) loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction =reduction) return loss class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. 
""" if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class MinimaxDiscriminatorLoss(DiscriminatorLoss): """Minimax game discriminator loss from the original GAN paper `"Generative Adversarial Networks by Goodfellow et. al." <https://arxiv.org/abs/1406.2661>`_ The loss can be described as: .. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))] where - :math:`G` : Generator - :math:`D` : Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: label_smoothing (float, optional): The factor by which the labels (1 in this case) needs to be smoothened. For example, label_smoothing = 0.2 changes the value of the real labels to 0.8. reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def __init__(self, label_smoothing=0.0, reduction='mean', override_train_ops=None): super(MinimaxDiscriminatorLoss, self).__init__(reduction, override_train_ops) self.label_smoothing = label_smoothing def forward(self, dx, dgz): """Computes the loss for the given input. Args: dx (torch.Tensor) : Output of the Discriminator with real data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. dgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. Returns: scalar if reduction is applied else Tensor with dimensions (N, \\*). """ return minimax_discriminator_loss(dx, dgz, label_smoothing=self. label_smoothing, reduction=self.reduction) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.autograd import torch.utils.data import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp13 = tl.load(in_ptr1 + r0, None) tmp1 = 0.0 tmp2 = tmp1 * tmp0 tmp3 = triton_helpers.minimum(tmp1, tmp0) tmp4 = tl_math.abs(tmp0) tmp5 = -tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = libdevice.log1p(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tmp2 - tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp14 = triton_helpers.minimum(tmp1, tmp13) tmp15 = tl_math.abs(tmp13) tmp16 = -tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = libdevice.log1p(tmp17) tmp19 = tmp14 - tmp18 tmp20 = tmp13 - tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tmp24 = 256.0 tmp25 = tmp12 / tmp24 tmp26 = tmp23 / tmp24 tmp27 = tmp25 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'): target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing) target_zeros = torch.zeros_like(dx) loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction= reduction) loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction =reduction) return loss class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. 
note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class MinimaxDiscriminatorLossNew(DiscriminatorLoss): """Minimax game discriminator loss from the original GAN paper `"Generative Adversarial Networks by Goodfellow et. al." <https://arxiv.org/abs/1406.2661>`_ The loss can be described as: .. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))] where - :math:`G` : Generator - :math:`D` : Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: label_smoothing (float, optional): The factor by which the labels (1 in this case) needs to be smoothened. 
For example, label_smoothing = 0.2 changes the value of the real labels to 0.8. reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def __init__(self, label_smoothing=0.0, reduction='mean', override_train_ops=None): super(MinimaxDiscriminatorLossNew, self).__init__(reduction, override_train_ops) self.label_smoothing = label_smoothing def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kayuksel/torchgan
MinimaxDiscriminatorLoss
false
10,553
[ "MIT" ]
0
739d97cef4c49fb80155de84e609471efafab107
https://github.com/kayuksel/torchgan/tree/739d97cef4c49fb80155de84e609471efafab107
WassersteinDiscriminatorLoss
import torch import torch.nn as nn import torch.autograd import torch.utils.data def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returning without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_discriminator_loss(fx, fgz, reduction='mean'): return reduce(fgz - fx, reduction) class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. """ self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. 
""" if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class WassersteinDiscriminatorLoss(DiscriminatorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(D) = f(G(z)) - f(x) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. clip (tuple, optional): Tuple that specifies the maximum and minimum parameter clamping to be applied, as per the original version of the Wasserstein loss without Gradient Penalty. override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def __init__(self, reduction='mean', clip=None, override_train_ops=None): super(WassersteinDiscriminatorLoss, self).__init__(reduction, override_train_ops) if (isinstance(clip, tuple) or isinstance(clip, list)) and len(clip ) > 1: self.clip = clip else: self.clip = None def forward(self, fx, fgz): """Computes the loss for the given input. Args: fx (torch.Tensor) : Output of the Discriminator with real data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. fgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the dimensions (N, \\*) where \\* means any number of additional dimensions. Returns: scalar if reduction is applied else Tensor with dimensions (N, \\*). """ return wasserstein_discriminator_loss(fx, fgz, self.reduction) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by wasserstein discriminator loss. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. Clamp the discriminator parameters to satisfy :math:`lipschitz\\ condition` 2. :math:`fake = generator(noise)` 3. :math:`value_1 = discriminator(fake)` 4. :math:`value_2 = discriminator(real)` 5. :math:`loss = loss\\_function(value_1, value_2)` 6. 
Backpropagate by computing :math:`\\nabla loss` 7. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if self.clip is not None: for p in discriminator.parameters(): p.data.clamp_(self.clip[0], self.clip[1]) return super(WassersteinDiscriminatorLoss, self).train_ops( generator, discriminator, optimizer_discriminator, real_inputs, device, labels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.autograd import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce(x, reduction=None): """Applies reduction on a torch.Tensor. Args: x (torch.Tensor): The tensor on which reduction is to be applied. reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the above then the Tensor is returning without any change. Returns: As per the above ``reduction`` convention. """ if reduction == 'mean': return torch.mean(x) elif reduction == 'sum': return torch.sum(x) else: return x def wasserstein_discriminator_loss(fx, fgz, reduction='mean'): return reduce(fgz - fx, reduction) class DiscriminatorLoss(nn.Module): """Base class for all discriminator losses. .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this. Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size. If ``sum`` the elements of the output are summed. override_train_ops (function, optional): Function to be used in place of the default ``train_ops`` """ def __init__(self, reduction='mean', override_train_ops=None): super(DiscriminatorLoss, self).__init__() self.reduction = reduction self.override_train_ops = override_train_ops self.arg_map = {} def set_arg_map(self, value): """Updates the ``arg_map`` for passing a different value to the ``train_ops``. Args: value (dict): A mapping of the ``argument name`` in the method signature and the variable name in the ``Trainer`` it corresponds to. .. note:: If the ``train_ops`` signature is ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)`` then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``. In this case we make the following function call ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``. 
""" self.arg_map.update(value) def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by most losses. Losses which have a different training procedure can either ``subclass`` it **(recommended approach)** or make use of ``override_train_ops`` argument. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. :math:`fake = generator(noise)` 2. :math:`value_1 = discriminator(fake)` 3. :math:`value_2 = discriminator(real)` 4. :math:`loss = loss\\_function(value_1, value_2)` 5. Backpropagate by computing :math:`\\nabla loss` 6. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if labels is None and (generator.label_type == 'required' or discriminator.label_type == 'required'): raise Exception('GAN model requires labels for training') batch_size = real_inputs.size(0) noise = torch.randn(batch_size, generator.encoding_dims, device =device) if generator.label_type == 'generated': label_gen = torch.randint(0, generator.num_classes, ( batch_size,), device=device) optimizer_discriminator.zero_grad() if discriminator.label_type == 'none': dx = discriminator(real_inputs) elif discriminator.label_type == 'required': dx = discriminator(real_inputs, labels) else: dx = discriminator(real_inputs, label_gen) if generator.label_type == 'none': fake = generator(noise) elif generator.label_type == 'required': fake = generator(noise, labels) else: fake = generator(noise, label_gen) if discriminator.label_type == 'none': dgz = discriminator(fake.detach()) elif generator.label_type == 'generated': dgz = discriminator(fake.detach(), label_gen) else: dgz = discriminator(fake.detach(), labels) loss = self.forward(dx, dgz) loss.backward() optimizer_discriminator.step() return loss.item() class WassersteinDiscriminatorLossNew(DiscriminatorLoss): """Wasserstein GAN generator loss from `"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper The loss can be described as: .. math:: L(D) = f(G(z)) - f(x) where - :math:`G` : Generator - :math:`f` : Critic/Discriminator - :math:`x` : A sample from the data distribution - :math:`z` : A sample from the noise prior Args: reduction (str, optional): Specifies the reduction to apply to the output. If ``none`` no reduction will be applied. If ``mean`` the mean of the output. If ``sum`` the elements of the output will be summed. clip (tuple, optional): Tuple that specifies the maximum and minimum parameter clamping to be applied, as per the original version of the Wasserstein loss without Gradient Penalty. 
override_train_ops (function, optional): A function is passed to this argument, if the default ``train_ops`` is not to be used. """ def __init__(self, reduction='mean', clip=None, override_train_ops=None): super(WassersteinDiscriminatorLossNew, self).__init__(reduction, override_train_ops) if (isinstance(clip, tuple) or isinstance(clip, list)) and len(clip ) > 1: self.clip = clip else: self.clip = None def train_ops(self, generator, discriminator, optimizer_discriminator, real_inputs, device, labels=None): """Defines the standard ``train_ops`` used by wasserstein discriminator loss. The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops is as follows: 1. Clamp the discriminator parameters to satisfy :math:`lipschitz\\ condition` 2. :math:`fake = generator(noise)` 3. :math:`value_1 = discriminator(fake)` 4. :math:`value_2 = discriminator(real)` 5. :math:`loss = loss\\_function(value_1, value_2)` 6. Backpropagate by computing :math:`\\nabla loss` 7. Run a step of the optimizer for discriminator Args: generator (torchgan.models.Generator): The model to be optimized. discriminator (torchgan.models.Discriminator): The discriminator which judges the performance of the generator. optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters`` of the ``discriminator``. real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``. device (torch.device): Device on which the ``generator`` and ``discriminator`` is present. labels (torch.Tensor, optional): Labels for the data. Returns: Scalar value of the loss. """ if self.override_train_ops is not None: return self.override_train_ops(generator, discriminator, optimizer_discriminator, real_inputs, device, labels) else: if self.clip is not None: for p in discriminator.parameters(): p.data.clamp_(self.clip[0], self.clip[1]) return super(WassersteinDiscriminatorLossNew, self).train_ops( generator, discriminator, optimizer_discriminator, real_inputs, device, labels) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kayuksel/torchgan
WassersteinDiscriminatorLoss
false
10,554
[ "MIT" ]
0
739d97cef4c49fb80155de84e609471efafab107
https://github.com/kayuksel/torchgan/tree/739d97cef4c49fb80155de84e609471efafab107
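A minimal usage sketch for the WassersteinDiscriminatorLoss row above, assuming the reference (non-Triton) class definition from that row has been executed in the current session; the tensor shapes and the clip values are illustrative and are not part of the stored record.

import torch

criterion = WassersteinDiscriminatorLoss(reduction='mean', clip=(-0.01, 0.01))

# Stand-ins for critic outputs on a batch of real and generated samples.
fx = torch.randn(8, 1)   # f(x): critic scores for real data
fgz = torch.randn(8, 1)  # f(G(z)): critic scores for generated data

loss = criterion(fx, fgz)                      # reduce(fgz - fx, 'mean')
assert torch.isclose(loss, (fgz - fx).mean())  # matches the functional form in the row
print(loss.item())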
VirtualBatchNorm
import torch import torch.nn as nn import torch.autograd import torch.utils.data class VirtualBatchNorm(nn.Module): """Virtual Batch Normalization Module as proposed in the paper `"Improved Techniques for Training GANs by Salimans et. al." <https://arxiv.org/abs/1805.08318>`_ Performs Normalizes the features of a batch based on the statistics collected on a reference batch of samples that are chosen once and fixed from the start, as opposed to regular batch normalization that uses the statistics of the batch being normalized Virtual Batch Normalization requires that the size of the batch being normalized is at least a multiple of (and ideally equal to) the size of the reference batch. Keep this in mind while choosing the batch size in ```torch.utils.data.DataLoader``` or use ```drop_last=True``` .. math:: y = \\frac{x - \\mathrm{E}[x_{ref}]}{\\sqrt{\\mathrm{Var}[x_{ref}] + \\epsilon}} * \\gamma + \\beta where - :math:`x` : Batch Being Normalized - :math:`x_{ref}` : Reference Batch Args: in_features (int): Size of the input dimension to be normalized eps (float, optional): Value to be added to variance for numerical stability while normalizing """ def __init__(self, in_features, eps=1e-05): super(VirtualBatchNorm, self).__init__() self.in_features = in_features self.scale = nn.Parameter(torch.ones(in_features)) self.bias = nn.Parameter(torch.zeros(in_features)) self.ref_mu = None self.ref_var = None self.eps = eps def _batch_stats(self, x): """Computes the statistics of the batch ``x``. Args: x (torch.Tensor): Tensor whose statistics need to be computed. Returns: A tuple of the mean and variance of the batch ``x``. """ mu = torch.mean(x, dim=0, keepdim=True) var = torch.var(x, dim=0, keepdim=True) return mu, var def _normalize(self, x, mu, var): """Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``. Args: x (torch.Tensor): The Tensor to be normalized. mu (torch.Tensor): Mean using which the Tensor is to be normalized. var (torch.Tensor): Variance used in the normalization of ``x``. Returns: Normalized Tensor ``x``. """ std = torch.sqrt(self.eps + var) x = (x - mu) / std sizes = list(x.size()) for dim, i in enumerate(x.size()): if dim != 1: sizes[dim] = 1 scale = self.scale.view(*sizes) bias = self.bias.view(*sizes) return x * scale + bias def forward(self, x): """Computes the output of the Virtual Batch Normalization Args: x (torch.Tensor): A Torch Tensor of dimension at least 2 which is to be Normalized Returns: Torch Tensor of the same dimension after normalizing with respect to the statistics of the reference batch """ assert x.size(1) == self.in_features if self.ref_mu is None or self.ref_var is None: self.ref_mu, self.ref_var = self._batch_stats(x) self.ref_mu = self.ref_mu.clone().detach() self.ref_var = self.ref_var.clone().detach() out = self._normalize(x, self.ref_mu, self.ref_var) else: out = self._normalize(x, self.ref_mu, self.ref_var) self.ref_mu = None self.ref_var = None return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.autograd import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp21, xmask) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tmp2 / tmp6 tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_var_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_sqrt_sub_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf2, buf1, buf0, primals_1, buf0, buf1 class VirtualBatchNormNew(nn.Module): """Virtual Batch Normalization Module as proposed in the paper `"Improved Techniques for Training GANs by Salimans et. al." <https://arxiv.org/abs/1805.08318>`_ Performs Normalizes the features of a batch based on the statistics collected on a reference batch of samples that are chosen once and fixed from the start, as opposed to regular batch normalization that uses the statistics of the batch being normalized Virtual Batch Normalization requires that the size of the batch being normalized is at least a multiple of (and ideally equal to) the size of the reference batch. 
Keep this in mind while choosing the batch size in ```torch.utils.data.DataLoader``` or use ```drop_last=True``` .. math:: y = \\frac{x - \\mathrm{E}[x_{ref}]}{\\sqrt{\\mathrm{Var}[x_{ref}] + \\epsilon}} * \\gamma + \\beta where - :math:`x` : Batch Being Normalized - :math:`x_{ref}` : Reference Batch Args: in_features (int): Size of the input dimension to be normalized eps (float, optional): Value to be added to variance for numerical stability while normalizing """ def __init__(self, in_features, eps=1e-05): super(VirtualBatchNormNew, self).__init__() self.in_features = in_features self.scale = nn.Parameter(torch.ones(in_features)) self.bias = nn.Parameter(torch.zeros(in_features)) self.ref_mu = None self.ref_var = None self.eps = eps def _batch_stats(self, x): """Computes the statistics of the batch ``x``. Args: x (torch.Tensor): Tensor whose statistics need to be computed. Returns: A tuple of the mean and variance of the batch ``x``. """ mu = torch.mean(x, dim=0, keepdim=True) var = torch.var(x, dim=0, keepdim=True) return mu, var def _normalize(self, x, mu, var): """Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``. Args: x (torch.Tensor): The Tensor to be normalized. mu (torch.Tensor): Mean using which the Tensor is to be normalized. var (torch.Tensor): Variance used in the normalization of ``x``. Returns: Normalized Tensor ``x``. """ std = torch.sqrt(self.eps + var) x = (x - mu) / std sizes = list(x.size()) for dim, i in enumerate(x.size()): if dim != 1: sizes[dim] = 1 scale = self.scale.view(*sizes) bias = self.bias.view(*sizes) return x * scale + bias def forward(self, input_0): primals_2 = self.scale primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
kayuksel/torchgan
VirtualBatchNorm
false
10,555
[ "MIT" ]
0
739d97cef4c49fb80155de84e609471efafab107
https://github.com/kayuksel/torchgan/tree/739d97cef4c49fb80155de84e609471efafab107
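A small sketch of how the VirtualBatchNorm module above is driven, assuming its reference class definition is in scope; shapes are illustrative. The first call fixes the reference statistics, the second call normalizes with them and then clears them.

import torch

vbn = VirtualBatchNorm(in_features=4)

ref_batch = torch.randn(8, 4, 16, 16)  # reference batch that fixes mu / var
_ = vbn(ref_batch)                     # first call stores ref_mu and ref_var
assert vbn.ref_mu is not None and vbn.ref_var is not None

batch = torch.randn(8, 4, 16, 16)
out = vbn(batch)                       # normalized with the *reference* statistics
assert out.shape == batch.shape
assert vbn.ref_mu is None              # statistics are reset after the second call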
ConvCIFAR
import torch import torch.nn as nn import torch.nn.functional as F class ConvCIFAR(nn.Module): def __init__(self): super(ConvCIFAR, self).__init__() self.conv1 = nn.Conv2d(3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(64 * 4 * 4, 500) self.fc2 = nn.Linear(500, 10) self.dropout = nn.Dropout(0.25) def forward(self, x): x = self.pool(F.elu(self.conv1(x))) x = self.pool(F.elu(self.conv2(x))) x = self.pool(F.elu(self.conv3(x))) x = x.view(-1, 64 * 4 * 4) x = self.dropout(x) x = F.elu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, 
tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_elu_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (500, 1024), (1024, 1)) assert_size_stride(primals_9, (500,), (1,)) assert_size_stride(primals_10, (10, 500), (500, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), 
groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(262144)](buf1, primals_2, buf2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf4 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf2, buf3, buf4, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_elu_2[grid(131072)](buf6, primals_5, buf7, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf8 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf9 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf7, buf8, buf9, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 16, 16), (16384, 256, 16, 1)) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) triton_poi_fused_convolution_elu_4[grid(65536)](buf11, primals_7, buf12, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf13 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8) buf14 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf12, buf13, buf14, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((16, 500), (500, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf14, (16, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_8, (1024, 500), (1, 1024), 0), alpha=1, beta=1, out=buf15) del primals_9 buf16 = empty_strided_cuda((16, 500), (500, 1), torch.float32) triton_poi_fused_elu_6[grid(8000)](buf15, buf16, 8000, XBLOCK=256, num_warps=4, num_stages=1) buf17 = empty_strided_cuda((16, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf16, reinterpret_tensor( primals_10, (500, 10), (1, 500), 0), alpha=1, beta=1, out=buf17) del primals_11 return (buf17, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf9, buf11, buf12, buf13, reinterpret_tensor(buf14, (16, 1024), (1024, 1), 0), buf15, buf16, primals_10, primals_8) class ConvCIFARNew(nn.Module): def __init__(self): super(ConvCIFARNew, self).__init__() self.conv1 = nn.Conv2d(3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(64 * 4 * 4, 500) self.fc2 = nn.Linear(500, 10) self.dropout = nn.Dropout(0.25) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
mnguyen0226/soo_non_convex_ml
ConvCIFAR
false
10,556
[ "MIT" ]
0
2ffbedbe5eb536e017c643f725cc08551a5b1e9f
https://github.com/mnguyen0226/soo_non_convex_ml/tree/2ffbedbe5eb536e017c643f725cc08551a5b1e9f
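A hedged usage sketch for the ConvCIFAR row above, assuming its reference class is in scope. Because fc1 expects 64 * 4 * 4 features, a 3x32x32 (CIFAR-sized) input keeps the batch dimension intact; with the 64x64 input from the row's get_inputs(), the view(-1, 1024) folds each sample into four rows, which is why the Triton call in the row returns a (16, 10) output for a batch of 4.

import torch

model = ConvCIFAR()
model.eval()                   # registered dropout layer is disabled in eval mode

x = torch.rand(4, 3, 32, 32)   # CIFAR-10 sized input
logits = model(x)
print(logits.shape)            # torch.Size([4, 10])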
GGCL_D
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter class GGCL_D(Module): """Graph Gaussian Convolution Layer (GGCL) when the input is distribution""" def __init__(self, in_features, out_features, dropout): super(GGCL_D, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.weight_miu = Parameter(torch.FloatTensor(in_features, out_features)) self.weight_sigma = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight_miu) torch.nn.init.xavier_uniform_(self.weight_sigma) def forward(self, miu, sigma, adj_norm1, adj_norm2, gamma=1): miu = F.dropout(miu, self.dropout, training=self.training) sigma = F.dropout(sigma, self.dropout, training=self.training) miu = F.elu(miu @ self.weight_miu) sigma = F.relu(sigma @ self.weight_sigma) Att = torch.exp(-gamma * sigma) mean_out = adj_norm1 @ (miu * Att) sigma_out = adj_norm2 @ (sigma * Att * Att) return mean_out, sigma_out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module from torch.nn.modules.module import Module from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_exp_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = -1.0 tmp12 = tmp10 * tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp7 * tmp13 tmp15 = tmp10 * tmp13 tmp16 = tmp15 * tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) tl.store(out_ptr2 + x0, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_4, out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_exp_mul_relu_0[grid(256)](buf0, buf1, buf2, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3) buf6 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(primals_6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf5 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf0, buf1, buf4, reinterpret_tensor(primals_6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_5, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0) class GGCL_DNew(Module): """Graph Gaussian Convolution Layer (GGCL) when the input is distribution""" def __init__(self, in_features, 
out_features, dropout): super(GGCL_DNew, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.weight_miu = Parameter(torch.FloatTensor(in_features, out_features)) self.weight_sigma = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight_miu) torch.nn.init.xavier_uniform_(self.weight_sigma) def forward(self, input_0, input_1, input_2, input_3): primals_3 = self.weight_miu primals_4 = self.weight_sigma primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 primals_6 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
marblet/DeepRobust
GGCL_D
false
10,557
[ "MIT" ]
0
126c05818e38062c2423cd40dc8937ccc43c738b
https://github.com/marblet/DeepRobust/tree/126c05818e38062c2423cd40dc8937ccc43c738b
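A minimal sketch for the GGCL_D row above, assuming the reference class is in scope. The layer is written around 2-D node-feature matrices with (N, N) normalized adjacencies; the identity adjacency and the sizes used here are placeholders, not values taken from the dataset row.

import torch

layer = GGCL_D(in_features=8, out_features=16, dropout=0.5)
layer.eval()                            # disable F.dropout for a deterministic pass

n_nodes = 5
miu = torch.randn(n_nodes, 8)           # mean of the input Gaussian features
sigma = torch.rand(n_nodes, 8)          # non-negative variance of the input features
adj = torch.eye(n_nodes)                # stand-in for adj_norm1 / adj_norm2

mean_out, sigma_out = layer(miu, sigma, adj, adj, gamma=1)
print(mean_out.shape, sigma_out.shape)  # torch.Size([5, 16]) torch.Size([5, 16])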
Autoencoder
import torch import torch.nn as nn import torch.nn.functional as F class Autoencoder(nn.Module): def __init__(self, input_length, output_length=None, neuron_multiplier= 1, sigmoid=False, drop=False, drop_pct=0.3): """ Dense autoencoder. Args: input_length (int): Length (i.e., size) of input sample. output_length (int): Length of output sample. Defaults to None, leave as default if input and output are equal size. neuron_multiplier (int): Number to augment the set number of neurons. Defaults to 1. sigmoid (boolean): Defaults to False. Leave as default if output is nn.Linear (not sigmoid). drop (boolean): Defaults to False. True activates dropout for each layer except final. drop_pct (float): Amount of dropout to use (if drop is True). Defaults to 0.3. """ super(Autoencoder, self).__init__() self.input_length = input_length if not output_length: self.output_length = input_length if output_length: self.output_length = output_length self.neuron_multiplier = neuron_multiplier self.sigmoid = sigmoid self.drop = drop self.drop_pct = drop_pct self.layer1 = nn.Linear(in_features=self.input_length, out_features =256 * self.neuron_multiplier) self.layer2 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer3 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=64 * self.neuron_multiplier) self.layer4 = nn.Linear(in_features=64 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer5 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=256 * self.neuron_multiplier) self.layer6 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=self.output_length) self.out = nn.Linear(in_features=self.output_length, out_features= self.output_length) def forward(self, x): if self.drop: drp = nn.Dropout(p=self.drop_pct) x = F.relu(self.layer1(x)) if self.drop: x = drp(x) x = F.relu(self.layer2(x)) if self.drop: x = drp(x) x = F.relu(self.layer3(x)) if self.drop: x = drp(x) x = F.relu(self.layer4(x)) if self.drop: x = drp(x) x = F.relu(self.layer5(x)) if self.drop: x = drp(x) x = F.relu(self.layer6(x)) if self.drop: x = drp(x) if self.sigmoid: x = torch.sigmoid(self.out(x)) if not self.sigmoid: x = self.out(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_length': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (128, 64), (64, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128), (128, 1)) assert_size_stride(primals_11, (256,), (1,)) 
assert_size_stride(primals_12, (4, 256), (256, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf18 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf18, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf17 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3, primals_5, buf17, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf4 buf16 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch .bool) triton_poi_fused_relu_threshold_backward_2[grid(4096)](buf5, primals_7, buf16, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 128), (1, 64), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf6 buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf7, primals_9, buf15, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (64, 128), (128, 1), 0), reinterpret_tensor(primals_10, (128, 256), (1, 128), 0), out=buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf8 buf14 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf9, primals_11, buf14, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (64, 256), (256, 1), 0), reinterpret_tensor(primals_12, (256, 4), (1, 256), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(256)](buf11, primals_13, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_15 return (reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 
0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor( buf7, (64, 128), (128, 1), 0), reinterpret_tensor(buf9, (64, 256), (256, 1), 0), reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_14, buf13, primals_12, buf14, primals_10, buf15, primals_8, buf16, primals_6, buf17, primals_4, buf18) class AutoencoderNew(nn.Module): def __init__(self, input_length, output_length=None, neuron_multiplier= 1, sigmoid=False, drop=False, drop_pct=0.3): """ Dense autoencoder. Args: input_length (int): Length (i.e., size) of input sample. output_length (int): Length of output sample. Defaults to None, leave as default if input and output are equal size. neuron_multiplier (int): Number to augment the set number of neurons. Defaults to 1. sigmoid (boolean): Defaults to False. Leave as default if output is nn.Linear (not sigmoid). drop (boolean): Defaults to False. True activates dropout for each layer except final. drop_pct (float): Amount of dropout to use (if drop is True). Defaults to 0.3. """ super(AutoencoderNew, self).__init__() self.input_length = input_length if not output_length: self.output_length = input_length if output_length: self.output_length = output_length self.neuron_multiplier = neuron_multiplier self.sigmoid = sigmoid self.drop = drop self.drop_pct = drop_pct self.layer1 = nn.Linear(in_features=self.input_length, out_features =256 * self.neuron_multiplier) self.layer2 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer3 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=64 * self.neuron_multiplier) self.layer4 = nn.Linear(in_features=64 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer5 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=256 * self.neuron_multiplier) self.layer6 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=self.output_length) self.out = nn.Linear(in_features=self.output_length, out_features= self.output_length) def forward(self, input_0): primals_1 = self.layer1.weight primals_2 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_6 = self.layer3.weight primals_7 = self.layer3.bias primals_8 = self.layer4.weight primals_9 = self.layer4.bias primals_10 = self.layer5.weight primals_11 = self.layer5.bias primals_12 = self.layer6.weight primals_13 = self.layer6.bias primals_14 = self.out.weight primals_15 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
mariajmolina/ML-for-S2S
Autoencoder
false
10,558
[ "MIT" ]
0
3de32e72042ba7e8b37a433579fa9c5630246d8c
https://github.com/mariajmolina/ML-for-S2S/tree/3de32e72042ba7e8b37a433579fa9c5630246d8c
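A short usage sketch for the Autoencoder row above, assuming the reference class is in scope; the 784-feature input size and the batch size are illustrative choices, not part of the record.

import torch

model = Autoencoder(input_length=784)   # sigmoid=False, drop=False by default
x = torch.rand(32, 784)                 # e.g. flattened 28x28 samples
recon = model(x)
print(recon.shape)                      # torch.Size([32, 784])

# Passing sigmoid=True at construction squashes the final output into [0, 1],
# which suits inputs scaled to that range.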
GGCL_F
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter class GGCL_F(Module): """Graph Gaussian Convolution Layer (GGCL) when the input is feature""" def __init__(self, in_features, out_features, dropout=0.6): super(GGCL_F, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.weight_miu = Parameter(torch.FloatTensor(in_features, out_features)) self.weight_sigma = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight_miu) torch.nn.init.xavier_uniform_(self.weight_sigma) def forward(self, features, adj_norm1, adj_norm2, gamma=1): features = F.dropout(features, self.dropout, training=self.training) self.miu = F.elu(torch.mm(features, self.weight_miu)) self.sigma = F.relu(torch.mm(features, self.weight_sigma)) Att = torch.exp(-gamma * self.sigma) miu_out = adj_norm1 @ (self.miu * Att) sigma_out = adj_norm2 @ (self.sigma * Att * Att) return miu_out, sigma_out def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module from torch.nn.modules.module import Module from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_exp_mul_relu_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp11 = -1.0 tmp12 = tmp10 * tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp7 * tmp13 tmp15 = tmp10 * tmp13 tmp16 = tmp15 * tmp13 tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(in_out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) tl.store(out_ptr3 + x0, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_3, out=buf2) del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_exp_mul_relu_0[grid(16)](buf3, buf0, buf1, buf4, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_4, buf4, out=buf5) buf8 = buf4 del buf4 extern_kernels.mm(primals_5, buf7, out=buf8) del buf7 return buf5, buf8, buf3, buf1, buf0, buf1, buf3, buf6, reinterpret_tensor( primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class GGCL_FNew(Module): """Graph Gaussian Convolution Layer (GGCL) when the input is feature""" def __init__(self, in_features, out_features, dropout=0.6): super(GGCL_FNew, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.weight_miu = Parameter(torch.FloatTensor(in_features, out_features)) self.weight_sigma = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight_miu) torch.nn.init.xavier_uniform_(self.weight_sigma) def forward(self, 
input_0, input_1, input_2): primals_1 = self.weight_miu primals_2 = self.weight_sigma primals_3 = input_0 primals_4 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
marblet/DeepRobust
GGCL_F
false
10,559
[ "MIT" ]
0
126c05818e38062c2423cd40dc8937ccc43c738b
https://github.com/marblet/DeepRobust/tree/126c05818e38062c2423cd40dc8937ccc43c738b
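A minimal smoke-test sketch for the GGCL_F record above (my own code, not from the DeepRobust repo). It assumes the eager GGCL_F class from the record is in scope and needs only CPU PyTorch; the compiled GGCL_FNew variant additionally requires a CUDA device, since its call() allocates CUDA buffers.

import torch

layer = GGCL_F(in_features=4, out_features=4, dropout=0.6)    # matches get_init_inputs()
layer.eval()                                                   # turn dropout off for a deterministic pass
features, adj_norm1, adj_norm2 = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
miu_out, sigma_out = layer(features, adj_norm1, adj_norm2, gamma=1)
print(miu_out.shape, sigma_out.shape)    # both torch.Size([4, 4])
# sigma is ReLU-gated and the random adjacency is non-negative here, so the variance output stays >= 0
assert (sigma_out >= 0).all()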
MinibatchDiscrimination1d
import torch import torch.nn as nn import torch.autograd import torch.utils.data class MinibatchDiscrimination1d(nn.Module): """1D Minibatch Discrimination Module as proposed in the paper `"Improved Techniques for Training GANs by Salimans et. al." <https://arxiv.org/abs/1805.08318>`_ Allows the Discriminator to easily detect mode collapse by augmenting the activations to the succeeding layer with side information that allows it to determine the 'closeness' of the minibatch examples with each other .. math :: M_i = T * f(x_{i}) .. math :: c_b(x_{i}, x_{j}) = \\exp(-||M_{i, b} - M_{j, b}||_1) \\in \\mathbb{R}. .. math :: o(x_{i})_b &= \\sum_{j=1}^{n} c_b(x_{i},x_{j}) \\in \\mathbb{R} \\\\ .. math :: o(x_{i}) &= \\Big[ o(x_{i})_1, o(x_{i})_2, \\dots, o(x_{i})_B \\Big] \\in \\mathbb{R}^B \\\\ .. math :: o(X) \\in \\mathbb{R}^{n \\times B} This is followed by concatenating :math:`o(x_{i})` and :math:`f(x_{i})` where - :math:`f(x_{i}) \\in \\mathbb{R}^A` : Activations from an intermediate layer - :math:`f(x_{i}) \\in \\mathbb{R}^A` : Parameter Tensor for generating minibatch discrimination matrix Args: in_features (int): Features input corresponding to dimension :math:`A` out_features (int): Number of output features that are to be concatenated corresponding to dimension :math:`B` intermediate_features (int): Intermediate number of features corresponding to dimension :math:`C` Returns: A Tensor of size :math:`(N, in_features + out_features)` where :math:`N` is the batch size """ def __init__(self, in_features, out_features, intermediate_features=16): super(MinibatchDiscrimination1d, self).__init__() self.in_features = in_features self.out_features = out_features self.intermediate_features = intermediate_features self.T = nn.Parameter(torch.Tensor(in_features, out_features, intermediate_features)) nn.init.normal_(self.T) def forward(self, x): """Computes the output of the Minibatch Discrimination Layer Args: x (torch.Tensor): A Torch Tensor of dimensions :math: `(N, infeatures)` Returns: 3D Torch Tensor of size :math: `(N,infeatures + outfeatures)` after applying Minibatch Discrimination """ M = torch.mm(x, self.T.view(self.in_features, -1)) M = M.view(-1, self.out_features, self.intermediate_features ).unsqueeze(0) M_t = M.permute(1, 0, 2, 3) out = torch.sum(torch.exp(-torch.abs(M - M_t).sum(3)), dim=0) - 1 return torch.cat([x, out], 1) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.autograd import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_abs_exp_neg_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x5 = xindex % 16 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (r3 + 16 * x5), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + 16 * x0 + 64 * x2), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = 1.0 tmp17 = tmp15 - tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp6, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 16), (64, 16, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_abs_exp_neg_sub_sum_0[grid(64)](buf2, buf0, 64, 16, XBLOCK=8, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_2, buf2, buf3, 32, XBLOCK= 32, num_warps=1, num_stages=1) return buf3, buf0, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class MinibatchDiscrimination1dNew(nn.Module): """1D Minibatch Discrimination Module as proposed in the paper `"Improved Techniques for 
Training GANs by Salimans et. al." <https://arxiv.org/abs/1805.08318>`_ Allows the Discriminator to easily detect mode collapse by augmenting the activations to the succeeding layer with side information that allows it to determine the 'closeness' of the minibatch examples with each other .. math :: M_i = T * f(x_{i}) .. math :: c_b(x_{i}, x_{j}) = \\exp(-||M_{i, b} - M_{j, b}||_1) \\in \\mathbb{R}. .. math :: o(x_{i})_b &= \\sum_{j=1}^{n} c_b(x_{i},x_{j}) \\in \\mathbb{R} \\\\ .. math :: o(x_{i}) &= \\Big[ o(x_{i})_1, o(x_{i})_2, \\dots, o(x_{i})_B \\Big] \\in \\mathbb{R}^B \\\\ .. math :: o(X) \\in \\mathbb{R}^{n \\times B} This is followed by concatenating :math:`o(x_{i})` and :math:`f(x_{i})` where - :math:`f(x_{i}) \\in \\mathbb{R}^A` : Activations from an intermediate layer - :math:`f(x_{i}) \\in \\mathbb{R}^A` : Parameter Tensor for generating minibatch discrimination matrix Args: in_features (int): Features input corresponding to dimension :math:`A` out_features (int): Number of output features that are to be concatenated corresponding to dimension :math:`B` intermediate_features (int): Intermediate number of features corresponding to dimension :math:`C` Returns: A Tensor of size :math:`(N, in_features + out_features)` where :math:`N` is the batch size """ def __init__(self, in_features, out_features, intermediate_features=16): super(MinibatchDiscrimination1dNew, self).__init__() self.in_features = in_features self.out_features = out_features self.intermediate_features = intermediate_features self.T = nn.Parameter(torch.Tensor(in_features, out_features, intermediate_features)) nn.init.normal_(self.T) def forward(self, input_0): primals_1 = self.T primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
kayuksel/torchgan
MinibatchDiscrimination1d
false
10,560
[ "MIT" ]
0
739d97cef4c49fb80155de84e609471efafab107
https://github.com/kayuksel/torchgan/tree/739d97cef4c49fb80155de84e609471efafab107
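A quick shape check for the MinibatchDiscrimination1d record (my sketch, not repo code), confirming the docstring's claim that the layer returns a tensor of size (N, in_features + out_features) with the original activations kept in the first block.

import torch

layer = MinibatchDiscrimination1d(in_features=4, out_features=4, intermediate_features=16)
x = torch.rand(4, 4)                      # N = 4 samples, 4 features each
out = layer(x)
assert out.shape == (4, 4 + 4)            # (N, in_features + out_features)
assert torch.allclose(out[:, :4], x)      # first block is the untouched input, the rest is the closeness term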
Generator
import torch import torch.nn as nn import torch.nn.functional as F class Generator(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, emb_size, vocab_size): super(Generator, self).__init__() self.proj = nn.Linear(emb_size, vocab_size, bias=False) def forward(self, x): return F.log_softmax(torch.tanh(self.proj(x)), dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'emb_size': 4, 'vocab_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = libdevice.tanh(tmp0) tmp3 = libdevice.tanh(tmp2) tmp5 = libdevice.tanh(tmp4) tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = libdevice.tanh(tmp7) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = libdevice.tanh(tmp10) tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp1 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf2 class GeneratorNew(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, emb_size, vocab_size): super(GeneratorNew, self).__init__() self.proj = nn.Linear(emb_size, vocab_size, bias=False) def forward(self, input_0): primals_1 = self.proj.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
msobrevillac/Multilingual-RDF-Verbalizer
Generator
false
10,561
[ "MIT" ]
0
ba396693f65eaf74d1f60eb9aed3e78ab9593b22
https://github.com/msobrevillac/Multilingual-RDF-Verbalizer/tree/ba396693f65eaf74d1f60eb9aed3e78ab9593b22
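For the Generator record, a small sanity sketch (mine): since the module ends in log_softmax, exponentiating its output should give a probability distribution over the vocabulary at every position.

import torch

gen = Generator(emb_size=4, vocab_size=4)
logp = gen(torch.rand(4, 4, 4, 4))                                    # log-probabilities, shape (4, 4, 4, 4)
assert torch.allclose(logp.exp().sum(dim=-1), torch.ones(4, 4, 4))    # each position sums to 1 after exp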
MVloss
import torch import torch.distributed import torch.nn as nn class MVloss(nn.Module): def __init__(self): super(MVloss, self).__init__() def forward(self, xRA0, xRA20, xRA_20, target, wRA0, wRA20, wRA_20): criterion_MV = torch.nn.CrossEntropyLoss() loss_multiview = criterion_MV(wRA0 * xRA0 + wRA20 * xRA20 + wRA_20 * xRA_20, target) return loss_multiview def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.distributed import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp4 = tl.load(in_ptr3 + x0, xmask) tmp7 = tl.load(in_ptr4 + x0, xmask) tmp8 = tl.load(in_ptr5 + x0, xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg5_1, (4, 4, 
4, 4), (64, 16, 4, 1)) assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 del arg5_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_div_mul_neg_sum_2[grid(1)](buf3, buf1, arg6_1, 1, 256, num_warps=2, num_stages=1) del arg6_1 del buf1 return buf3, class MVlossNew(nn.Module): def __init__(self): super(MVlossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3, input_4, input_5, input_6): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 arg4_1 = input_4 arg5_1 = input_5 arg6_1 = input_6 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1]) return output[0]
muzammilbehzad/MultiviewTransformer
MVloss
false
10,562
[ "MIT" ]
0
c6c7c34c8d156e187a986e35268e1fc4a5d0175d
https://github.com/muzammilbehzad/MultiviewTransformer/tree/c6c7c34c8d156e187a986e35268e1fc4a5d0175d
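A hedged equivalence sketch for MVloss (my own code): the module is just CrossEntropyLoss applied to a per-element weighted sum of the three view logits, so fusing the logits by hand should reproduce the same scalar. Soft probability targets of the same shape as the input need a reasonably recent PyTorch (1.10 or later).

import torch

crit = MVloss()
xs = [torch.rand(4, 4, 4, 4) for _ in range(3)]    # logits from the RA0 / RA20 / RA-20 views
ws = [torch.rand(4, 4, 4, 4) for _ in range(3)]    # per-element view weights
target = torch.rand(4, 4, 4, 4)                    # soft targets, as in get_inputs()
loss = crit(xs[0], xs[1], xs[2], target, ws[0], ws[1], ws[2])
fused = ws[0] * xs[0] + ws[1] * xs[1] + ws[2] * xs[2]
assert torch.allclose(loss, torch.nn.CrossEntropyLoss()(fused, target))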
Critic
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=256, fc2_units=128): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = F.relu(self.fcs1(state)) x = torch.cat((xs, action), dim=1) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 260 x1 = xindex // 260 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 260, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-256 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (128, 260), (260, 1)) assert_size_stride(primals_6, (128,), (1,)) assert_size_stride(primals_7, (1, 128), (128, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 260), (260, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1040)](buf0, primals_2, primals_4, buf1, 1040, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (260, 128), ( 1, 260), 0), 
out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(512)](buf3, primals_6, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 256), (256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(1024)](buf0, primals_2, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=256, fc2_units=128): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
moritzzzzz/Continuous_Control
Critic
false
10,563
[ "Apache-2.0" ]
0
655530bdbbe77eb285c95246331be4636c0d076c
https://github.com/moritzzzzz/Continuous_Control/tree/655530bdbbe77eb285c95246331be4636c0d076c
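Usage sketch for the Critic record (mine, not from the repo): a batch of (state, action) pairs maps to one Q-value per sample.

import torch

critic = Critic(state_size=4, action_size=4, seed=4)
q = critic(torch.rand(4, 4), torch.rand(4, 4))    # states and actions for a batch of 4
assert q.shape == (4, 1)                          # fcs1 -> cat(action) -> fc2 -> fc3 yields one scalar each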
Loss
import torch import torch.utils.data import torch import torch.nn as nn class Loss(nn.Module): def __init__(self): super(Loss, self).__init__() def forward(self, x, y): z = (x - y) ** 2 t = z[:, 1:].sum(dim=1) loss = z[:, 0] + y[:, 0] * t loss = loss.mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp7 + tmp11 tmp15 = tmp13 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tmp12 + tmp16 tmp18 = tmp1 * tmp17 tmp19 = tmp3 + tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = 64.0 tmp24 = tmp22 / tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_mul_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class LossNew(nn.Module): def __init__(self): super(LossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
medric49/lookatme
Loss
false
10,564
[ "Apache-2.0" ]
0
bbd3d9ae8e5787d7ec53955df9aaba80959f46e5
https://github.com/medric49/lookatme/tree/bbd3d9ae8e5787d7ec53955df9aaba80959f46e5
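A by-hand recomputation of the Loss record (my sketch): squared error on channel 0 plus a y[:, 0]-weighted sum of squared errors over the remaining channels, averaged over the batch, which is what the fused Triton kernel above reduces in a single pass.

import torch

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
z = (x - y) ** 2
expected = (z[:, 0] + y[:, 0] * z[:, 1:].sum(dim=1)).mean()
assert torch.allclose(Loss()(x, y), expected)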
EqualLinear
import math import torch import torch.nn as nn from torch.nn import functional as F class EqualLinear(nn.Module): """Equalized Linear as StyleGAN2. Args: in_channels (int): Size of each sample. out_channels (int): Size of each output sample. bias (bool): If set to ``False``, the layer will not learn an additive bias. Default: ``True``. bias_init_val (float): Bias initialized value. Default: 0. lr_mul (float): Learning rate multiplier. Default: 1. activation (None | str): The activation after ``linear`` operation. Supported: 'fused_lrelu', None. Default: None. """ def __init__(self, in_channels, out_channels, bias=True, bias_init_val= 0, lr_mul=1, activation=None): super(EqualLinear, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.lr_mul = lr_mul self.activation = activation if self.activation not in ['fused_lrelu', None]: raise ValueError( f"Wrong activation value in EqualLinear: {activation}Supported ones are: ['fused_lrelu', None]." ) self.scale = 1 / math.sqrt(in_channels) * lr_mul self.weight = nn.Parameter(torch.randn(out_channels, in_channels). div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_channels).fill_( bias_init_val)) else: self.register_parameter('bias', None) def forward(self, x): if self.bias is None: bias = None else: bias = self.bias * self.lr_mul if self.activation == 'fused_lrelu': out = F.linear(x, self.weight * self.scale) out = fused_leaky_relu(out, bias) else: out = F.linear(x, self.weight * self.scale, bias=bias) return out def __repr__(self): return ( f'{self.__class__.__name__}(in_channels={self.in_channels}, out_channels={self.out_channels}, bias={self.bias is not None})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class EqualLinearNew(nn.Module): """Equalized Linear as StyleGAN2. Args: in_channels (int): Size of each sample. out_channels (int): Size of each output sample. bias (bool): If set to ``False``, the layer will not learn an additive bias. Default: ``True``. bias_init_val (float): Bias initialized value. Default: 0. lr_mul (float): Learning rate multiplier. Default: 1. activation (None | str): The activation after ``linear`` operation. Supported: 'fused_lrelu', None. Default: None. """ def __init__(self, in_channels, out_channels, bias=True, bias_init_val= 0, lr_mul=1, activation=None): super(EqualLinearNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.lr_mul = lr_mul self.activation = activation if self.activation not in ['fused_lrelu', None]: raise ValueError( f"Wrong activation value in EqualLinear: {activation}Supported ones are: ['fused_lrelu', None]." ) self.scale = 1 / math.sqrt(in_channels) * lr_mul self.weight = nn.Parameter(torch.randn(out_channels, in_channels). div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_channels).fill_( bias_init_val)) else: self.register_parameter('bias', None) def __repr__(self): return ( f'{self.__class__.__name__}(in_channels={self.in_channels}, out_channels={self.out_channels}, bias={self.bias is not None})' ) def forward(self, input_0): primals_2 = self.weight primals_1 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
naarkhoo/GFPGAN
EqualLinear
false
10,565
[ "BSD-3-Clause" ]
0
73559ec44a734fe084b6a0e28107295c5e98f335
https://github.com/naarkhoo/GFPGAN/tree/73559ec44a734fe084b6a0e28107295c5e98f335
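For the EqualLinear record, a sketch (mine) of the equalized-learning-rate trick under the defaults lr_mul=1 and activation=None: the forward pass should match a plain F.linear call with the stored weight rescaled by 1/sqrt(in_channels).

import math
import torch
import torch.nn.functional as F

layer = EqualLinear(in_channels=4, out_channels=4)            # scale = 1 / sqrt(4) = 0.5
x = torch.rand(4, 4, 4, 4)
manual = F.linear(x, layer.weight * (1 / math.sqrt(4)), layer.bias)
assert torch.allclose(layer(x), manual)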
traspose_conv
import torch import torch.nn as nn class traspose_conv(nn.Module): def __init__(self, num_of_channels): super(traspose_conv, self).__init__() self.trasnpose_conv = nn.ConvTranspose2d(num_of_channels, int( num_of_channels / 2), kernel_size=2, stride=2) def forward(self, x): x = self.trasnpose_conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_of_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 2, 2, 2), (8, 4, 2, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 8, 8), (128, 64, 8, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class traspose_convNew(nn.Module): def __init__(self, num_of_channels): super(traspose_convNew, self).__init__() self.trasnpose_conv = nn.ConvTranspose2d(num_of_channels, int( num_of_channels / 2), kernel_size=2, stride=2) def forward(self, input_0): primals_1 = self.trasnpose_conv.weight primals_2 = self.trasnpose_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mhakyash/UNet-MNIST-denoising
traspose_conv
false
10,566
[ "MIT" ]
0
0e3c20cbb3f34af575e33209425ae4d7cb0bcd82
https://github.com/mhakyash/UNet-MNIST-denoising/tree/0e3c20cbb3f34af575e33209425ae4d7cb0bcd82
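Shape sketch for the traspose_conv record (my code): the stride-2 transposed convolution doubles the spatial size and halves the channel count, which is also what the inductor-generated assert on buf0 above expects.

import torch

up = traspose_conv(num_of_channels=4)
out = up(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 2, 8, 8)    # (H - 1) * stride + kernel_size = 3 * 2 + 2 = 8 on each spatial dim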
DeepAutoencoder
import torch import torch.nn as nn import torch.nn.functional as F class DeepAutoencoder(nn.Module): def __init__(self, input_length, output_length=None, neuron_multiplier= 1, sigmoid=False, drop=False, drop_pct=0.3): """ Dense deep autoencoder. Args: input_length (int): Length (i.e., size) of input sample. output_length (int): Length of output sample. Defaults to None, leave as default if input and output are equal size. neuron_multiplier (int): Number to augment the set number of neurons. Defaults to 1. sigmoid (boolean): Defaults to False. Leave as default if output is nn.Linear (not sigmoid). drop (boolean): Defaults to False. True activates dropout for each layer except final. drop_pct (float): Amount of dropout to use (if drop is True). Defaults to 0.3. """ super(DeepAutoencoder, self).__init__() self.input_length = input_length if not output_length: self.output_length = input_length if output_length: self.output_length = output_length self.neuron_multiplier = neuron_multiplier self.sigmoid = sigmoid self.drop = drop self.drop_pct = drop_pct self.inplay = nn.Linear(in_features=self.input_length, out_features =2 * (256 * self.neuron_multiplier)) self.layer1 = nn.Linear(in_features=2 * (256 * self. neuron_multiplier), out_features=256 * self.neuron_multiplier) self.layer2 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer3 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=64 * self.neuron_multiplier) self.layer4 = nn.Linear(in_features=64 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer5 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=256 * self.neuron_multiplier) self.layer6 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=self.output_length) self.out = nn.Linear(in_features=self.output_length, out_features= self.output_length) def forward(self, x): if self.drop: drp = nn.Dropout(p=self.drop_pct) x = F.relu(self.inplay(x)) if self.drop: x = drp(x) x = F.relu(self.layer1(x)) if self.drop: x = drp(x) x = F.relu(self.layer2(x)) if self.drop: x = drp(x) x = F.relu(self.layer3(x)) if self.drop: x = drp(x) x = F.relu(self.layer4(x)) if self.drop: x = drp(x) x = F.relu(self.layer5(x)) if self.drop: x = drp(x) x = F.relu(self.layer6(x)) if self.drop: x = drp(x) if self.sigmoid: x = torch.sigmoid(self.out(x)) if not self.sigmoid: x = self.out(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_length': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, 
primals_17) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 512), (512, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (128, 256), (256, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (64, 128), (128, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64), (64, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (256, 128), (128, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (4, 256), (256, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf0 buf21 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1, primals_2, buf21, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf20 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf3, primals_5, buf20, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 128), (1, 256), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf4 buf19 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(8192)](buf5, primals_7, buf19, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 64), (1, 128), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf6 buf18 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch .bool) triton_poi_fused_relu_threshold_backward_3[grid(4096)](buf7, primals_9, buf18, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (64, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 128), (1, 64), 0), out=buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf8 buf17 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(8192)](buf9, primals_11, buf17, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (64, 128), (128, 1), 0), reinterpret_tensor(primals_12, 
(128, 256), (1, 128), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4, 256), (4096, 1024, 256, 1), 0) del buf10 buf16 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf11, primals_13, buf16, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (64, 256), (256, 1), 0), reinterpret_tensor(primals_14, (256, 4), (1, 256), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf12 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(256)](buf13, primals_15, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf14 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_17, reinterpret_tensor(buf13, (64, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_17 return (reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(buf5, (64, 128), (128, 1), 0), reinterpret_tensor(buf7, (64, 64), (64, 1), 0), reinterpret_tensor( buf9, (64, 128), (128, 1), 0), reinterpret_tensor(buf11, (64, 256), (256, 1), 0), reinterpret_tensor(buf13, (64, 4), (4, 1), 0), primals_16, buf15, primals_14, buf16, primals_12, buf17, primals_10, buf18, primals_8, buf19, primals_6, buf20, primals_4, buf21) class DeepAutoencoderNew(nn.Module): def __init__(self, input_length, output_length=None, neuron_multiplier= 1, sigmoid=False, drop=False, drop_pct=0.3): """ Dense deep autoencoder. Args: input_length (int): Length (i.e., size) of input sample. output_length (int): Length of output sample. Defaults to None, leave as default if input and output are equal size. neuron_multiplier (int): Number to augment the set number of neurons. Defaults to 1. sigmoid (boolean): Defaults to False. Leave as default if output is nn.Linear (not sigmoid). drop (boolean): Defaults to False. True activates dropout for each layer except final. drop_pct (float): Amount of dropout to use (if drop is True). Defaults to 0.3. """ super(DeepAutoencoderNew, self).__init__() self.input_length = input_length if not output_length: self.output_length = input_length if output_length: self.output_length = output_length self.neuron_multiplier = neuron_multiplier self.sigmoid = sigmoid self.drop = drop self.drop_pct = drop_pct self.inplay = nn.Linear(in_features=self.input_length, out_features =2 * (256 * self.neuron_multiplier)) self.layer1 = nn.Linear(in_features=2 * (256 * self. 
neuron_multiplier), out_features=256 * self.neuron_multiplier) self.layer2 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer3 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=64 * self.neuron_multiplier) self.layer4 = nn.Linear(in_features=64 * self.neuron_multiplier, out_features=128 * self.neuron_multiplier) self.layer5 = nn.Linear(in_features=128 * self.neuron_multiplier, out_features=256 * self.neuron_multiplier) self.layer6 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=self.output_length) self.out = nn.Linear(in_features=self.output_length, out_features= self.output_length) def forward(self, input_0): primals_1 = self.inplay.weight primals_2 = self.inplay.bias primals_4 = self.layer1.weight primals_5 = self.layer1.bias primals_6 = self.layer2.weight primals_7 = self.layer2.bias primals_8 = self.layer3.weight primals_9 = self.layer3.bias primals_10 = self.layer4.weight primals_11 = self.layer4.bias primals_12 = self.layer5.weight primals_13 = self.layer5.bias primals_14 = self.layer6.weight primals_15 = self.layer6.bias primals_16 = self.out.weight primals_17 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
mariajmolina/ML-for-S2S
DeepAutoencoder
false
10,567
[ "MIT" ]
0
3de32e72042ba7e8b37a433579fa9c5630246d8c
https://github.com/mariajmolina/ML-for-S2S/tree/3de32e72042ba7e8b37a433579fa9c5630246d8c
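A brief check for the DeepAutoencoder record (mine): with the defaults the network maps inputs back to their own length, and neuron_multiplier only widens the hidden layers without changing the output size.

import torch

ae = DeepAutoencoder(input_length=4)
assert ae(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4)     # output length defaults to input length
wide = DeepAutoencoder(input_length=4, neuron_multiplier=2)
assert wide.layer1.out_features == 512                       # hidden widths scale with the multiplier
assert wide(torch.rand(4, 4)).shape == (4, 4)                # the reconstruction size does not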
DeeperAutoencoder
import torch import torch.nn as nn import torch.nn.functional as F class DeeperAutoencoder(nn.Module): def __init__(self, input_length, output_length=None, neuron_multiplier= 1, sigmoid=False, drop=False, drop_pct=0.3): """ Dense deeper autoencoder. Args: input_length (int): Length (i.e., size) of input sample. output_length (int): Length of output sample. Defaults to None, leave as default if input and output are equal size. neuron_multiplier (int): Number to augment the set number of neurons. Defaults to 1. sigmoid (boolean): Defaults to False. Leave as default if output is nn.Linear (not sigmoid). drop (boolean): Defaults to False. True activates dropout for each layer except final. drop_pct (float): Amount of dropout to use (if drop is True). Defaults to 0.3. """ super(DeeperAutoencoder, self).__init__() self.input_length = input_length if not output_length: self.output_length = input_length if output_length: self.output_length = output_length self.neuron_multiplier = neuron_multiplier self.sigmoid = sigmoid self.drop = drop self.drop_pct = drop_pct self.layer1 = nn.Linear(in_features=self.input_length, out_features =4 * 256 * self.neuron_multiplier) self.layer2 = nn.Linear(in_features=4 * 256 * self. neuron_multiplier, out_features=256 * self.neuron_multiplier) self.layer5 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=4 * 256 * self.neuron_multiplier) self.layer6 = nn.Linear(in_features=4 * 256 * self. neuron_multiplier, out_features=self.output_length) self.layer7 = nn.Linear(in_features=self.output_length, out_features=int(self.input_length / 3) + self.input_length) self.out = nn.Linear(in_features=int(self.input_length / 3) + self. input_length, out_features=self.output_length) def forward(self, x): x = F.relu(self.layer1(x)) x = F.relu(self.layer2(x)) x = F.relu(self.layer5(x)) x = F.relu(self.layer6(x)) x = F.relu(self.layer7(x)) if self.sigmoid: x = torch.sigmoid(self.out(x)) if not self.sigmoid: x = self.out(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_length': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (1024, 4), (4, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 1024), (1024, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (1024, 256), (256, 1)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (4, 1024), (1024, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (5, 4), (4, 1)) assert_size_stride(primals_11, (5,), (1,)) assert_size_stride(primals_12, (4, 5), (5, 
1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1024), (16384, 4096, 1024, 1), 0) del buf0 buf15 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf1, primals_2, buf15, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0 ), reinterpret_tensor(primals_4, (1024, 256), (1, 1024), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf14 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf3, primals_5, buf14, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 1024), (1, 256), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1024), (16384, 4096, 1024, 1), 0) del buf4 buf13 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf5, primals_7, buf13, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 1024), (1024, 1), 0 ), reinterpret_tensor(primals_8, (1024, 4), (1, 1024), 0), out=buf6 ) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf7, primals_9, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((64, 5), (5, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 5), (1, 4), 0), out=buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 5), (80, 20, 5, 1), 0) del buf8 buf11 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(320)](buf9, primals_11, buf11, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 5), (5, 1), 0), reinterpret_tensor(primals_12, (5, 4), (1, 5), 0), alpha=1, beta=1, out=buf10) del primals_13 return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(buf5, (64, 1024), (1024, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor( buf9, (64, 5), (5, 1), 0), primals_12, buf11, primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4, buf15) class DeeperAutoencoderNew(nn.Module): def __init__(self, input_length, output_length=None, neuron_multiplier= 1, sigmoid=False, drop=False, drop_pct=0.3): """ Dense deeper autoencoder. Args: input_length (int): Length (i.e., size) of input sample. 
output_length (int): Length of output sample. Defaults to None, leave as default if input and output are equal size. neuron_multiplier (int): Number to augment the set number of neurons. Defaults to 1. sigmoid (boolean): Defaults to False. Leave as default if output is nn.Linear (not sigmoid). drop (boolean): Defaults to False. True activates dropout for each layer except final. drop_pct (float): Amount of dropout to use (if drop is True). Defaults to 0.3. """ super(DeeperAutoencoderNew, self).__init__() self.input_length = input_length if not output_length: self.output_length = input_length if output_length: self.output_length = output_length self.neuron_multiplier = neuron_multiplier self.sigmoid = sigmoid self.drop = drop self.drop_pct = drop_pct self.layer1 = nn.Linear(in_features=self.input_length, out_features =4 * 256 * self.neuron_multiplier) self.layer2 = nn.Linear(in_features=4 * 256 * self. neuron_multiplier, out_features=256 * self.neuron_multiplier) self.layer5 = nn.Linear(in_features=256 * self.neuron_multiplier, out_features=4 * 256 * self.neuron_multiplier) self.layer6 = nn.Linear(in_features=4 * 256 * self. neuron_multiplier, out_features=self.output_length) self.layer7 = nn.Linear(in_features=self.output_length, out_features=int(self.input_length / 3) + self.input_length) self.out = nn.Linear(in_features=int(self.input_length / 3) + self. input_length, out_features=self.output_length) def forward(self, input_0): primals_1 = self.layer1.weight primals_2 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_6 = self.layer5.weight primals_7 = self.layer5.bias primals_8 = self.layer6.weight primals_9 = self.layer6.bias primals_10 = self.layer7.weight primals_11 = self.layer7.bias primals_12 = self.out.weight primals_13 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
mariajmolina/ML-for-S2S
DeeperAutoencoder
false
10,568
[ "MIT" ]
0
3de32e72042ba7e8b37a433579fa9c5630246d8c
https://github.com/mariajmolina/ML-for-S2S/tree/3de32e72042ba7e8b37a433579fa9c5630246d8c
ConveRTOuterFeedForward
import torch
import torch.nn as nn
import torch.nn.functional as fnn
from torch.nn.modules.normalization import LayerNorm


class ConveRTOuterFeedForward(nn.Module):
    """Fully-Connected 3-layer Linear Model"""

    def __init__(self, input_hidden: 'int', intermediate_hidden: 'int',
        dropout_rate: 'float'=0.0):
        """
        :param input_hidden: first-hidden layer input embed-dim
        :type input_hidden: int
        :param intermediate_hidden: layer-(hidden)-layer middle point weight
        :type intermediate_hidden: int
        :param dropout_rate: dropout rate, defaults to None
        :type dropout_rate: float, optional
        """
        super().__init__()
        self.linear_1 = nn.Linear(input_hidden, intermediate_hidden)
        self.dropout = nn.Dropout(dropout_rate)
        self.linear_2 = nn.Linear(intermediate_hidden, intermediate_hidden)
        self.dropout = nn.Dropout(dropout_rate)
        self.linear_3 = nn.Linear(intermediate_hidden, intermediate_hidden)
        self.norm = LayerNorm(intermediate_hidden)

    def forward(self, x: 'torch.Tensor') ->torch.Tensor:
        """forward through fully-connected 3-layer

        :param x: fnn input
        :type x: torch.Tensor
        :return: return fnn output
        :rtype: torch.Tensor
        """
        x = self.linear_1(x)
        x = self.linear_2(self.dropout(x))
        x = self.linear_3(self.dropout(x))
        return fnn.gelu(self.norm(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_hidden': 4, 'intermediate_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.modules.normalization import LayerNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_gelu_native_layer_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = 0.7071067811865476 tmp12 = tmp8 * tmp11 tmp13 = libdevice.erf(tmp12) tmp14 = 1.0 tmp15 = tmp13 + tmp14 tmp16 = tmp10 * tmp15 tl.store(in_out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 
1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf5 del buf5 triton_poi_fused_gelu_native_layer_norm_1[grid(256)](buf6, buf2, buf3, buf4, primals_8, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del buf4 return buf6, primals_8, primals_9, reinterpret_tensor(primals_3, (64, 4 ), (4, 1), 0), buf0, buf1, buf2, primals_6, primals_4 class ConveRTOuterFeedForwardNew(nn.Module): """Fully-Connected 3-layer Linear Model""" def __init__(self, input_hidden: 'int', intermediate_hidden: 'int', dropout_rate: 'float'=0.0): """ :param input_hidden: first-hidden layer input embed-dim :type input_hidden: int :param intermediate_hidden: layer-(hidden)-layer middle point weight :type intermediate_hidden: int :param dropout_rate: dropout rate, defaults to None :type dropout_rate: float, optional """ super().__init__() self.linear_1 = nn.Linear(input_hidden, intermediate_hidden) self.dropout = nn.Dropout(dropout_rate) self.linear_2 = nn.Linear(intermediate_hidden, intermediate_hidden) self.dropout = nn.Dropout(dropout_rate) self.linear_3 = nn.Linear(intermediate_hidden, intermediate_hidden) self.norm = LayerNorm(intermediate_hidden) def forward(self, input_0): primals_1 = self.linear_1.weight primals_2 = self.linear_1.bias primals_4 = self.linear_2.weight primals_5 = self.linear_2.bias primals_6 = self.linear_3.weight primals_7 = self.linear_3.bias primals_8 = self.norm.weight primals_9 = self.norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
luweishuang/ConveRT-pytorch
ConveRTOuterFeedForward
false
10,569
[ "Apache-2.0" ]
0
e14aaf2287eb3a78ee7d83ea02d9bd322863227f
https://github.com/luweishuang/ConveRT-pytorch/tree/e14aaf2287eb3a78ee7d83ea02d9bd322863227f
MultiheadAttention
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from typing import Optional


class MultiheadAttention(nn.Module):
    """Multi-Head Attention Implemenetation from huggingface/transformer"""

    def __init__(self, config: 'ConveRTModelConfig'):
        super().__init__()
        self.num_attention_heads = 2
        self.num_attn_proj = config.num_embed_hidden
        self.attention_head_size = int(self.num_attn_proj / self.
            num_attention_heads)
        self.all_head_size = (self.num_attention_heads * self.
            attention_head_size)
        self.query = nn.Linear(config.num_embed_hidden, self.num_attn_proj)
        self.key = nn.Linear(config.num_embed_hidden, self.num_attn_proj)
        self.value = nn.Linear(config.num_embed_hidden, self.num_attn_proj)
        self.dropout = nn.Dropout(config.dropout_rate)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states: 'torch.Tensor', attention_mask:
        'Optional[torch.Tensor]'=None, head_mask: 'Optional[torch.Tensor]'=
        None, encoder_hidden_states: 'Optional[torch.Tensor]'=None,
        encoder_attention_mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor:
        mixed_query_layer = self.query(hidden_states)
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
            attention_mask = encoder_attention_mask
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
            -2))
        attention_scores = attention_scores / math.sqrt(self.
            attention_head_size)
        if attention_mask is not None:
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = (1.0 - attention_mask) * -10000.0
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.
            num_attn_proj,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(num_embed_hidden=4, dropout_rate=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 4 x2 = xindex // 8 % 2 x3 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2 * x2 + 4 * x1 + 16 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 2 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp3 = 0.8408964152537145 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.8408964152537145 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = 
tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 4 x2 = xindex // 8 % 2 x3 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2 * x2 + 4 * x1 + 16 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 2 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 % 4 x3 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2 * x2 + 8 * x1 + 16 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 2, 4, 2), (16, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(64)](buf0, primals_2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 2, 2, 4), (16, 8, 4, 1), 0) del buf0 triton_poi_fused_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (8, 4, 2), (8, 2, 1), 0 ), reinterpret_tensor(buf4, (8, 2, 4), (8, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(128)](buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) triton_poi_fused_3[grid(128)](buf5, buf6, buf7, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 2, 4, 2), (16, 8, 2, 1), 0) del buf1 
triton_poi_fused_4[grid(64)](buf2, primals_7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (8, 4, 2), (8, 2, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (8, 4, 2), (8, 2, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_clone_5[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf9 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (8, 2, 4), (8, 1, 2), 0 ), reinterpret_tensor(buf3, (8, 2, 4), (8, 1, 2), 0 ), reinterpret_tensor(buf4, (8, 4, 2), (8, 1, 4), 0) class MultiheadAttentionNew(nn.Module): """Multi-Head Attention Implemenetation from huggingface/transformer""" def __init__(self, config: 'ConveRTModelConfig'): super().__init__() self.num_attention_heads = 2 self.num_attn_proj = config.num_embed_hidden self.attention_head_size = int(self.num_attn_proj / self. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.num_embed_hidden, self.num_attn_proj) self.key = nn.Linear(config.num_embed_hidden, self.num_attn_proj) self.value = nn.Linear(config.num_embed_hidden, self.num_attn_proj) self.dropout = nn.Dropout(config.dropout_rate) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
luweishuang/ConveRT-pytorch
MultiheadAttention
false
10,570
[ "Apache-2.0" ]
0
e14aaf2287eb3a78ee7d83ea02d9bd322863227f
https://github.com/luweishuang/ConveRT-pytorch/tree/e14aaf2287eb3a78ee7d83ea02d9bd322863227f
Keypoint2DLoss
import torch
import torch.nn as nn


class Keypoint2DLoss(nn.Module):

    def __init__(self, loss_type: 'str'='l1'):
        """
        2D keypoint loss module.
        Args:
            loss_type (str): Choose between l1 and l2 losses.
        """
        super(Keypoint2DLoss, self).__init__()
        if loss_type == 'l1':
            self.loss_fn = nn.L1Loss(reduction='none')
        elif loss_type == 'l2':
            self.loss_fn = nn.MSELoss(reduction='none')
        else:
            raise NotImplementedError('Unsupported loss function')

    def forward(self, pred_keypoints_2d: 'torch.Tensor', gt_keypoints_2d:
        'torch.Tensor') ->torch.Tensor:
        """
        Compute 2D reprojection loss on the keypoints.
        Args:
            pred_keypoints_2d (torch.Tensor): Tensor of shape [B, S, N, 2] containing projected 2D keypoints (B: batch_size, S: num_samples, N: num_keypoints)
            gt_keypoints_2d (torch.Tensor): Tensor of shape [B, S, N, 3] containing the ground truth 2D keypoints and confidence.
        Returns:
            torch.Tensor: 2D keypoint loss.
        """
        conf = gt_keypoints_2d[:, :, :, -1].unsqueeze(-1).clone()
        conf.shape[0]
        loss = (conf * self.loss_fn(pred_keypoints_2d, gt_keypoints_2d[:,
            :, :, :-1])).sum(dim=(2, 3))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 3]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_clone_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    rnumel = 12
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r2 = rindex // 3
    x0 = xindex
    r3 = rindex
    r1 = rindex % 3
    tmp0 = tl.load(in_ptr0 + (3 + 4 * r2 + 16 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + (r3 + 12 * x0), rmask & xmask, other=0.0)
    tmp2 = tl.load(in_ptr0 + (r1 + 4 * r2 + 16 * x0), rmask & xmask, other=0.0)
    tmp3 = tmp1 - tmp2
    tmp4 = tl_math.abs(tmp3)
    tmp5 = tmp0 * tmp4
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.where(rmask & xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 3), (48, 12, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_abs_clone_mul_sub_sum_0[grid(16)](arg0_1, arg1_1,
            buf0, 16, 12, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class Keypoint2DLossNew(nn.Module):

    def __init__(self, loss_type: 'str'='l1'):
        """
        2D keypoint loss module.
        Args:
            loss_type (str): Choose between l1 and l2 losses.
        """
        super(Keypoint2DLossNew, self).__init__()
        if loss_type == 'l1':
            self.loss_fn = nn.L1Loss(reduction='none')
        elif loss_type == 'l2':
            self.loss_fn = nn.MSELoss(reduction='none')
        else:
            raise NotImplementedError('Unsupported loss function')

    def forward(self, input_0, input_1):
        arg1_1 = input_0
        arg0_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
michael-p-sachen/ProHMR
Keypoint2DLoss
false
10,571
[ "BSD-3-Clause" ]
0
0167d05a9a45939a217d02b4ef8fd67977c15f82
https://github.com/michael-p-sachen/ProHMR/tree/0167d05a9a45939a217d02b4ef8fd67977c15f82
TripletMarginCosineLoss
from torch.nn import Module import torch from torch.nn.functional import cosine_similarity def triplet_margin_cosine_loss(anchor, positive, negative, margin=1.0, eps= 1e-08, sum_loss=False): 'Creates a criterion that measures the triplet cosine loss given input\n tensors x1, x2, x3 and a margin with a value greater than 0.\n This is used for measuring a relative similarity between samples. A triplet\n is composed by `a`, `p` and `n`: anchor, positive example and negative\n example(s) respectively. The shape of the anchor and positive variables should \n be\n math:`(N, D)`.\n The shape of the negative variable should be\n math:`(N, D)`, for 1 negative sample, or\n math:`(N, m, D)`, for m negative samples.\n\n\n .. math::\n L(a, p, n) = \x0crac{1}{N} \\left( \\sum_{i=1}^N \\max \\{0, margin - cos(a_i, p_i) + cos(a_i, n_i)\\} \right)\n\n Args:\n anchor: anchor input tensor\n positive: positive input tensor\n negative: negative input tensor\n margin: the margin value. Default: 1\n eps: small epsilon value to avoid numerical issues. Default: 1e-6\n sum_loss: if True the hinge loss will be summed across batch instances\n\n Shape:\n - Input: :math:`(N, D)` where `D = vector dimension`\n - Output: :math:`(N, 1)`\n\n Example::\n\n >>> input1 = autograd.Variable(torch.randn(100, 128))\n >>> input2 = autograd.Variable(torch.randn(100, 128))\n >>> input3 = autograd.Variable(torch.randn(100, 10, 128))\n >>> output = triplet_margin_cosine_loss(input1, input2, input3)\n >>> output.backward()\n ' assert anchor.size() == positive.size( ), 'Input sizes between anchor and positive must be equal.' assert anchor.dim() == 2, 'Anchor and positive must be 2D matrices.' assert negative.dim( ) <= 3, 'Negative must be 2D (1 negative sample) or 3D matrix (multiple negatives).' assert margin > 0.0, 'Margin should be positive value.' if negative.dim() == 2: assert anchor.size() == negative.size( ), 'Input sizes between anchor and negative must be equal (if 1 negative sample).' d_p = cosine_similarity(anchor, positive, eps=eps) d_n = cosine_similarity(anchor, negative, eps=eps) dist_hinge = torch.clamp(margin - d_p + d_n, min=0.0) else: assert anchor.size()[0] == negative.size()[0] and anchor.size()[1 ] == negative.size()[2 ], 'If using multiple negatives samples, their size: [B, #neg, emb_size].' d_p = cosine_similarity(anchor, positive, eps=eps) d_n = cosine_similarity(anchor.unsqueeze(1), negative, dim=2, eps=eps) dist_hinge = torch.clamp(margin - d_p.unsqueeze(1) + d_n, min=0.0).sum( dim=1) if not sum_loss: loss = torch.mean(dist_hinge) else: loss = torch.sum(dist_hinge) return loss class TripletMarginCosineLoss(Module): 'Creates a criterion that measures the triplet cosine loss given an input\n tensors x1, x2, x3 and a margin with a value greater than 0.\n This is used for measuring a relative similarity between samples. A triplet\n is composed by `a`, `p` and `n`: anchor, positive examples and negative\n example(s) respectively. The shape of the anchor and positive variables should \n be\n math:`(N, D)`.\n The shape of the negative variable should be\n math:`(N, D)`, for 1 negative sample, or\n math:`(N, m, D)`, for m negative samples.\n\n\n .. 
math::\n L(a, p, n) = \x0crac{1}{N} \\left( \\sum_{i=1}^N \\max \\{0, margin - cos(a_i, p_i) + cos(a_i, n_i)\\} \right)\n\n Args:\n anchor: anchor input tensor\n positive: positive input tensor\n negative: negative input tensor\n\n Shape:\n - Input: :math:`(N, D)` where `D = vector dimension`\n - Output: :math:`(N, 1)`\n\n >>> triplet_loss = nn.TripletMarginCosineLoss(margin=1.0, p=2)\n >>> input1 = autograd.Variable(torch.randn(100, 128))\n >>> input2 = autograd.Variable(torch.randn(100, 128))\n >>> input3 = autograd.Variable(torch.randn(100, 10, 128))\n >>> output = triplet_loss(input1, input2, input3)\n >>> output.backward()\n ' def __init__(self, margin=1.0, eps=1e-08, sum_loss=False): super(TripletMarginCosineLoss, self).__init__() self.margin = margin self.eps = eps self.sum_loss = sum_loss def forward(self, anchor, positive, negative): return triplet_margin_cosine_loss(anchor, positive, negative, self. margin, self.eps, self.sum_loss) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn.functional import cosine_similarity assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr2 + x2, xmask) tmp33 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp38 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp41 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tmp34 = tmp33 * tmp33 tmp36 = tmp35 * tmp35 tmp37 = tmp34 + tmp36 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp42 = tmp41 * tmp41 tmp43 = tmp40 + tmp42 tmp44 = libdevice.sqrt(tmp43) tmp45 = triton_helpers.maximum(tmp44, tmp13) tmp46 = tmp32 / tmp45 tmp47 = tmp15 * tmp46 tl.store(out_ptr0 + x2, tmp31, xmask) tl.store(out_ptr1 + x2, tmp47, xmask) @triton.jit def triton_per_fused_add_clamp_mean_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + 4 * r0), None, 
eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp8 + tmp15 tmp17 = 0.0 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tmp22 = 4.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(16)]( arg0_1, arg1_1, arg2_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_clamp_mean_rsub_sum_1[grid(1)](buf3, buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, def triplet_margin_cosine_loss(anchor, positive, negative, margin=1.0, eps= 1e-08, sum_loss=False): 'Creates a criterion that measures the triplet cosine loss given input\n tensors x1, x2, x3 and a margin with a value greater than 0.\n This is used for measuring a relative similarity between samples. A triplet\n is composed by `a`, `p` and `n`: anchor, positive example and negative\n example(s) respectively. The shape of the anchor and positive variables should \n be\n math:`(N, D)`.\n The shape of the negative variable should be\n math:`(N, D)`, for 1 negative sample, or\n math:`(N, m, D)`, for m negative samples.\n\n\n .. math::\n L(a, p, n) = \x0crac{1}{N} \\left( \\sum_{i=1}^N \\max \\{0, margin - cos(a_i, p_i) + cos(a_i, n_i)\\} \right)\n\n Args:\n anchor: anchor input tensor\n positive: positive input tensor\n negative: negative input tensor\n margin: the margin value. Default: 1\n eps: small epsilon value to avoid numerical issues. Default: 1e-6\n sum_loss: if True the hinge loss will be summed across batch instances\n\n Shape:\n - Input: :math:`(N, D)` where `D = vector dimension`\n - Output: :math:`(N, 1)`\n\n Example::\n\n >>> input1 = autograd.Variable(torch.randn(100, 128))\n >>> input2 = autograd.Variable(torch.randn(100, 128))\n >>> input3 = autograd.Variable(torch.randn(100, 10, 128))\n >>> output = triplet_margin_cosine_loss(input1, input2, input3)\n >>> output.backward()\n ' assert anchor.size() == positive.size( ), 'Input sizes between anchor and positive must be equal.' assert anchor.dim() == 2, 'Anchor and positive must be 2D matrices.' assert negative.dim( ) <= 3, 'Negative must be 2D (1 negative sample) or 3D matrix (multiple negatives).' assert margin > 0.0, 'Margin should be positive value.' if negative.dim() == 2: assert anchor.size() == negative.size( ), 'Input sizes between anchor and negative must be equal (if 1 negative sample).' d_p = cosine_similarity(anchor, positive, eps=eps) d_n = cosine_similarity(anchor, negative, eps=eps) dist_hinge = torch.clamp(margin - d_p + d_n, min=0.0) else: assert anchor.size()[0] == negative.size()[0] and anchor.size()[1 ] == negative.size()[2 ], 'If using multiple negatives samples, their size: [B, #neg, emb_size].' 
d_p = cosine_similarity(anchor, positive, eps=eps) d_n = cosine_similarity(anchor.unsqueeze(1), negative, dim=2, eps=eps) dist_hinge = torch.clamp(margin - d_p.unsqueeze(1) + d_n, min=0.0).sum( dim=1) if not sum_loss: loss = torch.mean(dist_hinge) else: loss = torch.sum(dist_hinge) return loss class TripletMarginCosineLossNew(Module): 'Creates a criterion that measures the triplet cosine loss given an input\n tensors x1, x2, x3 and a margin with a value greater than 0.\n This is used for measuring a relative similarity between samples. A triplet\n is composed by `a`, `p` and `n`: anchor, positive examples and negative\n example(s) respectively. The shape of the anchor and positive variables should \n be\n math:`(N, D)`.\n The shape of the negative variable should be\n math:`(N, D)`, for 1 negative sample, or\n math:`(N, m, D)`, for m negative samples.\n\n\n .. math::\n L(a, p, n) = \x0crac{1}{N} \\left( \\sum_{i=1}^N \\max \\{0, margin - cos(a_i, p_i) + cos(a_i, n_i)\\} \right)\n\n Args:\n anchor: anchor input tensor\n positive: positive input tensor\n negative: negative input tensor\n\n Shape:\n - Input: :math:`(N, D)` where `D = vector dimension`\n - Output: :math:`(N, 1)`\n\n >>> triplet_loss = nn.TripletMarginCosineLoss(margin=1.0, p=2)\n >>> input1 = autograd.Variable(torch.randn(100, 128))\n >>> input2 = autograd.Variable(torch.randn(100, 128))\n >>> input3 = autograd.Variable(torch.randn(100, 10, 128))\n >>> output = triplet_loss(input1, input2, input3)\n >>> output.backward()\n ' def __init__(self, margin=1.0, eps=1e-08, sum_loss=False): super(TripletMarginCosineLossNew, self).__init__() self.margin = margin self.eps = eps self.sum_loss = sum_loss def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
monkeyhjy/aspect_summarization
TripletMarginCosineLoss
false
10,572
[ "MIT" ]
0
3018815cd0aeccb752e9f51a4d49453c4f441650
https://github.com/monkeyhjy/aspect_summarization/tree/3018815cd0aeccb752e9f51a4d49453c4f441650
ParameterLoss
import torch
import torch.nn as nn


class ParameterLoss(nn.Module):

    def __init__(self):
        """
        SMPL parameter loss module.
        """
        super(ParameterLoss, self).__init__()
        self.loss_fn = nn.MSELoss(reduction='none')

    def forward(self, pred_param: 'torch.Tensor', gt_param: 'torch.Tensor',
        has_param: 'torch.Tensor'):
        """
        Compute SMPL parameter loss.
        Args:
            pred_param (torch.Tensor): Tensor of shape [B, S, ...] containing the predicted parameters (body pose / global orientation / betas)
            gt_param (torch.Tensor): Tensor of shape [B, S, ...] containing the ground truth SMPL parameters.
        Returns:
            torch.Tensor: L2 parameter loss loss.
        """
        batch_size = pred_param.shape[0]
        num_samples = pred_param.shape[1]
        num_dims = len(pred_param.shape)
        mask_dimension = [batch_size, num_samples] + [1] * (num_dims - 2)
        has_param = has_param.type(pred_param.type()).view(*mask_dimension)
        loss_param = has_param * self.loss_fn(pred_param, gt_param)
        return loss_param


def get_inputs():
    return [torch.rand([4, 4, 1, 1]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 1, 1])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mse_loss_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x2, xmask)
    tmp3 = tmp1 - tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = tmp0 * tmp4
    tl.store(out_ptr0 + x2, tmp5, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(arg1_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mse_loss_mul_0[grid(256)](arg1_1, arg0_1, arg2_1,
            buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf0,


class ParameterLossNew(nn.Module):

    def __init__(self):
        """
        SMPL parameter loss module.
        """
        super(ParameterLossNew, self).__init__()
        self.loss_fn = nn.MSELoss(reduction='none')

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg2_1 = input_1
        arg1_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
michael-p-sachen/ProHMR
ParameterLoss
false
10,573
[ "BSD-3-Clause" ]
0
0167d05a9a45939a217d02b4ef8fd67977c15f82
https://github.com/michael-p-sachen/ProHMR/tree/0167d05a9a45939a217d02b4ef8fd67977c15f82
double_decoder_conv
import torch
import torch.nn as nn


class double_decoder_conv(nn.Module):

    def __init__(self, input_channels1, output_channels1, output_channels2):
        super(double_decoder_conv, self).__init__()
        self.conv1 = nn.Conv2d(input_channels1, output_channels1,
            kernel_size=3, padding='same')
        self.conv2 = nn.Conv2d(output_channels1, output_channels2,
            kernel_size=3, padding='same')
        self.relu_activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv1(self.relu_activation(x))
        x = self.conv2(self.relu_activation(x))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels1': 4, 'output_channels1': 4,
        'output_channels2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(256)](buf4, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf4, primals_2, primals_4, buf0, buf2 class double_decoder_convNew(nn.Module): def __init__(self, input_channels1, output_channels1, output_channels2): super(double_decoder_convNew, self).__init__() self.conv1 = nn.Conv2d(input_channels1, output_channels1, kernel_size=3, padding='same') self.conv2 = nn.Conv2d(output_channels1, output_channels2, kernel_size=3, padding='same') self.relu_activation = nn.ReLU(inplace=True) def forward(self, 
input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mhakyash/UNet-MNIST-denoising
double_decoder_conv
false
10,574
[ "MIT" ]
0
0e3c20cbb3f34af575e33209425ae4d7cb0bcd82
https://github.com/mhakyash/UNet-MNIST-denoising/tree/0e3c20cbb3f34af575e33209425ae4d7cb0bcd82
Keypoint3DLoss
import torch
import torch.nn as nn


class Keypoint3DLoss(nn.Module):

    def __init__(self, loss_type: 'str'='l1'):
        """
        3D keypoint loss module.
        Args:
            loss_type (str): Choose between l1 and l2 losses.
        """
        super(Keypoint3DLoss, self).__init__()
        if loss_type == 'l1':
            self.loss_fn = nn.L1Loss(reduction='none')
        elif loss_type == 'l2':
            self.loss_fn = nn.MSELoss(reduction='none')
        else:
            raise NotImplementedError('Unsupported loss function')

    def forward(self, pred_keypoints_3d: 'torch.Tensor', gt_keypoints_3d:
        'torch.Tensor', pelvis_id: 'int'=39):
        """
        Compute 3D keypoint loss.
        Args:
            pred_keypoints_3d (torch.Tensor): Tensor of shape [B, S, N, 3] containing the predicted 3D keypoints (B: batch_size, S: num_samples, N: num_keypoints)
            gt_keypoints_3d (torch.Tensor): Tensor of shape [B, S, N, 4] containing the ground truth 3D keypoints and confidence.
        Returns:
            torch.Tensor: 3D keypoint loss.
        """
        pred_keypoints_3d.shape[0]
        gt_keypoints_3d = gt_keypoints_3d.clone()
        pred_keypoints_3d = pred_keypoints_3d - pred_keypoints_3d[:, :,
            pelvis_id, :].unsqueeze(dim=2)
        gt_keypoints_3d[:, :, :, :-1] = gt_keypoints_3d[:, :, :, :-1
            ] - gt_keypoints_3d[:, :, pelvis_id, :-1].unsqueeze(dim=2)
        conf = gt_keypoints_3d[:, :, :, -1].unsqueeze(-1).clone()
        gt_keypoints_3d = gt_keypoints_3d[:, :, :, :-1]
        loss = (conf * self.loss_fn(pred_keypoints_3d, gt_keypoints_3d)).sum(
            dim=(2, 3))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 40, 3]), torch.rand([4, 4, 40, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_clone_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 120 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex // 3 x0 = xindex r3 = rindex r1 = rindex % 3 tmp7 = tl.load(in_ptr0 + (3 + 4 * r2 + 160 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (r3 + 120 * x0), rmask & xmask, other=0.0) tmp10 = tl.load(in_ptr1 + (117 + r1 + 120 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.load(in_ptr0 + (r1 + 4 * r2 + 160 * x0), rmask & xmask, other=0.0) tmp0 = tl.full([1, 1], 3, tl.int64) tmp1 = tmp0 < tmp0 tmp2 = tl.load(in_ptr0 + (3 + 4 * r2 + 160 * x0), rmask & tmp1 & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + tl.broadcast_to(159 + 160 * x0, [XBLOCK, RBLOCK]), rmask & tmp1 & xmask, eviction_policy='evict_last', other=0.0 ) tmp4 = tmp2 - tmp3 tmp5 = tl.full(tmp4.shape, 0.0, tmp4.dtype) tmp6 = tl.where(tmp1, tmp4, tmp5) tmp8 = tl.where(tmp1, tmp6, tmp7) tmp11 = tmp9 - tmp10 tmp12 = r1 tmp13 = tmp12 < tmp0 tmp14 = tl.load(in_ptr0 + (r1 + 4 * r2 + 160 * x0), rmask & tmp13 & xmask, other=0.0) tmp15 = tl.load(in_ptr0 + (156 + r1 + 160 * x0), rmask & tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp20 = tl.where(tmp13, tmp18, tmp19) tmp21 = tmp11 - tmp20 tmp22 = tl_math.abs(tmp21) tmp23 = tmp8 * tmp22 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.where(rmask & xmask, tmp24, 0) tmp27 = tl.sum(tmp26, 1)[:, None] tl.store(out_ptr0 + x0, tmp27, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 40, 3), (480, 120, 3, 1)) assert_size_stride(arg1_1, (4, 4, 40, 4), (640, 160, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_clone_mul_sub_sum_0[grid(16)](arg1_1, arg0_1, buf0, 16, 120, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class Keypoint3DLossNew(nn.Module): def __init__(self, loss_type: 'str'='l1'): """ 3D keypoint loss module. Args: loss_type (str): Choose between l1 and l2 losses. """ super(Keypoint3DLossNew, self).__init__() if loss_type == 'l1': self.loss_fn = nn.L1Loss(reduction='none') elif loss_type == 'l2': self.loss_fn = nn.MSELoss(reduction='none') else: raise NotImplementedError('Unsupported loss function') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
michael-p-sachen/ProHMR
Keypoint3DLoss
false
10,575
[ "BSD-3-Clause" ]
0
0167d05a9a45939a217d02b4ef8fd67977c15f82
https://github.com/michael-p-sachen/ProHMR/tree/0167d05a9a45939a217d02b4ef8fd67977c15f82
ContrastiveLoss
import torch
import torch.nn as nn
import torch.nn.init
import torch.utils.data
import torch.utils.data.distributed


def cosine_sim(im, s):
    return im.mm(s.t())


class ContrastiveLoss(nn.Module):

    def __init__(self, margin=0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        self.sim = cosine_sim

    def forward(self, hs):
        hs.size(1)
        tgt = hs[:, -1, :]
        src = hs[:, 0:-1, :]
        tgt = tgt.unsqueeze(1)
        scores = (tgt * src).sum(dim=2)
        d = scores[:, -1]
        d = d.unsqueeze(1)
        scores = scores[:, 0:-1]
        d = d.expand_as(scores)
        cost = (self.margin + scores - d).clamp(min=0)
        return cost.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.init import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 12 x1 = xindex // 4 % 3 x3 = xindex tmp0 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), xmask) tmp3 = tl.load(in_ptr0 + (52 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (4 + x0 + 16 * x1 + 64 * x2), xmask) tmp7 = tl.load(in_ptr0 + (56 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 64 * x2), xmask) tmp11 = tl.load(in_ptr0 + (60 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (12 + x0 + 16 * x1 + 64 * x2), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_per_fused_add_clamp_mean_sub_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex // 8 r3 = rindex % 8 r0 = rindex % 4 tmp0 = tl.load(in_ptr0 + (r3 + 12 * r2), None) tmp3 = tl.load(in_ptr0 + (8 + r0 + 12 * r2), None, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = 32.0 tmp10 = tmp8 / tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4), (12, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(48)](arg0_1, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_sub_1[grid(1)](buf2, buf0, 1, 32, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf2, def cosine_sim(im, s): return im.mm(s.t()) class ContrastiveLossNew(nn.Module): def __init__(self, margin=0): super(ContrastiveLossNew, self).__init__() self.margin = margin self.sim = cosine_sim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
nfyfamr/ActionEstimation
ContrastiveLoss
false
10,576
[ "MIT" ]
0
8f18dba49d9558b28a277ea82c70fb4e3425bbbb
https://github.com/nfyfamr/ActionEstimation/tree/8f18dba49d9558b28a277ea82c70fb4e3425bbbb
Net
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """policy-value network module"""

    def __init__(self, board_width, board_height):
        super(Net, self).__init__()
        self.board_width = board_width
        self.board_height = board_height
        self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1)
        self.act_fc1 = nn.Linear(4 * board_width * board_height,
            board_width * board_height)
        self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1)
        self.val_fc1 = nn.Linear(2 * board_width * board_height, 64)
        self.val_fc2 = nn.Linear(64, 1)

    def forward(self, state_input):
        x = F.relu(self.conv1(state_input))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x_act = F.relu(self.act_conv1(x))
        x_act = x_act.view(-1, 4 * self.board_width * self.board_height)
        x_act = F.log_softmax(self.act_fc1(x_act), dim=1)
        x_val = F.relu(self.val_conv1(x))
        x_val = x_val.view(-1, 2 * self.board_width * self.board_height)
        x_val = F.relu(self.val_fc1(x_val))
        x_val = self.val_fc2(x_val)
        x_val = torch.nn.Tanh()(x_val)
        return x_act, x_val


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'board_width': 4, 'board_height': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask) @triton.jit def triton_per_fused__log_softmax_8(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 16 * x0), tmp12, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 8 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 2 * x2 + 32 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_tanh_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), 
(36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (16, 64), (64, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_13, (2,), (1,)) assert_size_stride(primals_14, (64, 32), (32, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (1, 64), (64, 1)) assert_size_stride(primals_17, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 9)](primals_1, buf0, 128, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_4[grid(2048)](buf5, primals_2, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(4096)](buf7, primals_5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(8192)](buf9, primals_7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4)) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_7[grid(16, 16)]( buf10, primals_9, buf11, buf23, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 16), (1, 64), 0), alpha=1, beta=1, out=buf12) del primals_11 buf15 = 
empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_per_fused__log_softmax_8[grid(4)](buf12, buf15, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf12 buf16 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 2, 4, 4), (32, 1, 8, 2)) buf17 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) buf22 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_9[grid(8, 16)]( buf16, primals_13, buf17, buf22, 8, 16, XBLOCK=16, YBLOCK=8, num_warps=4, num_stages=1) del buf16 del primals_13 buf18 = reinterpret_tensor(buf10, (4, 64), (64, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf17, (4, 32), (32, 1), 0), reinterpret_tensor(primals_14, (32, 64), (1, 32), 0), out=buf18) buf19 = buf18 del buf18 triton_poi_fused_relu_10[grid(256)](buf19, primals_15, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_15 buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf19, reinterpret_tensor(primals_16, (64, 1), (1, 64), 0), out=buf20) buf21 = buf20 del buf20 triton_poi_fused_tanh_11[grid(4)](buf21, primals_17, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_17 return (buf15, buf21, buf0, buf1, buf2, buf3, primals_8, primals_12, buf5, buf7, buf9, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), buf15, reinterpret_tensor(buf17, (4, 32), (32, 1), 0), buf19, buf21, primals_16, primals_14, buf22, primals_10, buf23) class NetNew(nn.Module): """policy-value network module""" def __init__(self, board_width, board_height): super(NetNew, self).__init__() self.board_width = board_width self.board_height = board_height self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1) self.act_fc1 = nn.Linear(4 * board_width * board_height, board_width * board_height) self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1) self.val_fc1 = nn.Linear(2 * board_width * board_height, 64) self.val_fc2 = nn.Linear(64, 1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.act_conv1.weight primals_9 = self.act_conv1.bias primals_10 = self.act_fc1.weight primals_11 = self.act_fc1.bias primals_12 = self.val_conv1.weight primals_13 = self.val_conv1.bias primals_14 = self.val_fc1.weight primals_15 = self.val_fc1.bias primals_16 = self.val_fc2.weight primals_17 = self.val_fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
moddent/Gomoku_Deep
Net
false
10,577
[ "MIT" ]
0
5d9bca97e6b30db4f99a4686152bcef7a6160ac6
https://github.com/moddent/Gomoku_Deep/tree/5d9bca97e6b30db4f99a4686152bcef7a6160ac6
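A minimal parity sketch for this row (an editorial illustration, not part of the dataset): it assumes the `Net`/`NetNew` classes and the `get_inputs`/`get_init_inputs` helpers from the two code cells above are already in scope, needs a CUDA device because the Inductor-generated kernels are CUDA-only, and the `check_policy_value_pair` name and 1e-4 tolerance are arbitrary choices.

```python
import torch

def check_policy_value_pair(atol=1e-4):
    if not torch.cuda.is_available():
        return  # the generated Triton kernels only run on a CUDA device
    args, kwargs = get_init_inputs()            # ([], {'board_width': 4, 'board_height': 4})
    eager = Net(*args, **kwargs).cuda().eval()
    fused = NetNew(*args, **kwargs).cuda().eval()
    fused.load_state_dict(eager.state_dict())   # give both modules identical parameters
    x = get_inputs()[0].cuda()                  # (4, 4, 4, 4) board planes
    with torch.no_grad():
        act_ref, val_ref = eager(x)
        act_new, val_new = fused(x)
    assert torch.allclose(act_ref, act_new, atol=atol)
    assert torch.allclose(val_ref, val_new, atol=atol)

check_policy_value_pair()
```

The same pattern (instantiate both classes from a row, copy the state dict across, compare outputs on the row's `get_inputs()` shapes) carries over to the other rows below.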
ExpLinear
import torch from torch import nn import torch.nn from scipy.linalg import logm class InverseNotAvailable(Exception): """Exception to be thrown when a transform does not have an inverse.""" pass class Transform(nn.Module): """Base class for all transform objects.""" def forward(self, inputs, context=None): raise NotImplementedError() def inverse(self, inputs, context=None): raise InverseNotAvailable() class ExpLinear(Transform): def __init__(self, d): super().__init__() self.d = d dummy = nn.Linear(d, d) self.A = nn.Parameter(torch.tensor(logm(dummy.weight.detach().numpy ()), dtype=torch.float32)) self.b = nn.Parameter(dummy.bias.detach()) def forward(self, x, context=None): W = torch.matrix_exp(self.A) z = torch.matmul(x, W) + self.b logabsdet = torch.trace(self.A) * x.new_ones(z.shape[0]) return z, logabsdet def inverse(self, z, context=None): W_inv = torch.matrix_exp(-self.A) x = torch.matmul(z - self.b, W_inv) logabsdet = -torch.trace(self.A) * z.new_ones(x.shape[0]) return x, logabsdet def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn from scipy.linalg import logm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_per_fused_mul_new_ones_trace_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 1.0 tmp5 = tmp3 * tmp4 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.linalg_matrix_exp.default(primals_1) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf1, out=buf2) del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf3, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_new_ones_trace_1[grid(1)](primals_1, buf5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) return buf3, buf5, primals_1, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class InverseNotAvailable(Exception): """Exception to be thrown when a transform does not have an inverse.""" pass class Transform(nn.Module): """Base class for all transform objects.""" def forward(self, inputs, context=None): raise NotImplementedError() def inverse(self, inputs, context=None): raise InverseNotAvailable() class ExpLinearNew(Transform): def __init__(self, d): super().__init__() self.d = d dummy = nn.Linear(d, d) self.A = nn.Parameter(torch.tensor(logm(dummy.weight.detach().numpy ()), dtype=torch.float32)) self.b = nn.Parameter(dummy.bias.detach()) def inverse(self, z, context=None): W_inv = torch.matrix_exp(-self.A) x = torch.matmul(z - self.b, W_inv) logabsdet = -torch.trace(self.A) * z.new_ones(x.shape[0]) return x, logabsdet def forward(self, input_0): primals_1 = self.A primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
mshakerinava/nflows
ExpLinear
false
10,578
[ "MIT" ]
0
d86cb1478ff36ffd3e005e980d92a3b0bbffbf02
https://github.com/mshakerinava/nflows/tree/d86cb1478ff36ffd3e005e980d92a3b0bbffbf02
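The logabsdet in the ExpLinear row leans on det(exp(A)) = exp(trace(A)), so no determinant is ever computed. A small stand-alone sketch of that identity (the 4×4 size, float64 dtype and tolerances are arbitrary; nothing here comes from the row itself):

```python
import torch

A = torch.randn(4, 4, dtype=torch.float64)
W = torch.matrix_exp(A)                 # exp(A) is always invertible
sign, logabsdet = torch.slogdet(W)
assert sign > 0
assert torch.allclose(logabsdet, torch.trace(A), atol=1e-6)   # log|det exp(A)| = trace(A)

# The inverse pass uses exp(-A), the exact matrix inverse of exp(A).
assert torch.allclose(W @ torch.matrix_exp(-A), torch.eye(4, dtype=torch.float64), atol=1e-6)
```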
SingleConv3DBlock
import torch from torch import nn import torch._utils class SingleConv3DBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size): super().__init__() self.block = nn.Conv3d(in_planes, out_planes, kernel_size= kernel_size, stride=1, padding=(kernel_size - 1) // 2) def forward(self, x): return self.block(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'out_planes': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 108 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 27 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 3, 3, 3), (108, 27, 9, 3, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(108)](buf1, primals_2, 108, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 3, 3, 3), (27, 9, 3, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class SingleConv3DBlockNew(nn.Module): def __init__(self, in_planes, out_planes, kernel_size): super().__init__() self.block = nn.Conv3d(in_planes, out_planes, kernel_size= kernel_size, stride=1, padding=(kernel_size - 1) // 2) def forward(self, input_0): primals_1 = self.block.weight primals_2 = self.block.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ilcessadecalcular/segmentation
SingleConv3DBlock
false
10,579
[ "MIT" ]
0
24ba499a399efdba212ec5e2235b72ed8270cc24
https://github.com/ilcessadecalcular/segmentation/tree/24ba499a399efdba212ec5e2235b72ed8270cc24
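Worth noting for this row: `padding=(kernel_size - 1) // 2` only preserves spatial size for odd kernels; with the even `kernel_size=4` from `get_init_inputs` the volume shrinks, which is why the compiled cell asserts a (4, 3, 3, 3) output for the 4×4×4 input. A quick shape check with made-up sizes (illustration only):

```python
import torch

# Odd kernel: 8 + 2*1 - 3 + 1 = 8, so depth/height/width are preserved.
odd = SingleConv3DBlock(in_planes=4, out_planes=8, kernel_size=3)
assert odd(torch.rand(2, 4, 8, 8, 8)).shape == (2, 8, 8, 8, 8)

# Even kernel: 8 + 2*1 - 4 + 1 = 7, so each spatial dim loses one voxel.
even = SingleConv3DBlock(in_planes=4, out_planes=8, kernel_size=4)
assert even(torch.rand(2, 4, 8, 8, 8)).shape == (2, 8, 7, 7, 7)
```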
MeanMaxPooling
import torch from torch import nn class MeanMaxPooling(nn.Module): def __init__(self): super(MeanMaxPooling, self).__init__() def forward(self, doc_state, entity_mapping, entity_lens): """ :param doc_state: N x L x d :param entity_mapping: N x E x L :param entity_lens: N x E :return: N x E x 2d """ entity_states = entity_mapping.unsqueeze(3) * doc_state.unsqueeze(1) max_pooled = torch.max(entity_states, dim=2)[0] mean_pooled = torch.sum(entity_states, dim=2) / entity_lens.unsqueeze(2 ) output = torch.cat([max_pooled, mean_pooled], dim=2) return output def get_inputs(): return [torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch. rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_max_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 % 4 x4 = xindex // 256 x5 = xindex % 64 x6 = xindex % 16 x7 = xindex // 64 tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 256 * x4), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 64 * x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (64 + x5 + 256 * x4), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x0 + 4 * x2 + 64 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (128 + x5 + 256 * x4), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 4 * x2 + 64 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (192 + x5 + 256 * x4), xmask, eviction_policy ='evict_last') tmp15 = tl.load(in_ptr2 + (x6 + 16 * x7), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp16 = tmp14 / tmp15 tmp17 = triton_helpers.maximum(tmp2, tmp5) tmp18 = triton_helpers.maximum(tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp18, tmp13) tl.store(out_ptr0 + (x5 + 128 * x7), tmp16, xmask) tl.store(out_ptr1 + (x5 + 128 * x7), tmp19, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 8, 4, 4), (512, 128, 16, 4, 1), torch.float32) buf0 = reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (512, 128, 16, 4, 1), 64) buf1 = reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (512, 128, 16, 4, 1), 0) get_raw_stream(0) triton_poi_fused_div_max_mul_sum_0[grid(1024)](arg0_1, arg1_1, arg2_1, buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class MeanMaxPoolingNew(nn.Module): def __init__(self): super(MeanMaxPoolingNew, self).__init__() def forward(self, input_0, input_1, input_2): arg1_1 = input_0 arg0_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
mottled233/DFGN-pytorch
MeanMaxPooling
false
10,580
[ "MIT" ]
0
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
https://github.com/mottled233/DFGN-pytorch/tree/7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
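The docstring contract of this pooling is doc_state N×L×d, entity_mapping N×E×L, entity_lens N×E → N×E×2d; the `get_inputs` shapes above are only broadcast-compatible stand-ins. A shape sketch against the documented contract, with made-up sizes (illustration only):

```python
import torch

pool = MeanMaxPooling()
N, E, L, d = 2, 3, 5, 4
doc_state = torch.rand(N, L, d)                       # token states
entity_mapping = (torch.rand(N, E, L) > 0.5).float()  # which tokens belong to each entity
entity_lens = entity_mapping.sum(dim=2).clamp(min=1)  # guard against empty entities
out = pool(doc_state, entity_mapping, entity_lens)
assert out.shape == (N, E, 2 * d)   # max-pooled d dims concatenated with mean-pooled d dims
```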
SphereLoss
import torch import torch.nn as nn from torchvision.transforms import * class SphereLoss(nn.Module): def __init__(self, in_feats, n_classes, scale=14, *args, **kwargs): super(SphereLoss, self).__init__(*args, **kwargs) self.scale = scale self.cross_entropy = nn.CrossEntropyLoss() self.W = torch.nn.Parameter(torch.randn(in_feats, n_classes), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, x, label): x_norm = torch.norm(x, 2, 1, True).clamp(min=1e-12).expand_as(x) x_norm = x / x_norm w_norm = torch.norm(self.W, 2, 0, True).clamp(min=1e-12).expand_as(self .W) w_norm = self.W / w_norm cos_th = torch.mm(x_norm, w_norm) s_cos_th = self.scale * cos_th loss = self.cross_entropy(s_cos_th, label) return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_feats': 4, 'n_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 14.0 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, 
tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + r2, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = -tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, buf1, out=buf2) buf3 = buf1 del buf1 triton_poi_fused_2[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused__log_softmax_div_mul_neg_sum_3[grid(1)](buf5, buf3, primals_3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf3 return buf5, primals_2, primals_3, buf2, reinterpret_tensor(buf0, (4, 4 ), (1, 4), 0) class SphereLossNew(nn.Module): def __init__(self, in_feats, n_classes, scale=14, *args, **kwargs): super(SphereLossNew, self).__init__(*args, **kwargs) self.scale = scale self.cross_entropy = nn.CrossEntropyLoss() self.W = torch.nn.Parameter(torch.randn(in_feats, n_classes), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, input_0, input_1): primals_1 = self.W primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
modricwang/SphereReID
SphereLoss
false
10,581
[ "MIT" ]
0
d0c39d2ce52cbc35e4d3adc1e90c0e54585aa492
https://github.com/modricwang/SphereReID/tree/d0c39d2ce52cbc35e4d3adc1e90c0e54585aa492
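SphereLoss is ordinary cross-entropy over scale·cosθ between L2-normalised features and the columns of W; a quick eager-mode probe of that equivalence (sizes, seed and tolerance are arbitrary choices, not taken from the row):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
loss_fn = SphereLoss(in_feats=8, n_classes=5, scale=14)
x = torch.randn(3, 8)
labels = torch.tensor([0, 3, 4])

# Reference: normalise features and class weights, scale the cosines, apply cross-entropy.
cos = F.normalize(x, dim=1) @ F.normalize(loss_fn.W, dim=0)
ref = F.cross_entropy(14 * cos, labels)
assert torch.allclose(loss_fn(x, labels), ref, atol=1e-5)
```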
CNNAutoencoder
import torch import torch.nn as nn import torch.nn.functional as F class CNNAutoencoder(nn.Module): def __init__(self, depth_0=1, depth_1=64, depth_2=32, depth_3=16, lastdepth=1): super(CNNAutoencoder, self).__init__() self.depth_0 = depth_0 self.depth_1 = depth_1 self.depth_2 = depth_2 self.depth_3 = depth_3 self.enc1 = nn.Conv2d(self.depth_0, self.depth_1, kernel_size=3, stride=1, padding=1) self.enc2 = nn.Conv2d(self.depth_1, self.depth_2, kernel_size=3, stride=1, padding=1) self.enc3 = nn.Conv2d(self.depth_2, self.depth_3, kernel_size=3, stride=1, padding=1) self.ltnt = nn.Conv2d(self.depth_3, self.depth_3, kernel_size=3, stride=1, padding=1) self.dec3 = nn.ConvTranspose2d(self.depth_3, self.depth_2, kernel_size=1, stride=1) self.dec2 = nn.ConvTranspose2d(self.depth_2, self.depth_1, kernel_size=1, stride=1) self.dec1 = nn.ConvTranspose2d(self.depth_1, self.depth_0, kernel_size=1, stride=1) self.out = nn.Conv2d(self.depth_0, 1, 1) self.pool = nn.MaxPool2d(2, 2, return_indices=True, ceil_mode=False) self.unpool = nn.MaxUnpool2d(2, 2) def forward(self, x): x = F.relu(self.enc1(x)) x, indx1 = self.pool(x) x = F.relu(self.enc2(x)) x, indx2 = self.pool(x) x = F.relu(self.enc3(x)) x, indx3 = self.pool(x) x = F.relu(self.ltnt(x)) x = self.unpool(x, indx3, output_size=indx2.size()) x = F.relu(self.dec3(x)) x = self.unpool(x, indx2, output_size=indx1.size()) x = F.relu(self.dec2(x)) x = self.unpool(x, indx1) x = F.relu(self.dec1(x)) x = self.out(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x4 = xindex x2 = xindex // 32 % 32 tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tmp17 = tl.full([1], 2, tl.int32) tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 != 0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17) tmp19 = tmp18 * tmp17 tmp20 = tmp16 - tmp19 tmp21 = 2 * x2 tmp22 = tmp21 + tmp18 tmp23 = 2 * x0 tmp24 = tmp23 + tmp20 tmp25 = tl.full([1], 64, tl.int64) tmp26 = tmp22 * tmp25 tmp27 = tmp26 + tmp24 tl.store(out_ptr0 + x4, tmp6, None) tl.store(out_ptr1 + x4, tmp27, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x4 = xindex x2 = xindex // 16 % 16 tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy 
='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tmp17 = tl.full([1], 2, tl.int32) tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 != 0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17) tmp19 = tmp18 * tmp17 tmp20 = tmp16 - tmp19 tmp21 = 2 * x2 tmp22 = tmp21 + tmp18 tmp23 = 2 * x0 tmp24 = tmp23 + tmp20 tmp25 = tl.full([1], 32, tl.int64) tmp26 = tmp22 * tmp25 tmp27 = tmp26 + tmp24 tl.store(out_ptr0 + x4, tmp6, None) tl.store(out_ptr1 + x4, tmp27, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x4 = xindex x2 = xindex // 8 % 8 tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tmp17 = tl.full([1], 2, tl.int32) tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 != 0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17) tmp19 = tmp18 * tmp17 tmp20 = tmp16 - tmp19 tmp21 = 2 * x2 tmp22 = tmp21 + tmp18 tmp23 = 2 * x0 tmp24 = tmp23 + tmp20 tmp25 = tl.full([1], 16, tl.int64) tmp26 = tmp22 * tmp25 tmp27 = tmp26 + tmp24 tl.store(out_ptr0 + x4, tmp6, None) tl.store(out_ptr1 + x4, tmp27, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, 
XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (16, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_15, (1,), (1,)) assert_size_stride(primals_16, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_17, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 64, 32, 32), 
(65536, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int64) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2, buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int64) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. float32) buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int64 ) triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10, buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 16, 8, 8), (1024, 64, 8, 1)) buf13 = buf12 del buf12 buf30 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(4096)]( buf13, primals_9, buf30, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = torch.ops.aten.max_unpool2d.default(buf13, buf11, [16, 16]) del buf13 buf15 = buf14 del buf14 buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 32, 16, 16), (8192, 256, 16, 1)) buf17 = buf16 del buf16 buf29 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_7[grid(32768)]( buf17, primals_11, buf29, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf18 = torch.ops.aten.max_unpool2d.default(buf17, buf7, [32, 32]) del buf17 buf19 = buf18 del buf18 buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf21 = buf20 del buf20 buf28 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(262144)]( buf21, primals_13, buf28, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf22 = torch.ops.aten.max_unpool2d.default(buf21, buf3, [64, 64]) del buf21 buf23 = buf22 del buf22 buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf25 = buf24 del buf24 
triton_poi_fused_convolution_relu_9[grid(16384)](buf25, primals_15, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_10[grid(16384)](buf27, primals_17, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 return (buf27, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf15, buf19, buf23, buf25, buf28, buf29, buf30) class CNNAutoencoderNew(nn.Module): def __init__(self, depth_0=1, depth_1=64, depth_2=32, depth_3=16, lastdepth=1): super(CNNAutoencoderNew, self).__init__() self.depth_0 = depth_0 self.depth_1 = depth_1 self.depth_2 = depth_2 self.depth_3 = depth_3 self.enc1 = nn.Conv2d(self.depth_0, self.depth_1, kernel_size=3, stride=1, padding=1) self.enc2 = nn.Conv2d(self.depth_1, self.depth_2, kernel_size=3, stride=1, padding=1) self.enc3 = nn.Conv2d(self.depth_2, self.depth_3, kernel_size=3, stride=1, padding=1) self.ltnt = nn.Conv2d(self.depth_3, self.depth_3, kernel_size=3, stride=1, padding=1) self.dec3 = nn.ConvTranspose2d(self.depth_3, self.depth_2, kernel_size=1, stride=1) self.dec2 = nn.ConvTranspose2d(self.depth_2, self.depth_1, kernel_size=1, stride=1) self.dec1 = nn.ConvTranspose2d(self.depth_1, self.depth_0, kernel_size=1, stride=1) self.out = nn.Conv2d(self.depth_0, 1, 1) self.pool = nn.MaxPool2d(2, 2, return_indices=True, ceil_mode=False) self.unpool = nn.MaxUnpool2d(2, 2) def forward(self, input_0): primals_1 = self.enc1.weight primals_2 = self.enc1.bias primals_4 = self.enc2.weight primals_5 = self.enc2.bias primals_6 = self.enc3.weight primals_7 = self.enc3.bias primals_8 = self.ltnt.weight primals_9 = self.ltnt.bias primals_10 = self.dec3.weight primals_11 = self.dec3.bias primals_12 = self.dec2.weight primals_13 = self.dec2.bias primals_14 = self.dec1.weight primals_15 = self.dec1.bias primals_16 = self.out.weight primals_17 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
mariajmolina/ML-for-S2S
CNNAutoencoder
false
10,582
[ "MIT" ]
0
3de32e72042ba7e8b37a433579fa9c5630246d8c
https://github.com/mariajmolina/ML-for-S2S/tree/3de32e72042ba7e8b37a433579fa9c5630246d8c
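A round-trip shape check for the autoencoder row (CPU, eager module; the 64×64 input is the size its three pool/unpool stages are written around, and the batch size here is arbitrary):

```python
import torch

ae = CNNAutoencoder()
x = torch.rand(1, 1, 64, 64)
with torch.no_grad():
    y = ae(x)
assert y.shape == x.shape   # 64 -> 32 -> 16 -> 8 via pooling, then unpooled back to 64
```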
BCEFocalLoss
import torch import torch._utils class BCEFocalLoss(torch.nn.Module): """ Binary focal loss with fixed alpha """ def __init__(self, gamma=2, alpha=0.25, reduction='elementwise_mean'): super().__init__() self.gamma = gamma self.alpha = alpha self.reduction = reduction def forward(self, _input, target): pt = torch.sigmoid(_input) alpha = self.alpha loss = -alpha * (1 - pt) ** self.gamma * target * torch.log(pt) - ( 1 - alpha) * pt ** self.gamma * (1 - target) * torch.log(1 - pt) if self.reduction == 'elementwise_mean': loss = torch.mean(loss) elif self.reduction == 'sum': loss = torch.sum(loss) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = tmp3 * tmp3 tmp5 = -0.25 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp9 = tl_math.log(tmp1) tmp10 = tmp8 * tmp9 tmp11 = tmp1 * tmp1 tmp12 = 0.75 tmp13 = tmp11 * tmp12 tmp14 = tmp2 - tmp7 tmp15 = tmp13 * tmp14 tmp16 = tl_math.log(tmp3) tmp17 = tmp15 * tmp16 tmp18 = tmp10 - tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = 256.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCEFocalLossNew(torch.nn.Module): """ Binary focal loss with fixed alpha """ def __init__(self, gamma=2, alpha=0.25, reduction='elementwise_mean'): super().__init__() self.gamma = gamma self.alpha = alpha self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ilcessadecalcular/segmentation
BCEFocalLoss
false
10,583
[ "MIT" ]
0
24ba499a399efdba212ec5e2235b72ed8270cc24
https://github.com/ilcessadecalcular/segmentation/tree/24ba499a399efdba212ec5e2235b72ed8270cc24
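With gamma=0 and alpha=0.5 the focal loss above reduces to half of the standard BCE-with-logits loss, which gives a cheap correctness probe (shapes and tolerance are arbitrary; illustration only):

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 4, 4, 4)
targets = torch.rand(4, 4, 4, 4)
focal = BCEFocalLoss(gamma=0, alpha=0.5)(logits, targets)
bce = F.binary_cross_entropy_with_logits(logits, targets)
assert torch.allclose(focal, 0.5 * bce, atol=1e-5)
```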
Embeddings
import torch from torch import nn import torch._utils class Embeddings(nn.Module): def __init__(self, input_dim, embed_dim, cube_size, patch_size, dropout): super().__init__() self.n_patches = int(cube_size[0] * cube_size[1] * cube_size[2] / ( patch_size * patch_size * patch_size)) self.patch_size = patch_size self.embed_dim = embed_dim self.patch_embeddings = nn.Conv3d(in_channels=input_dim, out_channels=embed_dim, kernel_size=patch_size, stride=patch_size) self.position_embeddings = nn.Parameter(torch.zeros(1, self. n_patches, embed_dim)) self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.patch_embeddings(x) x = x.flatten(2) x = x.transpose(-1, -2) embeddings = x + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'embed_dim': 4, 'cube_size': [4, 4, 4], 'patch_size': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(4, 4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1)) buf1 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(16)](buf0, primals_2, primals_4, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_2 del primals_4 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class EmbeddingsNew(nn.Module): def __init__(self, input_dim, embed_dim, cube_size, patch_size, dropout): super().__init__() self.n_patches = int(cube_size[0] * cube_size[1] * cube_size[2] / ( patch_size * patch_size * patch_size)) self.patch_size = patch_size self.embed_dim = embed_dim self.patch_embeddings = nn.Conv3d(in_channels=input_dim, out_channels=embed_dim, kernel_size=patch_size, stride=patch_size) self.position_embeddings = nn.Parameter(torch.zeros(1, self. n_patches, embed_dim)) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_4 = self.position_embeddings primals_1 = self.patch_embeddings.weight primals_2 = self.patch_embeddings.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
ilcessadecalcular/segmentation
Embeddings
false
10,584
[ "MIT" ]
0
24ba499a399efdba212ec5e2235b72ed8270cc24
https://github.com/ilcessadecalcular/segmentation/tree/24ba499a399efdba212ec5e2235b72ed8270cc24
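A shape walk-through for the patch-embedding row, using made-up sizes (note that the row's own `get_inputs` passes a 4-D tensor, which Conv3d treats as one unbatched volume; the usual call is 5-D):

```python
import torch

emb = Embeddings(input_dim=1, embed_dim=32, cube_size=(16, 16, 16), patch_size=4, dropout=0.0)
x = torch.rand(2, 1, 16, 16, 16)             # (batch, channels, D, H, W)
out = emb(x)
# kernel = stride = 4 tiles the 16^3 volume into a 4x4x4 grid of patches -> 64 tokens of width 32
assert out.shape == (2, (16 // 4) ** 3, 32)
```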
DotRNNSelector
from _paritybench_helpers import _mock_config import torch import torch as th from torch.distributions import Categorical import torch.nn as nn import torch.nn.functional as F class DotRNNSelector(nn.Module): def __init__(self, input_shape, args): super(DotRNNSelector, self).__init__() self.args = args self.epsilon_start = self.args.epsilon_start self.epsilon_finish = self.args.role_epsilon_finish self.epsilon_anneal_time = self.args.epsilon_anneal_time self.epsilon_anneal_time_exp = self.args.epsilon_anneal_time_exp self.delta = (self.epsilon_start - self.epsilon_finish ) / self.epsilon_anneal_time self.role_action_spaces_update_start = (self.args. role_action_spaces_update_start) self.epsilon_start_t = 0 self.epsilon_reset = True self.fc1 = nn.Linear(args.rnn_hidden_dim, 2 * args.rnn_hidden_dim) self.fc2 = nn.Linear(2 * args.rnn_hidden_dim, args.action_latent_dim) self.epsilon = 0.05 def forward(self, inputs, role_latent): x = self.fc2(F.relu(self.fc1(inputs))) x = x.unsqueeze(-1) role_latent_reshaped = role_latent.unsqueeze(0).repeat(x.shape[0], 1, 1 ) role_q = th.bmm(role_latent_reshaped, x).squeeze() return role_q def select_role(self, role_qs, test_mode=False, t_env=None): self.epsilon = self.epsilon_schedule(t_env) if test_mode: self.epsilon = 0.0 masked_q_values = role_qs.detach().clone() random_numbers = th.rand_like(role_qs[:, 0]) pick_random = (random_numbers < self.epsilon).long() random_roles = Categorical(th.ones(role_qs.shape).float()).sample( ).long() picked_roles = pick_random * random_roles + (1 - pick_random ) * masked_q_values.max(dim=1)[1] return picked_roles def epsilon_schedule(self, t_env): if t_env is None: return 0.05 if t_env > self.role_action_spaces_update_start and self.epsilon_reset: self.epsilon_reset = False self.epsilon_start_t = t_env self.epsilon_anneal_time = self.epsilon_anneal_time_exp self.delta = (self.epsilon_start - self.epsilon_finish ) / self.epsilon_anneal_time if t_env - self.epsilon_start_t > self.epsilon_anneal_time: epsilon = self.epsilon_finish else: epsilon = self.epsilon_start - (t_env - self.epsilon_start_t ) * self.delta return epsilon def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_shape': 4, 'args': _mock_config(epsilon_start=4, role_epsilon_finish=4, epsilon_anneal_time=4, epsilon_anneal_time_exp=4, role_action_spaces_update_start=4, rnn_hidden_dim=4, action_latent_dim=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch as th from torch.distributions import Categorical import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_repeat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(32)](buf1, primals_2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_repeat_1[grid(64)](primals_6, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0), out=buf4) del buf2 return reinterpret_tensor(buf4, (4, 4), (4, 1), 0 ), primals_3, buf1, reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0 ), primals_4 class DotRNNSelectorNew(nn.Module): def __init__(self, input_shape, args): super(DotRNNSelectorNew, self).__init__() self.args = args self.epsilon_start = self.args.epsilon_start self.epsilon_finish = self.args.role_epsilon_finish self.epsilon_anneal_time = self.args.epsilon_anneal_time self.epsilon_anneal_time_exp = self.args.epsilon_anneal_time_exp self.delta = (self.epsilon_start - self.epsilon_finish ) / self.epsilon_anneal_time self.role_action_spaces_update_start = (self.args. 
role_action_spaces_update_start) self.epsilon_start_t = 0 self.epsilon_reset = True self.fc1 = nn.Linear(args.rnn_hidden_dim, 2 * args.rnn_hidden_dim) self.fc2 = nn.Linear(2 * args.rnn_hidden_dim, args.action_latent_dim) self.epsilon = 0.05 def select_role(self, role_qs, test_mode=False, t_env=None): self.epsilon = self.epsilon_schedule(t_env) if test_mode: self.epsilon = 0.0 masked_q_values = role_qs.detach().clone() random_numbers = th.rand_like(role_qs[:, 0]) pick_random = (random_numbers < self.epsilon).long() random_roles = Categorical(th.ones(role_qs.shape).float()).sample( ).long() picked_roles = pick_random * random_roles + (1 - pick_random ) * masked_q_values.max(dim=1)[1] return picked_roles def epsilon_schedule(self, t_env): if t_env is None: return 0.05 if t_env > self.role_action_spaces_update_start and self.epsilon_reset: self.epsilon_reset = False self.epsilon_start_t = t_env self.epsilon_anneal_time = self.epsilon_anneal_time_exp self.delta = (self.epsilon_start - self.epsilon_finish ) / self.epsilon_anneal_time if t_env - self.epsilon_start_t > self.epsilon_anneal_time: epsilon = self.epsilon_finish else: epsilon = self.epsilon_start - (t_env - self.epsilon_start_t ) * self.delta return epsilon def forward(self, input_0, input_1): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
NagisaZj/RODE
DotRNNSelector
false
10,585
[ "Apache-2.0" ]
0
f7f6831fee58a7910e1d7c3a8ae19cef82ab8d03
https://github.com/NagisaZj/RODE/tree/f7f6831fee58a7910e1d7c3a8ae19cef82ab8d03
MeanPooling
import torch
from torch import nn


class MeanPooling(nn.Module):

    def __init__(self):
        super(MeanPooling, self).__init__()

    def forward(self, doc_state, entity_mapping, entity_lens):
        entity_states = entity_mapping.unsqueeze(3) * doc_state.unsqueeze(1)
        mean_pooled = torch.sum(entity_states, dim=2) / entity_lens.unsqueeze(2)
        return mean_pooled


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex // 16 x3 = xindex // 64 x5 = xindex % 16 x6 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x6, tmp14, xmask) @triton.jit def triton_poi_fused_div_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 16 x5 = xindex // 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x5), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + x6, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_div_mul_sum_1[grid(1024)](buf0, arg2_1, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg2_1 del buf0 return buf1, class MeanPoolingNew(nn.Module): def __init__(self): super(MeanPoolingNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
mottled233/DFGN-pytorch
MeanPooling
false
10,586
[ "MIT" ]
0
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
https://github.com/mottled233/DFGN-pytorch/tree/7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
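A minimal usage sketch for the MeanPooling record above; the shapes and the 0/1 entity map below are assumptions about the intended inputs (the record's get_inputs() just feeds random 4x4x4x4 tensors) and are not part of the dataset entry:

import torch

# Assumed shapes: doc_state (batch, seq_len, hidden), entity_mapping is a 0/1
# matrix (batch, n_entities, seq_len), entity_lens counts tokens per entity.
batch, seq_len, hidden, n_entities = 2, 8, 16, 3
doc_state = torch.randn(batch, seq_len, hidden)
entity_mapping = torch.zeros(batch, n_entities, seq_len)
entity_mapping[:, 0, 0:4] = 1.0  # entity 0 covers the first four tokens
entity_mapping[:, 1, 4:6] = 1.0
entity_mapping[:, 2, 6:8] = 1.0
entity_lens = entity_mapping.sum(dim=-1)

pool = MeanPooling()  # class from the record above
pooled = pool(doc_state, entity_mapping, entity_lens)
print(pooled.shape)  # torch.Size([2, 3, 16]): one averaged vector per entity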
SeparableConv2d_same
import torch
import torch.nn as nn
import torch.nn.functional as F


def fixed_padding(inputs, kernel_size, dilation):
    kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
    return padded_inputs


class SeparableConv2d_same(nn.Module):

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1,
            bias=False):
        super(SeparableConv2d_same, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0,
            dilation, groups=inplanes, bias=bias)
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        x = fixed_padding(x, self.conv1.kernel_size[0],
            dilation=self.conv1.dilation[0])
        x = self.conv1(x)
        x = self.pointwise(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) return buf2, primals_2, primals_3, buf0, buf1 def fixed_padding(inputs, kernel_size, dilation): kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end)) return padded_inputs class SeparableConv2d_sameNew(nn.Module): def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation= 1, bias=False): super(SeparableConv2d_sameNew, self).__init__() self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation, groups=inplanes, bias=bias) self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.pointwise.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
lutbook/pytorch-segmentation-pipeline
SeparableConv2d_same
false
10,587
[ "MIT" ]
0
eb29d1bf240c158c64d81177e9be93cd958c0026
https://github.com/lutbook/pytorch-segmentation-pipeline/tree/eb29d1bf240c158c64d81177e9be93cd958c0026
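An illustrative check for the SeparableConv2d_same record above, using arbitrary sizes: the TensorFlow-style "same" padding computed by fixed_padding keeps the spatial resolution when stride is 1.

import torch

x = torch.randn(2, 8, 32, 32)
conv = SeparableConv2d_same(inplanes=8, planes=16, kernel_size=3, stride=1,
    dilation=2)  # class from the record above
y = conv(x)
print(y.shape)  # torch.Size([2, 16, 32, 32]): H and W are unchanged

# fixed_padding first computes the effective kernel:
# 3 + (3 - 1) * (2 - 1) = 5, so two rows/columns of zeros go on each side.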
ActorCriticModel
import torch
import torch.nn as nn
import torch.nn.functional as F


class ActorCriticModel(nn.Module):

    def __init__(self, n_state, n_actions):
        super(ActorCriticModel, self).__init__()
        self.fc1 = nn.Linear(n_state, 16)
        self.action1 = nn.Linear(16, 16)
        self.action2 = nn.Linear(16, n_actions)
        self.value1 = nn.Linear(16, 16)
        self.value2 = nn.Linear(16, 1)

    def forward(self, x):
        x = torch.Tensor(x)
        x = F.relu(self.fc1(x))
        action_x = F.relu(self.action1(x))
        action_probs = F.softmax(self.action2(action_x), dim=-1)
        value_x = F.relu(self.value1(x))
        state_values = self.value2(value_x)
        return action_probs, state_values


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_state': 4, 'n_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (16, 16), (16, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (1, 16), (16, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del 
primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf13 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_3, buf13, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf12 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3, primals_5, buf12, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 16), (1, 16), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf7 buf11 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf8, primals_9, buf11, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (64, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 1), (1, 16), 0 ), alpha=1, beta=1, out=buf10) del primals_11 return buf6, reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor( buf3, (64, 16), (16, 1), 0), buf6, reinterpret_tensor(buf8, (64, 16 ), (16, 1), 0 ), primals_10, buf11, primals_8, primals_6, buf12, primals_4, buf13 class ActorCriticModelNew(nn.Module): def __init__(self, n_state, n_actions): super(ActorCriticModelNew, self).__init__() self.fc1 = nn.Linear(n_state, 16) self.action1 = nn.Linear(16, 16) self.action2 = nn.Linear(16, n_actions) self.value1 = nn.Linear(16, 16) self.value2 = nn.Linear(16, 1) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.action1.weight primals_5 = self.action1.bias primals_6 = self.action2.weight primals_7 = self.action2.bias primals_8 = self.value1.weight primals_9 = self.value1.bias primals_10 = self.value2.weight primals_11 = self.value2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
nikolim/cablab
ActorCriticModel
false
10,588
[ "MIT" ]
0
1dcf0d7da01ed3988f84309acfb31cc9a9893de1
https://github.com/nikolim/cablab/tree/1dcf0d7da01ed3988f84309acfb31cc9a9893de1
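A short illustrative call of the ActorCriticModel record above; the batch size is arbitrary and the prints only restate what the module already computes.

import torch

model = ActorCriticModel(n_state=4, n_actions=4)  # class from the record above
obs = torch.rand(8, 4)  # 8 observations with 4 features each
action_probs, state_values = model(obs)
print(action_probs.shape, state_values.shape)  # torch.Size([8, 4]) torch.Size([8, 1])
print(torch.allclose(action_probs.sum(dim=-1), torch.ones(8)))  # softmax rows sum to 1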
GlobalAvgPool1d
import torch
import torch.nn as nn
from abc import abstractmethod
from torch.nn import functional


class AvgPool(nn.Module):
    """AvgPool Module.
    """

    def __init__(self):
        super().__init__()

    @abstractmethod
    def forward(self, input_tensor):
        pass


class GlobalAvgPool1d(AvgPool):
    """GlobalAvgPool1d Module.
    """

    def forward(self, input_tensor):
        return functional.avg_pool1d(input_tensor, input_tensor.size()[2:]
            ).view(input_tensor.size()[:2])


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from abc import abstractmethod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class AvgPool(nn.Module): """AvgPool Module. """ def __init__(self): super().__init__() @abstractmethod def forward(self, input_tensor): pass class GlobalAvgPool1dNew(AvgPool): """GlobalAvgPool1d Module. """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
lawwu/nni
GlobalAvgPool1d
false
10,589
[ "MIT" ]
0
b869dd48dfe36392e7b78c70ea35eb6d4b4779dc
https://github.com/lawwu/nni/tree/b869dd48dfe36392e7b78c70ea35eb6d4b4779dc
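A quick illustration for the GlobalAvgPool1d record above: pooling over the whole last axis is just a mean over that axis, which is also what the fused Triton kernel computes (a sum scaled by 0.25 for length-4 inputs).

import torch

pool = GlobalAvgPool1d()  # class from the record above
x = torch.rand(4, 4, 4)
out = pool(x)
print(out.shape)                           # torch.Size([4, 4])
print(torch.allclose(out, x.mean(dim=2)))  # True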
ResidualConvUnit
import torch
import torch.nn as nn


class ResidualConvUnit(nn.Module):
    """Residual convolution module.
    """

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()
        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1,
            padding=1, bias=True)
        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1,
            padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """
        out = self.relu(x)
        out = self.conv1(out)
        out = self.relu(out)
        out = self.conv2(out)
        return out + x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_2[grid(256)](buf4, buf0, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf4, primals_2, primals_4, buf0, buf2 class ResidualConvUnitNew(nn.Module): """Residual convolution module. """ def __init__(self, features): """Init. 
Args: features (int): number of features """ super().__init__() self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
maayan-myheritage/3d-photo-inpainting
ResidualConvUnit
false
10,590
[ "MIT" ]
0
6293eecfeb55ceba019655723f6efe31e8ecb177
https://github.com/maayan-myheritage/3d-photo-inpainting/tree/6293eecfeb55ceba019655723f6efe31e8ecb177
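The ResidualConvUnit record above is shape-preserving (3x3 convolutions, stride 1, padding 1), which keeps the final addition well-defined; a minimal shape check with arbitrary sizes:

import torch

block = ResidualConvUnit(features=8)  # class from the record above
x = torch.randn(2, 8, 32, 32)
out = block(x)
print(out.shape)  # torch.Size([2, 8, 32, 32]): same shape as the input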
Temporal_Attention_layer
import torch
import torch.nn.functional as F
import torch.nn as nn


class Temporal_Attention_layer(nn.Module):

    def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
        super(Temporal_Attention_layer, self).__init__()
        self.U1 = nn.Parameter(torch.FloatTensor(num_of_vertices))
        self.U2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_vertices))
        self.U3 = nn.Parameter(torch.FloatTensor(in_channels))
        self.be = nn.Parameter(torch.FloatTensor(1, num_of_timesteps,
            num_of_timesteps))
        self.Ve = nn.Parameter(torch.FloatTensor(num_of_timesteps,
            num_of_timesteps))

    def forward(self, x):
        """
        Making a forward pass of the temporal attention layer.
        B is the batch size. N_nodes is the number of nodes in the graph.
        F_in is the dimension of input features. T_in is the length of input
        sequence in time.

        Arg types:
            * x (PyTorch Float Tensor)* - Node features for T time periods,
              with shape (B, N_nodes, F_in, T_in).

        Return types:
            * output (PyTorch Float Tensor)* - Temporal attention score
              matrices, with shape (B, T_in, T_in).
        """
        _, _num_of_vertices, _num_of_features, _num_of_timesteps = x.shape
        lhs = torch.matmul(torch.matmul(x.permute(0, 3, 2, 1), self.U1),
            self.U2)
        rhs = torch.matmul(self.U3, x)
        product = torch.matmul(lhs, rhs)
        E = torch.matmul(self.Ve, torch.sigmoid(product + self.be))
        E_normalized = F.softmax(E, dim=1)
        return E_normalized


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'DEVICE': 4, 'in_channels': 4, 'num_of_vertices': 4,
        'num_of_timesteps': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x0 % 4) + 64 * (x0 // 16) + x0 // 4 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (16 + 4 * (x0 % 4) + 64 * (x0 // 16) + x0 // 4 % 4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (32 + 4 * (x0 % 4) + 64 * (x0 // 16) + x0 // 4 % 4), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (48 + 4 * (x0 % 4) + 64 * (x0 // 16) + x0 // 4 % 4), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) @triton.jit def triton_poi_fused_mv_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * (x0 // 4) + x0 % 4), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (4 + 16 * (x0 // 4) + x0 % 4), xmask) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (8 + 16 * (x0 // 4) + x0 % 4), xmask) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (12 + 16 * (x0 // 4) + x0 % 4), xmask) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp3, xmask & ymask) @triton.jit def triton_poi_fused__softmax_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, 
(4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_mv_0[grid(64)](primals_1, primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), primals_3, out=buf1) buf2 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused_mv_1[grid(64)](primals_1, primals_4, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, primals_6, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused__softmax_clone_3[grid(64)](buf5, buf6, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) del buf5 triton_poi_fused__softmax_4[grid(16, 4)](buf6, buf7, 16, 4, XBLOCK= 4, YBLOCK=16, num_warps=1, num_stages=1) del buf6 return buf7, primals_1, primals_6, buf3, reinterpret_tensor(buf4, (16, 4), (4, 1), 0), buf7, primals_5, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (4, 16), (1, 4), 0), reinterpret_tensor( primals_3, (4, 4), (1, 4), 0) class Temporal_Attention_layerNew(nn.Module): def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps): super(Temporal_Attention_layerNew, self).__init__() self.U1 = nn.Parameter(torch.FloatTensor(num_of_vertices)) self.U2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_vertices)) self.U3 = nn.Parameter(torch.FloatTensor(in_channels)) self.be = nn.Parameter(torch.FloatTensor(1, num_of_timesteps, num_of_timesteps)) self.Ve = nn.Parameter(torch.FloatTensor(num_of_timesteps, num_of_timesteps)) def forward(self, input_0): primals_2 = self.U1 primals_3 = self.U2 primals_4 = self.U3 primals_6 = self.be primals_5 = self.Ve primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
msalvato/pytorch_geometric_temporal
Temporal_Attention_layer
false
10,591
[ "MIT" ]
0
149bd46d3b2bddfc3570e31a91a3f53e8873d50e
https://github.com/msalvato/pytorch_geometric_temporal/tree/149bd46d3b2bddfc3570e31a91a3f53e8873d50e
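An illustrative forward pass for the Temporal_Attention_layer record above, following the shapes in its docstring. The record allocates its parameters with torch.FloatTensor and never initialises them, so the uniform init below is an assumption needed to get finite outputs.

import torch

B, N, F_in, T = 4, 4, 4, 4
layer = Temporal_Attention_layer(DEVICE='cpu', in_channels=F_in,
    num_of_vertices=N, num_of_timesteps=T)  # class from the record above
for p in layer.parameters():
    torch.nn.init.uniform_(p, -0.1, 0.1)

x = torch.rand(B, N, F_in, T)  # (B, N_nodes, F_in, T_in)
E = layer(x)
print(E.shape)          # torch.Size([4, 4, 4]) = (B, T_in, T_in)
print(E.sum(dim=1)[0])  # softmax is over dim=1, so each column sums to 1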
MultiHeadAttention
import math import torch import numpy as np from torch import nn class MultiHeadAttention(nn.Module): def __init__(self, n_heads, input_dim, embed_dim, val_dim=None, key_dim =None): super(MultiHeadAttention, self).__init__() if val_dim is None: val_dim = embed_dim // n_heads if key_dim is None: key_dim = val_dim self.n_heads = n_heads self.input_dim = input_dim self.embed_dim = embed_dim self.val_dim = val_dim self.key_dim = key_dim self.norm_factor = 1 / math.sqrt(key_dim) self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim)) self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim)) self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim)) self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim)) self.init_parameters() def init_parameters(self): for param in self.parameters(): stdv = 1.0 / math.sqrt(param.size(-1)) param.data.uniform_(-stdv, stdv) def forward(self, q, h=None, mask=None): """ :param q: queries (batch_size, n_query, input_dim) :param h: data (batch_size, graph_size, input_dim) :param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1) Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency) :return: """ if h is None: h = q batch_size, graph_size, input_dim = h.size() n_query = q.size(1) assert q.size(0) == batch_size assert q.size(2) == input_dim assert input_dim == self.input_dim, 'Wrong embedding dimension of input' hflat = h.contiguous().view(-1, input_dim) qflat = q.contiguous().view(-1, input_dim) shp = self.n_heads, batch_size, graph_size, -1 shp_q = self.n_heads, batch_size, n_query, -1 Q = torch.matmul(qflat, self.W_query).view(shp_q) K = torch.matmul(hflat, self.W_key).view(shp) V = torch.matmul(hflat, self.W_val).view(shp) compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3)) if mask is not None: mask = mask.view(1, batch_size, n_query, graph_size).expand_as( compatibility) compatibility[mask] = -np.inf attn = torch.softmax(compatibility, dim=-1) if mask is not None: attnc = attn.clone() attnc[mask] = 0 attn = attnc heads = torch.matmul(attn, V) out = torch.mm(heads.permute(1, 2, 0, 3).contiguous().view(-1, self .n_heads * self.val_dim), self.W_out.view(-1, self.embed_dim) ).view(batch_size, n_query, self.embed_dim) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_heads': 4, 'input_dim': 4, 'embed_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_clone_view_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = 
yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (0, 4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_0[grid(64)](buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf1 triton_poi_fused_0[grid(64)](buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf8) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_clone_view_3[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 4), (4, 1 ), 0), out=buf10) return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), primals_1, buf7, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf9, (4, 16), (1, 4), 0), reinterpret_tensor( primals_5, (4, 4), (1, 4), 0) class MultiHeadAttentionNew(nn.Module): def __init__(self, n_heads, input_dim, embed_dim, val_dim=None, key_dim =None): super(MultiHeadAttentionNew, self).__init__() if val_dim is None: val_dim = embed_dim // n_heads if key_dim is None: key_dim = val_dim self.n_heads = n_heads self.input_dim = input_dim self.embed_dim = embed_dim self.val_dim = val_dim self.key_dim = key_dim self.norm_factor = 1 / math.sqrt(key_dim) self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim)) self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim)) self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim)) self.W_out = nn.Parameter(torch.Tensor(n_heads, val_dim, embed_dim)) self.init_parameters() def init_parameters(self): for param in 
self.parameters(): stdv = 1.0 / math.sqrt(param.size(-1)) param.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.W_query primals_3 = self.W_key primals_4 = self.W_val primals_5 = self.W_out primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
neo-pan/attention-learn-to-route
MultiHeadAttention
false
10,592
[ "MIT" ]
0
bb094d6e96276719ab2e379f279c614df7d822f9
https://github.com/neo-pan/attention-learn-to-route/tree/bb094d6e96276719ab2e379f279c614df7d822f9
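A minimal self-attention call for the MultiHeadAttention record above: with h omitted the queries attend over themselves, and the optional boolean mask (True meaning attention is not allowed) is viewed as (batch, n_query, graph_size). Sizes follow the record's get_inputs().

import torch

mha = MultiHeadAttention(n_heads=4, input_dim=4, embed_dim=4)  # from the record above
q = torch.rand(4, 4, 4)  # (batch_size, n_query, input_dim)
out = mha(q)             # equivalent to mha(q, h=q)
print(out.shape)         # torch.Size([4, 4, 4]) = (batch, n_query, embed_dim)

mask = torch.zeros(4, 4, 4, dtype=torch.bool)  # nothing masked out
out_masked = mha(q, mask=mask)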
PositiveLinear
import torch
import torch.nn as nn
import torch.nn.functional as F


class PositiveLinear(nn.Linear):

    def forward(self, input):
        return F.linear(input, self.weight ** 2, self.bias)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class PositiveLinearNew(nn.Linear): def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
oguzserbetci/monotone-network
PositiveLinear
false
10,593
[ "MIT" ]
0
33a317a1dde1a3d3e74dcbe3eb12d1a81e745c95
https://github.com/oguzserbetci/monotone-network/tree/33a317a1dde1a3d3e74dcbe3eb12d1a81e745c95
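The PositiveLinear record above squares the weight matrix before the affine map, so every effective weight is non-negative and the layer is monotone non-decreasing in each input coordinate; a small check of that property:

import torch

layer = PositiveLinear(in_features=4, out_features=4)  # class from the record above
x = torch.rand(16, 4)
delta = torch.rand(16, 4)  # a non-negative perturbation of the inputs
y1 = layer(x)
y2 = layer(x + delta)
print(torch.all(y2 >= y1))                 # tensor(True): outputs never decrease
print(torch.all(layer.weight ** 2 >= 0))   # the effective weights are non-negative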
Spatial_Attention_layer
import torch
import torch.nn.functional as F
import torch.nn as nn


class Spatial_Attention_layer(nn.Module):
    """
    compute spatial attention scores
    """

    def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
        super(Spatial_Attention_layer, self).__init__()
        self.W1 = nn.Parameter(torch.FloatTensor(num_of_timesteps))
        self.W2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_timesteps))
        self.W3 = nn.Parameter(torch.FloatTensor(in_channels))
        self.bs = nn.Parameter(torch.FloatTensor(1, num_of_vertices,
            num_of_vertices))
        self.Vs = nn.Parameter(torch.FloatTensor(num_of_vertices,
            num_of_vertices))

    def forward(self, x):
        """
        Making a forward pass of the spatial attention layer.
        B is the batch size. N_nodes is the number of nodes in the graph.
        F_in is the dimension of input features. T_in is the length of input
        sequence in time.

        Arg types:
            * x (PyTorch Float Tensor)* - Node features for T time periods,
              with shape (B, N_nodes, F_in, T_in).

        Return types:
            * output (PyTorch Float Tensor)* - Spatial attention score
              matrices, with shape (B, N_nodes, N_nodes).
        """
        lhs = torch.matmul(torch.matmul(x, self.W1), self.W2)
        rhs = torch.matmul(self.W3, x).transpose(-1, -2)
        product = torch.matmul(lhs, rhs)
        S = torch.matmul(self.Vs, torch.sigmoid(product + self.bs))
        S_normalized = F.softmax(S, dim=1)
        return S_normalized


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'DEVICE': 4, 'in_channels': 4, 'num_of_vertices': 4,
        'num_of_timesteps': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) @triton.jit def triton_poi_fused_mv_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * (x0 // 4) + x0 % 4), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (4 + 16 * (x0 // 4) + x0 % 4), xmask) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (8 + 16 * (x0 // 4) + x0 % 4), xmask) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (12 + 16 * (x0 // 4) + x0 % 4), xmask) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp3, xmask & ymask) @triton.jit def triton_poi_fused__softmax_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = 
tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_mv_0[grid(64)](primals_2, primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), primals_3, out=buf1) buf2 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused_mv_1[grid(64)](primals_2, primals_4, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, primals_6, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused__softmax_clone_3[grid(64)](buf5, buf6, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) del buf5 triton_poi_fused__softmax_4[grid(16, 4)](buf6, buf7, 16, 4, XBLOCK= 4, YBLOCK=16, num_warps=1, num_stages=1) del buf6 return buf7, primals_2, primals_6, buf3, reinterpret_tensor(buf4, (16, 4), (4, 1), 0), buf7, primals_5, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (4, 16), (1, 
4), 0), reinterpret_tensor( primals_3, (4, 4), (1, 4), 0) class Spatial_Attention_layerNew(nn.Module): """ compute spatial attention scores """ def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps): super(Spatial_Attention_layerNew, self).__init__() self.W1 = nn.Parameter(torch.FloatTensor(num_of_timesteps)) self.W2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_timesteps) ) self.W3 = nn.Parameter(torch.FloatTensor(in_channels)) self.bs = nn.Parameter(torch.FloatTensor(1, num_of_vertices, num_of_vertices)) self.Vs = nn.Parameter(torch.FloatTensor(num_of_vertices, num_of_vertices)) def forward(self, input_0): primals_1 = self.W1 primals_3 = self.W2 primals_4 = self.W3 primals_6 = self.bs primals_5 = self.Vs primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
msalvato/pytorch_geometric_temporal
Spatial_Attention_layer
false
10,594
[ "MIT" ]
0
149bd46d3b2bddfc3570e31a91a3f53e8873d50e
https://github.com/msalvato/pytorch_geometric_temporal/tree/149bd46d3b2bddfc3570e31a91a3f53e8873d50e
TorchAdd
import torch import torch.nn as nn class TorchAdd(nn.Module): """TorchAdd Module. """ def forward(self, input_list): return input_list[0] + input_list[1] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class TorchAddNew(nn.Module): """TorchAdd Module. """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
lawwu/nni
TorchAdd
false
10,595
[ "MIT" ]
0
b869dd48dfe36392e7b78c70ea35eb6d4b4779dc
https://github.com/lawwu/nni/tree/b869dd48dfe36392e7b78c70ea35eb6d4b4779dc
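Note on the TorchAdd record above: get_inputs() supplies a single (4, 4, 4, 4) tensor as input_list, so the eager forward adds its first two slices, and the generated kernel correspondingly reads offsets x0 and 64 + x0 of the flattened input. A minimal CPU sketch of that indexing (illustrative only; the tensor below is toy data, not part of the record):

import torch

x = torch.rand(4, 4, 4, 4)
eager = x[0] + x[1]                                 # what TorchAdd.forward computes here
flat = x.reshape(-1)
kernel_like = (flat[:64] + flat[64:128]).reshape(4, 4, 4)   # mirrors tl.load(x0) + tl.load(64 + x0)
assert torch.allclose(eager, kernel_like)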
GlobalMaxPooling
import torch import torch.nn as nn class GlobalMaxPooling(nn.Module): def __init__(self, dim=-1): super(self.__class__, self).__init__() self.dim = dim def forward(self, x): return x.max(dim=self.dim)[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class GlobalMaxPoolingNew(nn.Module): def __init__(self, dim=-1): super(self.__class__, self).__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
numb3r33/toxic_comments_classification
GlobalMaxPooling
false
10,596
[ "MIT" ]
0
c5de56751aee29b6dee6e330237a4fd0bcd7fd51
https://github.com/numb3r33/toxic_comments_classification/tree/c5de56751aee29b6dee6e330237a4fd0bcd7fd51
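The fused kernel in the GlobalMaxPooling record unrolls the reduction over the last dimension of size 4 into a chain of pairwise maximums. A small CPU sketch of the same computation (illustrative only):

import torch

x = torch.rand(4, 4, 4, 4)
eager = x.max(dim=-1)[0]
# Left-fold of pairwise maximums, as in the generated kernel
m = torch.maximum(x[..., 0], x[..., 1])
m = torch.maximum(m, x[..., 2])
m = torch.maximum(m, x[..., 3])
assert torch.allclose(eager, m)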
PartialConv
import math import torch import torch.nn as nn def weights_init(init_type='gaussian'): def init_fun(m): classname = m.__class__.__name__ if (classname.find('Conv') == 0 or classname.find('Linear') == 0 ) and hasattr(m, 'weight'): if init_type == 'gaussian': nn.init.normal_(m.weight, 0.0, 0.02) elif init_type == 'xavier': nn.init.xavier_normal_(m.weight, gain=math.sqrt(2)) elif init_type == 'kaiming': nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') elif init_type == 'orthogonal': nn.init.orthogonal_(m.weight, gain=math.sqrt(2)) elif init_type == 'default': pass else: assert 0, 'Unsupported initialization: {}'.format(init_type) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0.0) return init_fun class PartialConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super().__init__() self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, False) self.input_conv.apply(weights_init('kaiming')) self.slide_winsize = in_channels * kernel_size * kernel_size torch.nn.init.constant_(self.mask_conv.weight, 1.0) for param in self.mask_conv.parameters(): param.requires_grad = False def forward(self, input, mask): output = self.input_conv(input * mask) if self.input_conv.bias is not None: output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as( output) else: output_bias = torch.zeros_like(output) with torch.no_grad(): output_mask = self.mask_conv(mask) no_update_holes = output_mask == 0 mask_sum = output_mask.masked_fill_(no_update_holes, 1.0) output_pre = (output - output_bias ) * self.slide_winsize / mask_sum + output_bias output = output_pre.masked_fill_(no_update_holes, 0.0) new_mask = torch.ones_like(output) new_mask = new_mask.masked_fill_(no_update_holes, 0.0) return output, new_mask def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp5 - tmp4 tmp7 = 64.0 tmp8 = tmp6 * tmp7 tmp9 = 1.0 tmp10 = tl.where(tmp2, tmp9, tmp0) tmp11 = tmp8 / tmp10 tmp12 = tmp11 + tmp4 tmp13 = tl.where(tmp2, tmp1, tmp12) tmp14 = tl.where(tmp2, tmp1, tmp9) tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) del primals_2 del primals_5 buf3 = buf1 del buf1 buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1[ grid(16)](buf3, buf2, primals_4, buf4, 16, XBLOCK=16, num_warps =1, num_stages=1) del primals_4 return buf3, buf4, primals_3, buf0, buf2 def weights_init(init_type='gaussian'): def init_fun(m): classname = m.__class__.__name__ if (classname.find('Conv') == 0 or classname.find('Linear') == 0 ) and hasattr(m, 'weight'): if init_type == 'gaussian': nn.init.normal_(m.weight, 0.0, 0.02) elif init_type == 'xavier': nn.init.xavier_normal_(m.weight, gain=math.sqrt(2)) elif init_type == 'kaiming': nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') elif init_type == 'orthogonal': nn.init.orthogonal_(m.weight, gain=math.sqrt(2)) elif init_type == 'default': pass else: assert 0, 'Unsupported initialization: {}'.format(init_type) if hasattr(m, 
'bias') and m.bias is not None: nn.init.constant_(m.bias, 0.0) return init_fun class PartialConvNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super().__init__() self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, False) self.input_conv.apply(weights_init('kaiming')) self.slide_winsize = in_channels * kernel_size * kernel_size torch.nn.init.constant_(self.mask_conv.weight, 1.0) for param in self.mask_conv.parameters(): param.requires_grad = False def forward(self, input_0, input_1): primals_1 = self.input_conv.weight primals_4 = self.input_conv.bias primals_2 = self.mask_conv.weight primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
maayan-myheritage/3d-photo-inpainting
PartialConv
false
10,597
[ "MIT" ]
0
6293eecfeb55ceba019655723f6efe31e8ecb177
https://github.com/maayan-myheritage/3d-photo-inpainting/tree/6293eecfeb55ceba019655723f6efe31e8ecb177
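The second fused kernel in the PartialConv record performs the hole renormalization in one pass: add the bias, rescale by slide_winsize / mask_sum, and zero the positions where the mask convolution produced 0. A CPU sketch of that arithmetic on toy stand-in tensors (illustrative only; the inputs below are not the record's data):

import torch

raw = torch.rand(4, 4, 1, 1)                        # stand-in: conv output with bias already added
bias = torch.rand(4).view(1, -1, 1, 1)
output_mask = torch.randint(0, 3, (4, 4, 1, 1)).float()

slide_winsize = 4 * 4 * 4                           # in_channels * kernel_size**2 = 64 for this record
holes = output_mask == 0
mask_sum = output_mask.masked_fill(holes, 1.0)
out = ((raw - bias) * slide_winsize / mask_sum + bias).masked_fill(holes, 0.0)
new_mask = torch.ones_like(out).masked_fill(holes, 0.0)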
Mul
import torch import torch as ch class Mul(ch.nn.Module): def __init__(self, weight): super(Mul, self).__init__() self.weight = weight def forward(self, x): return x * self.weight def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'weight': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch as ch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MulNew(ch.nn.Module): def __init__(self, weight): super(MulNew, self).__init__() self.weight = weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
njwfish/ffcv
Mul
false
10,598
[ "Apache-2.0" ]
0
3c219787da2fb8dbdaab24e75f34b3398ad7b7d1
https://github.com/njwfish/ffcv/tree/3c219787da2fb8dbdaab24e75f34b3398ad7b7d1
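Because weight is a plain Python number at construction time, the generated kernel in the Mul record hard-codes it as the literal 4.0 rather than passing it as an argument, so the compiled call is equivalent to (illustrative only):

import torch

x = torch.rand(4, 4, 4, 4)
out = x * 4.0
assert torch.allclose(out, x * 4)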
MPJPE
import torch import torch.nn as nn import torch.nn.functional class BaseMetric(nn.Module): def forward(self, y_pr, points_gt, gt_mask=None): """ Base forward method for metric evaluation Args: y_pr: 3D prediction of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) points_gt: 3D gt of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) gt_mask: boolean mask, tensor of shape (BATCH_SIZExN_JOINTS). Applied to results, if provided Returns: Metric as single value, if reduction is given, or as a tensor of values """ pass class MPJPE(BaseMetric): def __init__(self, reduction=None, confidence=0, **kwargs): super().__init__(**kwargs) self.confidence = confidence self.reduction = reduction def forward(self, y_pr, points_gt, gt_mask=None): if gt_mask is not None: points_gt[~gt_mask] = 0 dist_2d = torch.norm(points_gt - y_pr, dim=-1) if self.reduction: dist_2d = self.reduction(dist_2d, gt_mask) return dist_2d def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tl.store(out_ptr0 + x0, tmp19, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_linalg_vector_norm_sub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class BaseMetric(nn.Module): def forward(self, y_pr, points_gt, gt_mask=None): """ Base forward method for metric evaluation Args: y_pr: 3D prediction of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) points_gt: 3D gt of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) gt_mask: boolean mask, tensor of shape (BATCH_SIZExN_JOINTS). Applied to results, if provided Returns: Metric as single value, if reduction is given, or as a tensor of values """ pass class MPJPENew(BaseMetric): def __init__(self, reduction=None, confidence=0, **kwargs): super().__init__(**kwargs) self.confidence = confidence self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
miracleyoo/lifting_events_to_3d_hpe
MPJPE
false
10,599
[ "Apache-2.0" ]
0
dfe734ee055900d6ab90c064bf82db7672830ac7
https://github.com/miracleyoo/lifting_events_to_3d_hpe/tree/dfe734ee055900d6ab90c064bf82db7672830ac7
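The fused kernel in the MPJPE record computes the joint-wise Euclidean distance by unrolling the sum of squares over the last dimension of size 4 and taking a square root. An equivalent CPU sketch (illustrative only):

import torch

pred = torch.rand(4, 4, 4, 4)
gt = torch.rand(4, 4, 4, 4)
eager = torch.norm(gt - pred, dim=-1)
d = gt - pred
unrolled = (d[..., 0] ** 2 + d[..., 1] ** 2 + d[..., 2] ** 2 + d[..., 3] ** 2).sqrt()
assert torch.allclose(eager, unrolled)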
PCK
import torch import torch.nn as nn import torch.nn.functional class BaseMetric(nn.Module): def forward(self, y_pr, points_gt, gt_mask=None): """ Base forward method for metric evaluation Args: y_pr: 3D prediction of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) points_gt: 3D gt of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) gt_mask: boolean mask, tensor of shape (BATCH_SIZExN_JOINTS). Applied to results, if provided Returns: Metric as single value, if reduction is given, or as a tensor of values """ pass class PCK(BaseMetric): """ Percentage of correct keypoints according to a thresold value. Usually default threshold is 150mm """ def __init__(self, reduction=None, threshold=150, **kwargs): super().__init__(**kwargs) self.thr = threshold self.reduction = reduction def forward(self, y_pr, points_gt, gt_mask=None): if gt_mask is not None: points_gt[~gt_mask] = 0 dist_2d = (torch.norm(points_gt - y_pr, dim=-1) < self.thr).double() if self.reduction: dist_2d = self.reduction(dist_2d, gt_mask) return dist_2d def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = 150.0 tmp21 = tmp19 < tmp20 tmp22 = tmp21.to(tl.float64) tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float64) get_raw_stream(0) triton_poi_fused__to_copy_linalg_vector_norm_lt_sub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class BaseMetric(nn.Module): def forward(self, y_pr, points_gt, gt_mask=None): """ Base forward method for metric evaluation Args: y_pr: 3D prediction of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) points_gt: 3D gt of joints, tensor of shape (BATCH_SIZExN_JOINTSx3) gt_mask: boolean mask, tensor of shape (BATCH_SIZExN_JOINTS). Applied to results, if provided Returns: Metric as single value, if reduction is given, or as a tensor of values """ pass class PCKNew(BaseMetric): """ Percentage of correct keypoints according to a thresold value. Usually default threshold is 150mm """ def __init__(self, reduction=None, threshold=150, **kwargs): super().__init__(**kwargs) self.thr = threshold self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
miracleyoo/lifting_events_to_3d_hpe
PCK
false
10,600
[ "Apache-2.0" ]
0
dfe734ee055900d6ab90c064bf82db7672830ac7
https://github.com/miracleyoo/lifting_events_to_3d_hpe/tree/dfe734ee055900d6ab90c064bf82db7672830ac7
DiceLoss
import torch from torch import nn import torch.nn.functional as F import torch._utils class BinaryDiceLoss(nn.Module): """Dice loss of binary class Args: smooth: A float number to smooth loss, and avoid NaN error, default: 1 p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2 predict: A tensor of shape [N, *] target: A tensor of shape same with predict reduction: Reduction method to apply, return mean over batch if 'mean', return sum if 'sum', return a tensor of shape [N,] if 'none' Returns: Loss tensor according to arg reduction Raise: Exception if unexpected reduction """ def __init__(self, smooth=1, p=2, reduction='mean'): super(BinaryDiceLoss, self).__init__() self.smooth = smooth self.p = p self.reduction = reduction def forward(self, predict, target): assert predict.shape[0] == target.shape[0 ], "predict & target batch size don't match" predict = predict.contiguous().view(predict.shape[0], -1) target = target.contiguous().view(target.shape[0], -1) num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1 ) + self.smooth loss = 1 - num / den if self.reduction == 'mean': return loss.mean() elif self.reduction == 'sum': return loss.sum() elif self.reduction == 'none': return loss else: raise Exception('Unexpected reduction {}'.format(self.reduction)) class DiceLoss(nn.Module): """Dice loss, need one hot encode input Args: weight: An array of shape [num_classes,] ignore_index: class index to ignore predict: A tensor of shape [N, C, *] target: A tensor of same shape with predict other args pass to BinaryDiceLoss Return: same as BinaryDiceLoss """ def __init__(self, weight=None, ignore_index=None, **kwargs): super(DiceLoss, self).__init__() self.kwargs = kwargs self.weight = weight self.ignore_index = ignore_index def forward(self, predict, target): assert predict.shape == target.shape, 'predict & target shape do not match' dice = BinaryDiceLoss(**self.kwargs) total_loss = 0 predict = F.softmax(predict, dim=1) for i in range(target.shape[1]): if i != self.ignore_index: dice_loss = dice(predict[:, i], target[:, i]) if self.weight is not None: assert self.weight.shape[0] == target.shape[1 ], 'Expect weight shape [{}], get[{}]'.format(target .shape[1], self.weight.shape[0]) dice_loss *= self.weights[i] total_loss += dice_loss return total_loss / target.shape[1] def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused_add_mul_pow_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tmp0 * tmp0 tmp8 = tmp1 * tmp1 tmp9 = tmp7 + tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) @triton.jit def triton_per_fused_add_mul_pow_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 
tmp0 * tmp0 tmp8 = tmp1 * tmp1 tmp9 = tmp7 + tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) @triton.jit def triton_per_fused_add_mul_pow_sum_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tmp0 * tmp0 tmp8 = tmp1 * tmp1 tmp9 = tmp7 + tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) @triton.jit def triton_per_fused_add_mul_pow_sum_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tmp0 * tmp0 tmp8 = tmp1 * tmp1 tmp9 = tmp7 + tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) @triton.jit def triton_per_fused_add_div_mean_rsub_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp10 = tl.load(in_ptr2 + r0, None) tmp12 = tl.load(in_ptr3 + r0, None) tmp19 = tl.load(in_ptr4 + r0, None) tmp21 = tl.load(in_ptr5 + r0, None) tmp28 = tl.load(in_ptr6 + r0, None) tmp30 = tl.load(in_ptr7 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 / tmp4 tmp6 = tmp1 - tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 + tmp1 tmp13 = tmp12 + tmp1 tmp14 = tmp11 / tmp13 tmp15 = tmp1 - tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp20 = tmp19 + tmp1 tmp22 = tmp21 + tmp1 tmp23 = tmp20 / tmp22 tmp24 = tmp1 - tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp29 = tmp28 + tmp1 tmp31 = tmp30 + tmp1 tmp32 = tmp29 / tmp31 tmp33 = tmp1 - tmp32 tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp37 = 4.0 tmp38 = tmp9 / tmp37 tmp39 = 0.0 tmp40 = tmp38 + tmp39 tmp41 = tmp18 / tmp37 tmp42 = tmp40 + tmp41 tmp43 = tmp27 / tmp37 tmp44 = tmp42 + tmp43 tmp45 = tmp36 / tmp37 
tmp46 = tmp44 + tmp45 tmp47 = 0.25 tmp48 = tmp46 * tmp47 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp48, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_mul_pow_sum_2[grid(4)](buf1, arg1_1, buf2, buf3, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf11 = empty_strided_cuda((4,), (1,), torch.float32) buf12 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_mul_pow_sum_3[grid(4)](buf1, arg1_1, buf11, buf12, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((4,), (1,), torch.float32) buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_mul_pow_sum_4[grid(4)](buf1, arg1_1, buf5, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf8 = empty_strided_cuda((4,), (1,), torch.float32) buf9 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_mul_pow_sum_5[grid(4)](buf1, arg1_1, buf8, buf9, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf1 buf10 = empty_strided_cuda((), (), torch.float32) buf14 = buf10 del buf10 triton_per_fused_add_div_mean_rsub_6[grid(1)](buf14, buf2, buf3, buf5, buf6, buf8, buf9, buf11, buf12, 1, 4, XBLOCK=1, num_warps =2, num_stages=1) del buf11 del buf12 del buf2 del buf3 del buf5 del buf6 del buf8 del buf9 return buf14, class BinaryDiceLoss(nn.Module): """Dice loss of binary class Args: smooth: A float number to smooth loss, and avoid NaN error, default: 1 p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2 predict: A tensor of shape [N, *] target: A tensor of shape same with predict reduction: Reduction method to apply, return mean over batch if 'mean', return sum if 'sum', return a tensor of shape [N,] if 'none' Returns: Loss tensor according to arg reduction Raise: Exception if unexpected reduction """ def __init__(self, smooth=1, p=2, reduction='mean'): super(BinaryDiceLoss, self).__init__() self.smooth = smooth self.p = p self.reduction = reduction def forward(self, predict, target): assert predict.shape[0] == target.shape[0 ], "predict & target batch size don't match" predict = predict.contiguous().view(predict.shape[0], -1) target = target.contiguous().view(target.shape[0], -1) num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1 ) + self.smooth loss = 1 - num / den if self.reduction == 'mean': return loss.mean() elif self.reduction == 'sum': return loss.sum() elif self.reduction == 'none': return loss else: raise Exception('Unexpected reduction {}'.format(self.reduction)) class DiceLossNew(nn.Module): """Dice loss, need one hot encode input Args: weight: An array of shape [num_classes,] ignore_index: class index to ignore predict: A tensor of shape [N, C, *] target: A tensor of same shape with predict other args pass to BinaryDiceLoss Return: same as BinaryDiceLoss """ def __init__(self, 
weight=None, ignore_index=None, **kwargs): super(DiceLossNew, self).__init__() self.kwargs = kwargs self.weight = weight self.ignore_index = ignore_index def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ilcessadecalcular/segmentation
DiceLoss
false
10,601
[ "MIT" ]
0
24ba499a399efdba212ec5e2235b72ed8270cc24
https://github.com/ilcessadecalcular/segmentation/tree/24ba499a399efdba212ec5e2235b72ed8270cc24
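The four per-class reduction kernels in the DiceLoss record each compute the Dice numerator sum(p*t) and denominator sum(p**2 + t**2) for one channel, and the final kernel combines them into 1 - (num + smooth) / (den + smooth), averaged over the batch and the four classes. A plain PyTorch sketch of the same computation (illustrative only; random toy inputs):

import torch
import torch.nn.functional as F

predict = F.softmax(torch.rand(4, 4, 4, 4), dim=1)
target = torch.rand(4, 4, 4, 4)
smooth, p = 1, 2

total = 0.0
for i in range(target.shape[1]):
    pr = predict[:, i].reshape(4, -1)
    tg = target[:, i].reshape(4, -1)
    num = (pr * tg).sum(dim=1) + smooth
    den = (pr.pow(p) + tg.pow(p)).sum(dim=1) + smooth
    total = total + (1 - num / den).mean()

loss = total / target.shape[1]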
CompositeActivation
import torch class CompositeActivation(torch.nn.Module): def forward(self, x): x = torch.atan(x) return torch.cat([x / 0.67, x * x / 0.6], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = libdevice.atan(tmp5) tmp7 = 1.4925373134328357 tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp11 & xmask, other=0.0) tmp15 = libdevice.atan(tmp14) tmp16 = tmp15 * tmp15 tmp17 = 1.6666666666666667 tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp11, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp10, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class CompositeActivationNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ndey96/lucent
CompositeActivation
false
10,602
[ "Apache-2.0" ]
0
d868d8ca52520bd245c1e5fcf3b026782f77e561
https://github.com/ndey96/lucent/tree/d868d8ca52520bd245c1e5fcf3b026782f77e561
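The single fused kernel in the CompositeActivation record produces both halves of the concatenation at once, folding the divisions into multiplications by the reciprocals 1/0.67 ≈ 1.4925 and 1/0.6 ≈ 1.6667. An equivalent CPU sketch (illustrative only):

import torch

x = torch.rand(4, 4, 4, 4)
a = torch.atan(x)
out = torch.cat([a / 0.67, a * a / 0.6], dim=1)     # shape (4, 8, 4, 4)
assert torch.allclose(a / 0.67, a * (1 / 0.67))
assert torch.allclose(a * a / 0.6, a * a * (1 / 0.6))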
Net
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 12, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(12, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 37632 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 12 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 9408 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 2352 x4 = xindex % 2352 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 2368 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 2432 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, 
tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (12, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 12, 5, 5), (300, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 12, 28, 28), (9408, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(37632)](buf1, primals_2, 37632, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 12, 14, 14), (2368, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 12, 14, 14), (2432, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(9408)](buf1, buf2, buf3, 9408, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(480)](buf9, 
primals_7, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 12, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(12, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
neal2018/torch_learn
Net
false
10,603
[ "MIT" ]
0
80bda3a44952aca6fce7156fe4aecb48ddd602ee
https://github.com/neal2018/torch_learn/tree/80bda3a44952aca6fce7156fe4aecb48ddd602ee
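The buffer shapes asserted in the Net record's call() follow the usual LeNet-style shape walk for a (4, 3, 32, 32) batch. A plain PyTorch sketch of the same shapes (illustrative only; the layers below are freshly initialized stand-ins, not the record's weights):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.rand(4, 3, 32, 32)
x = F.max_pool2d(F.relu(nn.Conv2d(3, 12, 5)(x)), 2)    # (4, 12, 28, 28) -> (4, 12, 14, 14)
x = F.max_pool2d(F.relu(nn.Conv2d(12, 16, 5)(x)), 2)   # (4, 16, 10, 10) -> (4, 16, 5, 5)
x = x.view(-1, 16 * 5 * 5)                             # (4, 400)
x = F.relu(nn.Linear(400, 120)(x))
x = F.relu(nn.Linear(120, 84)(x))
x = nn.Linear(84, 10)(x)                               # (4, 10)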
DeepCoxMixturesTorch
import torch import torch.nn as nn def create_representation(inputdim, layers, activation): """Helper function to generate the representation function for DSM. Deep Survival Machines learns a representation (\\ Phi(X) \\) for the input data. This representation is parameterized using a Non Linear Multilayer Perceptron (`torch.nn.Module`). This is a helper function designed to instantiate the representation for Deep Survival Machines. .. warning:: Not designed to be used directly. Parameters ---------- inputdim: int Dimensionality of the input features. layers: list A list consisting of the number of neurons in each hidden layer. activation: str Choice of activation function: One of 'ReLU6', 'ReLU' or 'SeLU'. Returns ---------- an MLP with torch.nn.Module with the specfied structure. """ if activation == 'ReLU6': act = nn.ReLU6() elif activation == 'ReLU': act = nn.ReLU() elif activation == 'SeLU': act = nn.SELU() modules = [] prevdim = inputdim for hidden in layers: modules.append(nn.Linear(prevdim, hidden, bias=False)) modules.append(act) prevdim = hidden return nn.Sequential(*modules) class DeepCoxMixturesTorch(nn.Module): """PyTorch model definition of the Deep Cox Mixture Survival Model. The Cox Mixture involves the assumption that the survival function of the individual to be a mixture of K Cox Models. Conditioned on each subgroup Z=k; the PH assumptions are assumed to hold and the baseline hazard rates is determined non-parametrically using an spline-interpolated Breslow's estimator. """ def _init_dcm_layers(self, lastdim): self.gate = torch.nn.Linear(lastdim, self.k, bias=False) self.expert = torch.nn.Linear(lastdim, self.k, bias=False) def __init__(self, inputdim, k, gamma=1, use_activation=False, layers= None, optimizer='Adam'): super(DeepCoxMixturesTorch, self).__init__() if not isinstance(k, int): raise ValueError(f'k must be int, but supplied k is {type(k)}') self.k = k self.optimizer = optimizer if layers is None: layers = [] self.layers = layers if len(layers) == 0: lastdim = inputdim else: lastdim = layers[-1] self._init_dcm_layers(lastdim) self.embedding = create_representation(inputdim, layers, 'ReLU6') self.gamma = gamma self.use_activation = use_activation def forward(self, x): gamma = self.gamma x = self.embedding(x) if self.use_activation: log_hazard_ratios = gamma * torch.nn.Tanh()(self.expert(x)) else: log_hazard_ratios = torch.clamp(self.expert(x), min=-gamma, max =gamma) log_gate_prob = torch.nn.LogSoftmax(dim=1)(self.gate(x)) return log_gate_prob, log_hazard_ratios def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inputdim': 4, 'k': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = tmp0 >= tmp1 tmp6 = tmp0 <= tmp3 tmp7 = tmp5 & tmp6 tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_le_logical_and_0[grid(256)](buf0, buf1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf3 return buf4, buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf4, buf5 def create_representation(inputdim, layers, activation): """Helper function to generate the representation function for DSM. Deep Survival Machines learns a representation (\\ Phi(X) \\) for the input data. This representation is parameterized using a Non Linear Multilayer Perceptron (`torch.nn.Module`). This is a helper function designed to instantiate the representation for Deep Survival Machines. .. warning:: Not designed to be used directly. Parameters ---------- inputdim: int Dimensionality of the input features. layers: list A list consisting of the number of neurons in each hidden layer. activation: str Choice of activation function: One of 'ReLU6', 'ReLU' or 'SeLU'. Returns ---------- an MLP with torch.nn.Module with the specfied structure. """ if activation == 'ReLU6': act = nn.ReLU6() elif activation == 'ReLU': act = nn.ReLU() elif activation == 'SeLU': act = nn.SELU() modules = [] prevdim = inputdim for hidden in layers: modules.append(nn.Linear(prevdim, hidden, bias=False)) modules.append(act) prevdim = hidden return nn.Sequential(*modules) class DeepCoxMixturesTorchNew(nn.Module): """PyTorch model definition of the Deep Cox Mixture Survival Model. The Cox Mixture involves the assumption that the survival function of the individual to be a mixture of K Cox Models. 
Conditioned on each subgroup Z=k; the PH assumptions are assumed to hold and the baseline hazard rates is determined non-parametrically using an spline-interpolated Breslow's estimator. """ def _init_dcm_layers(self, lastdim): self.gate = torch.nn.Linear(lastdim, self.k, bias=False) self.expert = torch.nn.Linear(lastdim, self.k, bias=False) def __init__(self, inputdim, k, gamma=1, use_activation=False, layers= None, optimizer='Adam'): super(DeepCoxMixturesTorchNew, self).__init__() if not isinstance(k, int): raise ValueError(f'k must be int, but supplied k is {type(k)}') self.k = k self.optimizer = optimizer if layers is None: layers = [] self.layers = layers if len(layers) == 0: lastdim = inputdim else: lastdim = layers[-1] self._init_dcm_layers(lastdim) self.embedding = create_representation(inputdim, layers, 'ReLU6') self.gamma = gamma self.use_activation = use_activation def forward(self, input_0): primals_2 = self.gate.weight primals_3 = self.expert.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
mononitogoswami/auton-survival
DeepCoxMixturesTorch
false
10,604
[ "MIT" ]
0
04739adac55e47d3d2c61101d92784a9fbb2dd86
https://github.com/mononitogoswami/auton-survival/tree/04739adac55e47d3d2c61101d92784a9fbb2dd86
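In the DeepCoxMixturesTorch record, layers=None makes the embedding an empty nn.Sequential (an identity), so the compiled graph reduces to the two linear heads: the expert output clamped to [-gamma, gamma] and the gate output passed through a log-softmax over dim 1, exactly what the two fused kernels above compute. A plain PyTorch sketch (illustrative only; the linear layers are stand-ins):

import torch

x = torch.rand(4, 4, 4, 4)
gate = torch.nn.Linear(4, 4, bias=False)
expert = torch.nn.Linear(4, 4, bias=False)
gamma = 1

log_hazard_ratios = torch.clamp(expert(x), min=-gamma, max=gamma)
log_gate_prob = torch.nn.LogSoftmax(dim=1)(gate(x))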
ReluSquared
import torch from torch.nn import functional as F from torch import nn class ReluSquared(nn.Module): def forward(self, x): return F.relu(x) ** 2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ReluSquaredNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ncoop57/x-transformers
ReluSquared
false
10,605
[ "MIT" ]
0
b65f25384349abfc101001b42482b05745c861fa
https://github.com/ncoop57/x-transformers/tree/b65f25384349abfc101001b42482b05745c861fa
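The generated module can be sanity-checked against the eager one. A small, illustrative test; the input shape, device guard and tolerance are assumptions rather than part of the dataset entry:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = ReluSquared()        # eager definition from the entry above
    fused = ReluSquaredNew()     # Triton-backed definition from the entry above
    assert torch.allclose(eager(x), fused(x), atol=1e-06)

The same pattern applies to the other Module/ModuleNew pairs in this file, since each pair keeps the original forward signature.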
SpatialAttentionGate
import torch import torch.nn.functional as F import torch.nn as nn class SpatialAttentionGate(nn.Module): def __init__(self, channel, reduction=16): super(SpatialAttentionGate, self).__init__() self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0) self.fc2 = nn.Conv2d(reduction, 1, kernel_size=1, padding=0) def forward(self, x): x = self.fc1(x) x = F.relu(x, inplace=True) x = self.fc2(x) x = torch.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1024)](buf1, primals_2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_sigmoid_1[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf3 class SpatialAttentionGateNew(nn.Module): def __init__(self, channel, reduction=16): super(SpatialAttentionGateNew, self).__init__() self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0) self.fc2 = nn.Conv2d(reduction, 1, kernel_size=1, padding=0) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
lawwu/nni
SpatialAttentionGate
false
10,606
[ "MIT" ]
0
b869dd48dfe36392e7b78c70ea35eb6d4b4779dc
https://github.com/lawwu/nni/tree/b869dd48dfe36392e7b78c70ea35eb6d4b4779dc
RMSNorm
import torch
from torch import nn


class RMSNorm(nn.Module):

    def __init__(self, dim, eps=1e-08):
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
        return x / norm.clamp(min=self.eps) * self.g


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 0.5 tmp14 = tmp12 * tmp13 tmp15 = 1e-08 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = tmp0 / tmp16 tmp19 = tmp17 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)]( primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class RMSNormNew(nn.Module): def __init__(self, dim, eps=1e-08): super().__init__() self.scale = dim ** -0.5 self.eps = eps self.g = nn.Parameter(torch.ones(dim)) def forward(self, input_0): primals_2 = self.g primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
ncoop57/x-transformers
RMSNorm
false
10,607
[ "MIT" ]
0
b65f25384349abfc101001b42482b05745c861fa
https://github.com/ncoop57/x-transformers/tree/b65f25384349abfc101001b42482b05745c861fa
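The fused kernel folds the dim ** -0.5 scale into a literal: with dim=4 the factor is 0.5, which is the tmp13 constant above. An eager reference of the same computation, kept here only as a reading aid:

import torch

def rms_norm_reference(x, g, eps=1e-08):
    scale = x.shape[-1] ** -0.5            # 4 ** -0.5 == 0.5 for the test input
    norm = x.norm(dim=-1, keepdim=True) * scale
    return x / norm.clamp(min=eps) * g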
LanguageModelCriterion
import torch import torch.nn as nn class LanguageModelCriterion(nn.Module): def __init__(self): super().__init__() def forward(self, x, target, mask): x = x.contiguous().view(-1, x.size(2)) target = target.contiguous().view(-1, 1) mask = mask.contiguous().view(-1, 1) output = -x.gather(1, target) * mask output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.ones([256, 4, 4], dtype=torch.int64), torch.ones([256], dtype=torch.int64), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr2 + r0, None) tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tl.broadcast_to(tmp9, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp13 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (256, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (256,), (1,)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_gather_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class LanguageModelCriterionNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
neal2018/torch_learn
LanguageModelCriterion
false
10,608
[ "MIT" ]
0
80bda3a44952aca6fce7156fe4aecb48ddd602ee
https://github.com/neal2018/torch_learn/tree/80bda3a44952aca6fce7156fe4aecb48ddd602ee
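The single reduction kernel above fuses the gather of the target log-probabilities, the mask multiply and the normalisation by the mask sum. The same loss in plain PyTorch, as a hedged reference (variable names are illustrative):

import torch

def masked_nll(log_probs, target, mask):
    # log_probs: (N, T, V); target and mask are flattened to (N * T, 1),
    # matching the view() calls of the module above.
    log_probs = log_probs.contiguous().view(-1, log_probs.size(2))
    target = target.contiguous().view(-1, 1)
    mask = mask.contiguous().view(-1, 1)
    picked = -log_probs.gather(1, target) * mask
    return picked.sum() / mask.sum()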
ScaleNorm
import torch from torch import nn class ScaleNorm(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.scale = dim ** -0.5 self.eps = eps self.g = nn.Parameter(torch.ones(1)) def forward(self, x): norm = torch.norm(x, dim=-1, keepdim=True) * self.scale return x / norm.clamp(min=self.eps) * self.g def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + 0) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 0.5 tmp14 = tmp12 * tmp13 tmp15 = 1e-05 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = tmp0 / tmp16 tmp20 = tmp17 * tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)]( primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class ScaleNormNew(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.scale = dim ** -0.5 self.eps = eps self.g = nn.Parameter(torch.ones(1)) def forward(self, input_0): primals_2 = self.g primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
ncoop57/x-transformers
ScaleNorm
false
10,609
[ "MIT" ]
0
b65f25384349abfc101001b42482b05745c861fa
https://github.com/ncoop57/x-transformers/tree/b65f25384349abfc101001b42482b05745c861fa
DuelingQNetwork
import torch import torch.nn as nn from collections import OrderedDict class DuelingQNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, hidden_advantage=[512, 512], hidden_state_value=[512, 512]): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_layers (list): List containing the hidden layer sizes """ super(DuelingQNetwork, self).__init__() self.seed = torch.manual_seed(seed) hidden_layers = [state_size] + hidden_advantage advantage_layers = OrderedDict() for idx, (hl_in, hl_out) in enumerate(zip(hidden_layers[:-1], hidden_layers[1:])): advantage_layers['adv_fc_' + str(idx)] = nn.Linear(hl_in, hl_out) advantage_layers['adv_activation_' + str(idx)] = nn.ReLU() advantage_layers['adv_output'] = nn.Linear(hidden_layers[-1], action_size) self.network_advantage = nn.Sequential(advantage_layers) value_layers = OrderedDict() hidden_layers = [state_size] + hidden_state_value for idx, (hl_in, hl_out) in enumerate(zip(hidden_layers[:-1], hidden_layers[1:])): value_layers['val_fc_' + str(idx)] = nn.Linear(hl_in, hl_out) value_layers['val_activation_' + str(idx)] = nn.ReLU() value_layers['val_output'] = nn.Linear(hidden_layers[-1], 1) self.network_value = nn.Sequential(value_layers) def forward(self, state): """Build a network that maps state -> action values.""" advantage = self.network_advantage(state) value = self.network_value(state) return advantage.sub_(advantage.mean()).add_(value) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_per_fused_add_mean_view_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex r4 = rindex // 4 tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr1 + r4, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 0) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = tmp0 - tmp5 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tl.store(in_out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp11, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 512), (512, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (4, 512), (512, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (512, 4), (4, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (512, 512), (512, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (1, 512), (512, 1)) assert_size_stride(primals_13, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf0 buf16 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1, primals_2, buf16, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del 
buf2 buf15 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf3, primals_5, buf15, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 512), (1, 4), 0), out=buf5) del primals_8 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf5 buf14 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf6, primals_9, buf14, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf7 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 512), (512, 1), 0), reinterpret_tensor(primals_10, (512, 512), (1, 512), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf7 buf13 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf8, primals_11, buf13, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (64, 512), (512, 1), 0), reinterpret_tensor(primals_12, (512, 1), (1, 512), 0), out=buf9) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = buf11 del buf11 triton_per_fused_add_mean_view_1[grid(1)](buf12, buf4, buf9, primals_13, 1, 256, num_warps=2, num_stages=1) del buf4 del buf9 del primals_13 return buf12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf3, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf6, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf8, (64, 512), (512, 1), 0 ), primals_12, buf13, primals_10, buf14, primals_6, buf15, primals_4, buf16 class DuelingQNetworkNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, hidden_advantage=[512, 512], hidden_state_value=[512, 512]): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_layers (list): List containing the hidden layer sizes """ super(DuelingQNetworkNew, self).__init__() self.seed = torch.manual_seed(seed) hidden_layers = [state_size] + hidden_advantage advantage_layers = OrderedDict() for idx, (hl_in, hl_out) in enumerate(zip(hidden_layers[:-1], hidden_layers[1:])): advantage_layers['adv_fc_' + str(idx)] = nn.Linear(hl_in, hl_out) advantage_layers['adv_activation_' + str(idx)] = nn.ReLU() advantage_layers['adv_output'] = nn.Linear(hidden_layers[-1], action_size) self.network_advantage = nn.Sequential(advantage_layers) value_layers = OrderedDict() hidden_layers = [state_size] + hidden_state_value for idx, (hl_in, hl_out) in enumerate(zip(hidden_layers[:-1], hidden_layers[1:])): value_layers['val_fc_' + str(idx)] = nn.Linear(hl_in, hl_out) value_layers['val_activation_' + str(idx)] = nn.ReLU() value_layers['val_output'] = nn.Linear(hidden_layers[-1], 1) self.network_value = nn.Sequential(value_layers) def forward(self, input_0): primals_1 = self.network_advantage.adv_fc_0.weight primals_2 = self.network_advantage.adv_fc_0.bias primals_4 = self.network_advantage.adv_fc_1.weight primals_5 = self.network_advantage.adv_fc_1.bias primals_6 = self.network_advantage.adv_output.weight primals_7 = self.network_advantage.adv_output.bias primals_8 = self.network_value.val_fc_0.weight primals_9 = self.network_value.val_fc_0.bias primals_10 = self.network_value.val_fc_1.weight primals_11 = self.network_value.val_fc_1.bias primals_12 = self.network_value.val_output.weight primals_13 = self.network_value.val_output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
nullbyte91/udacity-drl-navigation
DuelingQNetwork
false
10,610
[ "MIT" ]
0
d981ab906fd3dfc9939d639b2083d004cde0b961
https://github.com/nullbyte91/udacity-drl-navigation/tree/d981ab906fd3dfc9939d639b2083d004cde0b961
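Note that advantage.sub_(advantage.mean()).add_(value) subtracts one global mean over the whole advantage tensor; the 256.0 divisor in triton_per_fused_add_mean_view_1 is exactly that full element count. The dueling aggregation is more commonly written per state, as in the comparison sketch below (not the repository's code):

import torch

def dueling_aggregate(advantage, value):
    # advantage: (batch, num_actions), value: (batch, 1)
    return value + advantage - advantage.mean(dim=1, keepdim=True)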
L2
import torch
import torch.nn as nn


class L2(nn.Module):

    def __init__(self):
        nn.Module.__init__(self)

    def forward(self, s, t):
        out = (s - t) ** 2
        return (out.view(out.size(0), -1).sum(dim=1) + 1e-14) ** 0.5


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = 1e-14 tmp9 = tmp7 + tmp8 tmp10 = libdevice.sqrt(tmp9) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_pow_sum_0[grid(4)](buf1, arg0_1, arg1_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L2New(nn.Module): def __init__(self): nn.Module.__init__(self) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mrernst/rl_robotics_research
L2
false
10,611
[ "MIT" ]
0
0bc446cfb69591cb4ee3ce8d39815c463090a5f6
https://github.com/mrernst/rl_robotics_research/tree/0bc446cfb69591cb4ee3ce8d39815c463090a5f6
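The reduction kernel computes a per-sample Euclidean distance with a small epsilon inside the square root for numerical stability. An equivalent eager form, shown for reference only:

import torch

def l2_distance(s, t, eps=1e-14):
    diff = (s - t).reshape(s.size(0), -1)
    return (diff.pow(2).sum(dim=1) + eps).sqrt()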
DotProd
import torch import numpy as np import torch.nn as nn class DotProd(nn.Module): def __init__(self): nn.Module.__init__(self) def forward(self, s, t): if isinstance(s, np.ndarray): s = torch.from_numpy(s).float() if isinstance(t, np.ndarray): t = torch.from_numpy(t).float() out = (s * t[:, None, :]).sum(dim=2)[:, 0] return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0), class DotProdNew(nn.Module): def __init__(self): nn.Module.__init__(self) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mrernst/rl_robotics_research
DotProd
false
10,612
[ "MIT" ]
0
0bc446cfb69591cb4ee3ce8d39815c463090a5f6
https://github.com/mrernst/rl_robotics_research/tree/0bc446cfb69591cb4ee3ce8d39815c463090a5f6
L1
import torch import numpy as np import torch.nn as nn class L1(nn.Module): def __init__(self): nn.Module.__init__(self) def forward(self, s, t): if isinstance(s, np.ndarray): s = torch.from_numpy(s).float() if isinstance(t, np.ndarray): t = torch.from_numpy(t).float() out = torch.abs(s - t) return out.view(out.size(0), -1).sum(dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_sum_0[grid(4)](arg0_1, arg1_1, buf0, 4, 64, XBLOCK =1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class L1New(nn.Module): def __init__(self): nn.Module.__init__(self) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mrernst/rl_robotics_research
L1
false
10,613
[ "MIT" ]
0
0bc446cfb69591cb4ee3ce8d39815c463090a5f6
https://github.com/mrernst/rl_robotics_research/tree/0bc446cfb69591cb4ee3ce8d39815c463090a5f6
SingleDeconv3DBlock
import torch
from torch import nn
import torch._utils


class SingleDeconv3DBlock(nn.Module):

    def __init__(self, in_planes, out_planes):
        super().__init__()
        self.block = nn.ConvTranspose3d(in_planes, out_planes, kernel_size=2,
            stride=2, padding=0, output_padding=0)

    def forward(self, x):
        return self.block(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_planes': 4, 'out_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2, 2), (32, 8, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(2048)](buf1, primals_2, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 8, 8, 8), (512, 64, 8, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class SingleDeconv3DBlockNew(nn.Module): def __init__(self, in_planes, out_planes): super().__init__() self.block = nn.ConvTranspose3d(in_planes, out_planes, kernel_size= 2, stride=2, padding=0, output_padding=0) def forward(self, input_0): primals_1 = self.block.weight primals_2 = self.block.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ilcessadecalcular/segmentation
SingleDeconv3DBlock
false
10,614
[ "MIT" ]
0
24ba499a399efdba212ec5e2235b72ed8270cc24
https://github.com/ilcessadecalcular/segmentation/tree/24ba499a399efdba212ec5e2235b72ed8270cc24
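Because the test input is 4D, call() reinterprets it as a (1, 4, 4, 4, 4) volume before the transposed convolution, and the kernel-2 / stride-2 deconvolution doubles every spatial dimension. A hedged shape check (the device guard and shapes are illustrative; CUDA is required for the generated path):

import torch

if torch.cuda.is_available():
    block = SingleDeconv3DBlockNew(4, 4).cuda()
    out = block(torch.rand(4, 4, 4, 4, device='cuda'))
    assert out.shape == (4, 8, 8, 8)   # the (4, 8, 8, 8) view returned by call()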
QNetwork
import torch import torch.nn.functional as F import torch.nn as nn class QNetwork(nn.Module): def __init__(self, state_size, action_size, hidden_layer1=64, hidden_layer2=64): super(QNetwork, self).__init__() self.fc1 = nn.Linear(state_size, hidden_layer1) self.fc2 = nn.Linear(hidden_layer1, hidden_layer2) self.fc3 = nn.Linear(hidden_layer2, action_size) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return F.relu(self.fc3(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_2, buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3, primals_5, buf7, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf5, primals_7, buf6, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( buf3, (64, 64), (64, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class QNetworkNew(nn.Module): def __init__(self, state_size, action_size, hidden_layer1=64, hidden_layer2=64): super(QNetworkNew, self).__init__() self.fc1 = nn.Linear(state_size, hidden_layer1) self.fc2 = nn.Linear(hidden_layer1, hidden_layer2) self.fc3 = nn.Linear(hidden_layer2, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
pardi/DRL_navigation
QNetwork
false
10,615
[ "Apache-2.0" ]
0
4b66edf696c34a53686c02ff91264f5d6b32dc02
https://github.com/pardi/DRL_navigation/tree/4b66edf696c34a53686c02ff91264f5d6b32dc02
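One behavioural detail worth flagging: the final F.relu clips every Q-value at zero, so this head can never express negative action values. When negative returns are possible, the last activation is usually dropped; a sketch of that more common head follows (an assumption for comparison, not the repository's code):

import torch.nn.functional as F

def q_head(fc1, fc2, fc3, x):
    x = F.relu(fc1(x))
    x = F.relu(fc2(x))
    return fc3(x)   # raw, unclipped action values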
convBlock
import torch
import torch.nn as nn


class convBlock(nn.Module):
    """
    A convolutional block including conv, BN, nonlinear activation and an
    optional residual connection.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
        padding=1, bias=True, batchnorm=False, residual=False,
        nonlinear=nn.LeakyReLU(0.2)):
        """
        :param in_channels:
        :param out_channels:
        :param kernel_size:
        :param stride:
        :param padding:
        :param bias:
        :param batchnorm:
        :param residual:
        :param nonlinear:
        """
        super(convBlock, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, bias=bias)
        self.bn = nn.BatchNorm3d(out_channels) if batchnorm else None
        self.nonlinear = nonlinear
        self.residual = residual

    def forward(self, x):
        x = self.conv(x)
        if self.bn:
            x = self.bn(x)
        if self.nonlinear:
            x = self.nonlinear(x)
        if self.residual:
            x += x
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf2, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1 class convBlockNew(nn.Module): """ A convolutional block including conv, BN, nonliear activiation, residual connection """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True, batchnorm=False, residual=False, nonlinear=nn .LeakyReLU(0.2)): """ :param in_channels: :param out_channels: :param kernel_size: :param stride: :param padding: :param bias: :param batchnorm: :param residual: :param nonlinear: """ super(convBlockNew, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) self.bn = nn.BatchNorm3d(out_channels) if batchnorm else None self.nonlinear = nonlinear self.residual = residual def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
norveclibalikci/easyreg-mirror
convBlock
false
10,616
[ "Apache-2.0" ]
0
a16254733fe957cc4024923f8dce91412966a189
https://github.com/norveclibalikci/easyreg-mirror/tree/a16254733fe957cc4024923f8dce91412966a189
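One detail in convBlock worth noting: if self.residual: x += x doubles the block output instead of adding the block input, and the compiled graph above was traced with the default residual=False, so the branch never runs. A conventional residual variant would keep the input around; the sketch below is written under that assumption and is not the repository's code:

import torch.nn as nn

class ResidualConv3dBlock(nn.Module):

    def __init__(self, channels, kernel_size=3, padding=1):
        super().__init__()
        self.conv = nn.Conv3d(channels, channels, kernel_size, padding=padding)
        self.act = nn.LeakyReLU(0.2)

    def forward(self, x):
        # Add the block input, not the output to itself.
        return self.act(self.conv(x)) + x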
NCCLoss
import torch
import torch.nn as nn


class NCCLoss(nn.Module):
    """
    An implementation of the normalized cross correlation (NCC)
    """

    def forward(self, input, target):
        input = input.view(input.shape[0], -1)
        target = target.view(target.shape[0], -1)
        input_minus_mean = input - torch.mean(input, 1).view(input.shape[0], 1)
        target_minus_mean = target - torch.mean(target, 1).view(input.shape[0], 1)
        nccSqr = (input_minus_mean * target_minus_mean).mean(1) / torch.sqrt(
            (input_minus_mean ** 2).mean(1) * (target_minus_mean ** 2).mean(1))
        nccSqr = nccSqr.mean()
        return (1 - nccSqr) * input.shape[0]


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_pow_sub_0(in_ptr0, in_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp0 - tmp11 tmp13 = tmp9 / tmp10 tmp14 = tmp5 - tmp13 tmp15 = tmp12 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp12 * tmp12 tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK]) tmp23 = tl.where(xmask, tmp21, 0) tmp24 = tl.sum(tmp23, 1)[:, None] tmp25 = tmp14 * tmp14 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tl.store(out_ptr2 + x0, tmp19, xmask) tl.store(out_ptr3 + x0, tmp24, xmask) tl.store(out_ptr4 + x0, tmp29, xmask) @triton.jit def triton_per_fused_div_mean_mul_pow_rsub_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp5 = tl.load(in_ptr2 + r0, None) tmp1 = 64.0 tmp2 = tmp0 / tmp1 tmp4 = tmp3 / tmp1 tmp6 = tmp5 / tmp1 tmp7 = tmp4 * tmp6 tmp8 = libdevice.sqrt(tmp7) tmp9 = tmp2 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tmp17 = tmp16 * tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf4 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mean_mul_pow_sub_0[grid(4)](arg0_1, arg1_1, buf2, buf3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 triton_per_fused_div_mean_mul_pow_rsub_sqrt_sub_1[grid(1)](buf6, buf2, buf3, buf4, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf3 del buf4 return buf6, class NCCLossNew(nn.Module): """ A implementation of the normalized cross correlation (NCC) """ def forward(self, input_0, input_1): arg0_1 = 
input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
norveclibalikci/easyreg-mirror
NCCLoss
false
10,617
[ "Apache-2.0" ]
0
a16254733fe957cc4024923f8dce91412966a189
https://github.com/norveclibalikci/easyreg-mirror/tree/a16254733fe957cc4024923f8dce91412966a189
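A quick property test: the NCC of a tensor with itself is 1, so the loss vanishes for identical inputs. Illustrative only; the tolerance and device guard are assumptions:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    loss = NCCLossNew()(x, x.clone())
    assert torch.allclose(loss, torch.zeros_like(loss), atol=1e-04)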
LocalResponseNormLayer
import torch
import torch.nn as nn
import torch.nn.functional as F


class LocalResponseNormLayer(nn.Module):

    def forward(self, tensor, size=5, alpha=9.999999747378752e-05, beta=0.75,
        k=1.0):
        return F.local_response_norm(tensor, size=size, alpha=alpha,
            beta=beta, k=k)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex tmp48 = tl.load(in_ptr0 + x3, xmask) tmp0 = -2 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-32 + x3), tmp5 & xmask, other=0.0) tmp7 = tmp6 * tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = -1 + x1 tmp11 = tmp10 >= tmp1 tmp12 = tmp10 < tmp3 tmp13 = tmp11 & tmp12 tmp14 = tl.load(in_ptr0 + (-16 + x3), tmp13 & xmask, other=0.0) tmp15 = tmp14 * tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp13, tmp15, tmp16) tmp18 = tmp17 + tmp9 tmp19 = x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tl.load(in_ptr0 + x3, tmp22 & xmask, other=0.0) tmp24 = tmp23 * tmp23 tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp22, tmp24, tmp25) tmp27 = tmp26 + tmp18 tmp28 = 1 + x1 tmp29 = tmp28 >= tmp1 tmp30 = tmp28 < tmp3 tmp31 = tmp29 & tmp30 tmp32 = tl.load(in_ptr0 + (16 + x3), tmp31 & xmask, other=0.0) tmp33 = tmp32 * tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tmp35 + tmp27 tmp37 = 2 + x1 tmp38 = tmp37 >= tmp1 tmp39 = tmp37 < tmp3 tmp40 = tmp38 & tmp39 tmp41 = tl.load(in_ptr0 + (32 + x3), tmp40 & xmask, other=0.0) tmp42 = tmp41 * tmp41 tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp40, tmp42, tmp43) tmp45 = tmp44 + tmp36 tmp46 = 0.2 tmp47 = tmp45 * tmp46 tmp49 = 9.999999747378752e-05 tmp50 = tmp47 * tmp49 tmp51 = 1.0 tmp52 = tmp50 + tmp51 tmp53 = 0.75 tmp54 = libdevice.pow(tmp52, tmp53) tmp55 = tmp48 / tmp54 tl.store(in_out_ptr0 + x3, tmp55, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0[grid(256) ](buf1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf1, class LocalResponseNormLayerNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ndey96/lucent
LocalResponseNormLayer
false
10,618
[ "Apache-2.0" ]
0
d868d8ca52520bd245c1e5fcf3b026782f77e561
https://github.com/ndey96/lucent/tree/d868d8ca52520bd245c1e5fcf3b026782f77e561
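The constants in the fused kernel map directly onto the LRN definition b_c = a_c / (k + alpha * mean_window(a^2))^beta: 0.2 is 1/size for the five-channel window average, 9.999999747378752e-05 is alpha, 1.0 is k and 0.75 is beta. An unfused eager expansion, written only to make that mapping explicit:

import torch
import torch.nn.functional as F

def lrn_reference(x, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0):
    # x: (N, C, H, W). Square, zero-pad the channel axis, average over a
    # window of `size` channels, then scale, shift and raise to beta.
    squared = x.pow(2)
    pad = size // 2
    padded = F.pad(squared, (0, 0, 0, 0, pad, pad))
    window_mean = padded.unfold(1, size, 1).mean(dim=-1)
    return x / (k + alpha * window_mean).pow(beta)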
JointsCELoss
import torch import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn as nn class JointsCELoss(nn.Module): def __init__(self): super(JointsCELoss, self).__init__() self.criterion = nn.MSELoss(reduction='mean') def forward(self, output, target): batch_size = output.size(0) num_joints = output.size(1) loss = 0 for idx in range(num_joints): class_gt = target[:, idx].view(batch_size) class_pred = output[:, idx] loss += self.criterion(class_pred, class_gt) return loss / num_joints def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp23 = tmp21 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 4.0 tmp29 = tmp6 / tmp28 tmp30 = 0.0 tmp31 = tmp29 + tmp30 tmp32 = tmp13 / tmp28 tmp33 = tmp31 + tmp32 tmp34 = tmp20 / tmp28 tmp35 = tmp33 + tmp34 tmp36 = tmp27 / tmp28 tmp37 = tmp35 + tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mse_loss_0[grid(1)](buf4, arg0_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class JointsCELossNew(nn.Module): def __init__(self): super(JointsCELossNew, self).__init__() self.criterion = nn.MSELoss(reduction='mean') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
nuguziii/deep-high-resolution-net.pytorch
JointsCELoss
false
10,619
[ "MIT" ]
0
3c053e97201fbeb35ff48cbc567ffb37b5e0b436
https://github.com/nuguziii/deep-high-resolution-net.pytorch/tree/3c053e97201fbeb35ff48cbc567ffb37b5e0b436
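Despite the "CE" in the name, the criterion wraps nn.MSELoss: the fused kernel sums the four column-wise squared-error means and multiplies by 0.25. Because every joint column shares the same batch size, the loop collapses to a single mean, as in this equivalent eager form (for illustration):

import torch

def joints_mse(output, target):
    # output, target: (batch, num_joints)
    return ((output - target) ** 2).mean()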
JointsDistLoss
import torch import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn as nn class JointsDistLoss(nn.Module): def __init__(self): super(JointsDistLoss, self).__init__() self.criterion = nn.MSELoss(reduction='mean') def forward(self, output, target): batch = output.size(0) output.size(1) output = output.reshape(batch, 32, 2) target = target.reshape(batch, 32, 2) loss = self.criterion(output[:, :, 0], target[:, :, 0] ) + 0.3 * self.criterion(output[:, :, 1], target[:, :, 1]) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 2 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 2 * r0, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 2 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 2 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 128.0 tmp15 = tmp6 / tmp14 tmp16 = tmp13 / tmp14 tmp17 = 0.3 tmp18 = tmp16 * tmp17 tmp19 = tmp15 + tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mse_loss_mul_0[grid(1)](buf2, arg0_1, arg1_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class JointsDistLossNew(nn.Module): def __init__(self): super(JointsDistLossNew, self).__init__() self.criterion = nn.MSELoss(reduction='mean') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
nuguziii/deep-high-resolution-net.pytorch
JointsDistLoss
false
10,620
[ "MIT" ]
0
3c053e97201fbeb35ff48cbc567ffb37b5e0b436
https://github.com/nuguziii/deep-high-resolution-net.pytorch/tree/3c053e97201fbeb35ff48cbc567ffb37b5e0b436
Bottleneck
import torch from torch import nn from collections import OrderedDict class Bottleneck(nn.Module): def __init__(self, in_channels, out_channels): super(Bottleneck, self).__init__() m = OrderedDict() m['conv1'] = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) m['relu1'] = nn.ReLU(True) m['conv2'] = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=2, bias=False, dilation=2) m['relu2'] = nn.ReLU(True) m['conv3'] = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False) self.group1 = nn.Sequential(m) self.relu = nn.Sequential(nn.ReLU(True)) def forward(self, x): out = self.group1(x) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=256, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(256)](buf3, 256, XBLOCK=256, num_warps =4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) return buf4, primals_1, primals_2, primals_3, primals_4, buf1, buf3 class BottleneckNew(nn.Module): def __init__(self, in_channels, out_channels): super(BottleneckNew, self).__init__() m = OrderedDict() m['conv1'] = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) m['relu1'] = nn.ReLU(True) m['conv2'] = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=2, bias=False, dilation=2) m['relu2'] = nn.ReLU(True) m['conv3'] = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False) self.group1 = nn.Sequential(m) self.relu = nn.Sequential(nn.ReLU(True)) def forward(self, input_0): primals_1 = self.group1.conv1.weight primals_3 = self.group1.conv2.weight primals_4 = self.group1.conv3.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
nivedk/SPANet
Bottleneck
false
10,621
[ "BSD-3-Clause" ]
0
1bd84ae67732f9885af65dcbd286075008d46e91
https://github.com/nivedk/SPANet/tree/1bd84ae67732f9885af65dcbd286075008d46e91
Attention
import torch from torch import nn class Attention(nn.Module): def __init__(self, in_channels): super(Attention, self).__init__() self.out_channels = int(in_channels / 2) self.conv1 = nn.Conv2d(in_channels, self.out_channels, kernel_size= 3, padding=1, stride=1) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1, stride=1) self.relu2 = nn.ReLU() self.conv3 = nn.Conv2d(self.out_channels, 4, kernel_size=1, padding =0, stride=1) self.sigmod = nn.Sigmoid() def forward(self, x): out = self.conv1(x) out = self.relu1(out) out = self.conv2(out) out = self.relu2(out) out = self.conv3(out) out = self.sigmod(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (4, 2, 1, 1), (2, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(128)](buf3, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_sigmoid_1[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5 class AttentionNew(nn.Module): def __init__(self, in_channels): super(AttentionNew, self).__init__() self.out_channels = int(in_channels / 2) self.conv1 = nn.Conv2d(in_channels, self.out_channels, kernel_size= 3, padding=1, stride=1) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1, stride=1) self.relu2 = nn.ReLU() self.conv3 = nn.Conv2d(self.out_channels, 4, kernel_size=1, padding =0, stride=1) self.sigmod = nn.Sigmoid() def forward(self, input_0): primals_1 = 
self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
nivedk/SPANet
Attention
false
10,623
[ "BSD-3-Clause" ]
0
1bd84ae67732f9885af65dcbd286075008d46e91
https://github.com/nivedk/SPANet/tree/1bd84ae67732f9885af65dcbd286075008d46e91
Sparsemax
from torch.autograd import Function import torch import torch.nn as nn def _make_ix_like(X, dim): d = X.size(dim) rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype) view = [1] * X.dim() view[0] = -1 return rho.view(view).transpose(0, dim) def _roll_last(X, dim): if dim == -1: return X elif dim < 0: dim = X.dim() - dim perm = [i for i in range(X.dim()) if i != dim] + [dim] return X.permute(perm) def _sparsemax_threshold_and_support(X, dim=-1, k=None): """Core computation for sparsemax: optimal threshold and support size. Parameters ---------- X : torch.Tensor The input tensor to compute thresholds over. dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- tau : torch.Tensor like `X`, with all but the `dim` dimension intact the threshold value for each vector support_size : torch LongTensor, shape like `tau` the number of nonzeros in each vector. """ if k is None or k >= X.shape[dim]: topk, _ = torch.sort(X, dim=dim, descending=True) else: topk, _ = torch.topk(X, k=k, dim=dim) topk_cumsum = topk.cumsum(dim) - 1 rhos = _make_ix_like(topk, dim) support = rhos * topk > topk_cumsum support_size = support.sum(dim=dim).unsqueeze(dim) tau = topk_cumsum.gather(dim, support_size - 1) tau /= support_size if k is not None and k < X.shape[dim]: unsolved = (support_size == k).squeeze(dim) if torch.any(unsolved): in_ = _roll_last(X, dim)[unsolved] tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k) _roll_last(tau, dim)[unsolved] = tau_ _roll_last(support_size, dim)[unsolved] = ss_ return tau, support_size def sparsemax(X, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- X : torch.Tensor The input tensor. dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- P : torch tensor, same shape as X The projection result, such that P.sum(dim=dim) == 1 elementwise. """ return SparsemaxFunction.apply(X, dim, k) class SparsemaxFunction(Function): @classmethod def forward(cls, ctx, X, dim=-1, k=None): ctx.dim = dim max_val, _ = X.max(dim=dim, keepdim=True) X = X - max_val tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k) output = torch.clamp(X - tau, min=0) ctx.save_for_backward(supp_size, output) return output @classmethod def backward(cls, ctx, grad_output): supp_size, output = ctx.saved_tensors dim = ctx.dim grad_input = grad_output.clone() grad_input[output == 0] = 0 v_hat = grad_input.sum(dim=dim) / supp_size.squeeze() v_hat = v_hat.unsqueeze(dim) grad_input = torch.where(output != 0, grad_input - v_hat, grad_input) return grad_input, None, None class Sparsemax(nn.Module): def __init__(self, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. 
Parameters ---------- dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. """ self.dim = dim self.k = k super(Sparsemax, self).__init__() def forward(self, X): return sparsemax(X, dim=self.dim, k=self.k) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_cumsum_max_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = r1 tmp10 = tmp9.to(tl.int16) tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True) tmp15 = tmp13.to(tl.float32) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + 4 * x0), tmp8, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask) tl.store(out_ptr2 + (r1 + 4 * x0), tmp17, xmask) @triton.jit def triton_poi_fused_gt_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp1 * tmp0 tmp4 = tmp3 - tmp1 tmp5 = tmp2 > tmp4 tmp6 = tmp5.to(tl.int64) tmp8 = 2.0 tmp9 = tmp8 * tmp7 tmp11 = tmp10 - tmp1 tmp12 = tmp9 > tmp11 tmp13 = tmp12.to(tl.int64) tmp14 = tmp6 + tmp13 tmp16 = 3.0 tmp17 = tmp16 * tmp15 tmp19 = tmp18 - tmp1 tmp20 = tmp17 > tmp19 tmp21 = tmp20.to(tl.int64) tmp22 = tmp14 + tmp21 tmp24 = 4.0 tmp25 = tmp24 * tmp23 tmp27 = tmp26 - tmp1 tmp28 = tmp25 > tmp27 tmp29 = tmp28.to(tl.int64) tmp30 = tmp22 + tmp29 tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_clamp_div_gather_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 
+ x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp1 - tmp2 tmp4 = tl.full([XBLOCK], 4, tl.int32) tmp5 = tmp3 + tmp4 tmp6 = tmp3 < 0 tmp7 = tl.where(tmp6, tmp5, tmp3) tl.device_assert((0 <= tmp7) & (tmp7 < 4) | ~xmask, 'index out of bounds: 0 <= tmp7 < 4') tmp9 = tl.load(in_ptr2 + (tmp7 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp10 = 1.0 tmp11 = tmp9 - tmp10 tmp12 = tmp1.to(tl.float32) tmp13 = tmp11 / tmp12 tmp14 = tmp0 - tmp13 tmp15 = 0.0 tmp16 = triton_helpers.maximum(tmp14, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cumsum_max_sort_sub_0[grid(64)](arg0_1, buf0, buf1, buf3, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused_gt_mul_sub_sum_1[grid(64)](buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf1 del buf1 triton_poi_fused_clamp_div_gather_sub_2[grid(256)](buf0, buf4, buf3, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf3 del buf4 return buf5, def _make_ix_like(X, dim): d = X.size(dim) rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype) view = [1] * X.dim() view[0] = -1 return rho.view(view).transpose(0, dim) def _roll_last(X, dim): if dim == -1: return X elif dim < 0: dim = X.dim() - dim perm = [i for i in range(X.dim()) if i != dim] + [dim] return X.permute(perm) def _sparsemax_threshold_and_support(X, dim=-1, k=None): """Core computation for sparsemax: optimal threshold and support size. Parameters ---------- X : torch.Tensor The input tensor to compute thresholds over. dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- tau : torch.Tensor like `X`, with all but the `dim` dimension intact the threshold value for each vector support_size : torch LongTensor, shape like `tau` the number of nonzeros in each vector. """ if k is None or k >= X.shape[dim]: topk, _ = torch.sort(X, dim=dim, descending=True) else: topk, _ = torch.topk(X, k=k, dim=dim) topk_cumsum = topk.cumsum(dim) - 1 rhos = _make_ix_like(topk, dim) support = rhos * topk > topk_cumsum support_size = support.sum(dim=dim).unsqueeze(dim) tau = topk_cumsum.gather(dim, support_size - 1) tau /= support_size if k is not None and k < X.shape[dim]: unsolved = (support_size == k).squeeze(dim) if torch.any(unsolved): in_ = _roll_last(X, dim)[unsolved] tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k) _roll_last(tau, dim)[unsolved] = tau_ _roll_last(support_size, dim)[unsolved] = ss_ return tau, support_size def sparsemax(X, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- X : torch.Tensor The input tensor. 
dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- P : torch tensor, same shape as X The projection result, such that P.sum(dim=dim) == 1 elementwise. """ return SparsemaxFunction.apply(X, dim, k) class SparsemaxFunction(Function): @classmethod def forward(cls, ctx, X, dim=-1, k=None): ctx.dim = dim max_val, _ = X.max(dim=dim, keepdim=True) X = X - max_val tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k) output = torch.clamp(X - tau, min=0) ctx.save_for_backward(supp_size, output) return output @classmethod def backward(cls, ctx, grad_output): supp_size, output = ctx.saved_tensors dim = ctx.dim grad_input = grad_output.clone() grad_input[output == 0] = 0 v_hat = grad_input.sum(dim=dim) / supp_size.squeeze() v_hat = v_hat.unsqueeze(dim) grad_input = torch.where(output != 0, grad_input - v_hat, grad_input) return grad_input, None, None class SparsemaxNew(nn.Module): def __init__(self, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. """ self.dim = dim self.k = k super(SparsemaxNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mtreviso/entmax
Sparsemax
false
10,624
[ "MIT" ]
0
5b029d07fe00d7aacc77c8e684a5796d29287575
https://github.com/mtreviso/entmax/tree/5b029d07fe00d7aacc77c8e684a5796d29287575
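The docstrings in the Sparsemax row above describe a Euclidean projection onto the simplex computed from a descending sort, a cumulative sum, a support size, and a threshold tau. The following is a small hand check of that recipe on a single 3-vector using plain torch ops; the tensor values and variable names are illustrative only.

import torch

# Hand check of the sparsemax recipe from the row above (full sort, k=None):
# sort descending, form cumulative sum minus 1, find the support size,
# derive the threshold tau, then clamp.
x = torch.tensor([2.0, 1.0, 0.1])

topk, _ = torch.sort(x, descending=True)              # [2.0, 1.0, 0.1]
topk_cumsum = topk.cumsum(0) - 1                       # [1.0, 2.0, 2.1]
rhos = torch.arange(1, x.numel() + 1, dtype=x.dtype)   # [1.0, 2.0, 3.0]
support = rhos * topk > topk_cumsum                    # [True, False, False]
support_size = support.sum()                           # 1
tau = topk_cumsum[support_size - 1] / support_size     # 1.0

p = torch.clamp(x - tau, min=0.0)                      # [1.0, 0.0, 0.0]
assert torch.isclose(p.sum(), torch.tensor(1.0))       # lands on the simplex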
Standardize
from torch.nn import Module import torch import torch.utils.data from torch.nn import init from torch.nn.parameter import Parameter class Standardize(Module): """ Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes (x - μ) / σ where '/' is applied element-wise. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn a translation parameter μ. Default: ``True`` Attributes: mu: the learnable translation parameter μ. std: the learnable scale parameter σ. """ __constants__ = ['mu'] def __init__(self, in_features, bias=True, eps=1e-06): super(Standardize, self).__init__() self.in_features = in_features self.out_features = in_features self.eps = eps self.std = Parameter(torch.Tensor(in_features)) if bias: self.mu = Parameter(torch.Tensor(in_features)) else: self.register_parameter('mu', None) self.reset_parameters() def reset_parameters(self): init.constant_(self.std, 1) if self.mu is not None: init.constant_(self.mu, 0) def forward(self, x): if self.mu is not None: x -= self.mu x = torch.div(x, self.std + self.eps) return x def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.mu is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.utils.data from torch.nn import init from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1e-06 tmp5 = tmp3 + tmp4 tmp6 = tmp2 / tmp5 tl.store(out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, buf1, primals_3, buf0 class StandardizeNew(Module): """ Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes (x - μ) / σ where '/' is applied element-wise. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn a translation parameter μ. Default: ``True`` Attributes: mu: the learnable translation parameter μ. std: the learnable scale parameter σ. """ __constants__ = ['mu'] def __init__(self, in_features, bias=True, eps=1e-06): super(StandardizeNew, self).__init__() self.in_features = in_features self.out_features = in_features self.eps = eps self.std = Parameter(torch.Tensor(in_features)) if bias: self.mu = Parameter(torch.Tensor(in_features)) else: self.register_parameter('mu', None) self.reset_parameters() def reset_parameters(self): init.constant_(self.std, 1) if self.mu is not None: init.constant_(self.mu, 0) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.mu is not None) def forward(self, input_0): primals_1 = self.std primals_3 = self.mu primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
kevinwss/Deep-SAD-Baseline
Standardize
false
10,625
[ "MIT" ]
0
b704725cc44ab5e7aa9bb09503a4c5f244fa907b
https://github.com/kevinwss/Deep-SAD-Baseline/tree/b704725cc44ab5e7aa9bb09503a4c5f244fa907b
ResizeConv2d
import torch from torch import nn import torch.nn.functional as F class ResizeConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, scale_factor, mode='nearest'): super().__init__() self.scale_factor = scale_factor self.mode = mode self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=1) def forward(self, x): x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode) x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'scale_factor': 1.0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 3, 3), (36, 9, 3, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(144)](buf2, primals_3, 144, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class ResizeConv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, scale_factor, mode='nearest'): super().__init__() self.scale_factor = scale_factor self.mode = mode self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=1) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
neuronphysics/FEAIML
ResizeConv2d
false
10,626
[ "MIT" ]
0
a31ae0d9f526f489fca1ca4b01dd8f06115de450
https://github.com/neuronphysics/FEAIML/tree/a31ae0d9f526f489fca1ca4b01dd8f06115de450
CrossLayer
import torch import torch.nn as nn import torch.optim class CrossLayer(nn.Module): def __init__(self, d, dropout): super().__init__() self.linear = nn.Linear(d, d) self.dropout = nn.Dropout(dropout) def forward(self, x0, x): return self.dropout(x0 * self.linear(x)) + x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](buf1, primals_4, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class CrossLayerNew(nn.Module): def __init__(self, d, dropout): super().__init__() self.linear = nn.Linear(d, d) self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
piers-hinds/rtdl
CrossLayer
false
10,627
[ "Apache-2.0" ]
0
66cf9b90d2269395152dabf32653bdd599ddb12e
https://github.com/piers-hinds/rtdl/tree/66cf9b90d2269395152dabf32653bdd599ddb12e
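The CrossLayer row above computes the multiplicative cross term x0 * linear(x) + x. A minimal numeric sketch of the same interaction, with the linear map replaced by the identity purely to keep the arithmetic visible; the values are made up for illustration.

import torch

# Cross interaction from the row above, x0 * linear(x) + x, with linear()
# taken as the identity and dropout omitted.
x0 = torch.tensor([2.0, 3.0])
x = torch.tensor([1.0, 4.0])
out = x0 * x + x                                   # [2*1+1, 3*4+4]
assert torch.equal(out, torch.tensor([3.0, 16.0]))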
LayerShift
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class LayerShift(nn.Module): def __init__(self, init=1.0): super().__init__() self.bias = torch.nn.Parameter(torch.zeros(1)) def forward(self, x): return x - self.bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 - tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LayerShiftNew(nn.Module): def __init__(self, init=1.0): super().__init__() self.bias = torch.nn.Parameter(torch.zeros(1)) def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ptillet/Fixup
LayerShift
false
10,628
[ "BSD-3-Clause" ]
0
c36dbe7f2cce71c4308afc43ab6e8551e567be30
https://github.com/ptillet/Fixup/tree/c36dbe7f2cce71c4308afc43ab6e8551e567be30
Decoder
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class Decoder(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(Decoder, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 6144) self.deconv1 = nn.ConvTranspose2d(128, 128, [1, 8], stride=[1, 2]) self.deconv2 = nn.ConvTranspose2d(128, 64, [1, 16], stride=[1, 2]) self.deconv3 = nn.ConvTranspose2d(64, 32, [1, 16], stride=[1, 2]) self.deconv4 = nn.ConvTranspose2d(32, img_channels, [1, 22], stride =[1, 2]) def forward(self, x): x = F.relu(self.fc1(x)) x = x.view(x.size(0), -1, 16, 3) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) reconstruction = torch.sigmoid(self.deconv4(x)) return reconstruction def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'img_channels': 4, 'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 8 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 8 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 22 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 22 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 88 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_view_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 512 xnumel = 48 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_out_ptr0 + (x2 + 48 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x2 + 48 * y0), xmask & ymask, eviction_policy ='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (y0 + 128 * x2 + 6144 * y1), tmp4, xmask & ymask) tl.store(out_ptr1 + (x2 + 48 * y3), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_sigmoid_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 3200 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 12800 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + 3200 * y3), tmp3, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (6144, 4), (4, 1)) assert_size_stride(primals_2, (6144,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (128, 128, 1, 8), (1024, 8, 8, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 64, 1, 16), (1024, 16, 16, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 32, 1, 16), (512, 16, 16, 1)) assert_size_stride(primals_9, (32,), (1,)) 
assert_size_stride(primals_10, (32, 4, 1, 22), (88, 22, 22, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 128, 1, 8), (1024, 1, 1024, 128), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16384, 8)](primals_4, buf0, 16384, 8, XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((128, 64, 1, 16), (1024, 1, 1024, 64), torch.float32) triton_poi_fused_1[grid(8192, 16)](primals_6, buf1, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((64, 32, 1, 16), (512, 1, 512, 32), torch .float32) triton_poi_fused_2[grid(2048, 16)](primals_8, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((32, 4, 1, 22), (88, 1, 88, 4), torch.float32 ) triton_poi_fused_3[grid(128, 22)](primals_10, buf3, 128, 22, XBLOCK =32, YBLOCK=32, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((4, 6144), (6144, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 6144 ), (1, 4), 0), out=buf4) del primals_1 buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 128, 16, 3), (6144, 1, 384, 128), torch.float32) buf15 = empty_strided_cuda((4, 6144), (6144, 1), torch.bool) triton_poi_fused_relu_threshold_backward_view_4[grid(512, 48)](buf5, primals_2, buf6, buf15, 512, 48, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf5 del primals_2 buf7 = extern_kernels.convolution(buf6, buf0, stride=(1, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 16, 12), (24576, 1, 1536, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_5[grid(98304)](buf8, primals_5, 98304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf1, stride=(1, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 16, 38), (38912, 1, 2432, 64)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_6[grid(155648)](buf10, primals_7, 155648, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf2, stride=(1, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 32, 16, 90), (46080, 1, 2880, 32)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_7[grid(184320)](buf12, primals_9, 184320, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf13 = extern_kernels.convolution(buf12, buf3, stride=(1, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 4, 16, 200), (12800, 1, 800, 4)) buf14 = empty_strided_cuda((4, 4, 16, 200), (12800, 3200, 200, 1), torch.float32) triton_poi_fused_convolution_sigmoid_8[grid(16, 3200)](buf13, primals_11, buf14, 16, 3200, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) del buf13 del primals_11 return (buf14, primals_3, buf0, buf1, buf2, buf3, buf6, buf8, buf10, buf12, buf14, buf15) class DecoderNew(nn.Module): """ VAE decoder """ def __init__(self, img_channels, latent_size): super(DecoderNew, self).__init__() self.latent_size = latent_size self.img_channels = img_channels self.fc1 = nn.Linear(latent_size, 6144) self.deconv1 = nn.ConvTranspose2d(128, 128, [1, 8], stride=[1, 2]) self.deconv2 = nn.ConvTranspose2d(128, 64, [1, 16], stride=[1, 2]) 
self.deconv3 = nn.ConvTranspose2d(64, 32, [1, 16], stride=[1, 2]) self.deconv4 = nn.ConvTranspose2d(32, img_channels, [1, 22], stride =[1, 2]) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.deconv1.weight primals_5 = self.deconv1.bias primals_6 = self.deconv2.weight primals_7 = self.deconv2.bias primals_8 = self.deconv3.weight primals_9 = self.deconv3.bias primals_10 = self.deconv4.weight primals_11 = self.deconv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
parthjaggi/world-models
Decoder
false
10,629
[ "MIT" ]
0
534b3a3474761e83da6c251bce97bea527e7435f
https://github.com/parthjaggi/world-models/tree/534b3a3474761e83da6c251bce97bea527e7435f
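The transposed-convolution widths asserted in the optimised Decoder code above (12, 38, 90, 200) follow from the deconv hyperparameters in the original module via the standard no-padding formula W_out = (W_in - 1) * stride + kernel. A quick arithmetic check:

# Width of each deconv output in the Decoder row above, starting from the
# (batch, 128, 16, 3) view of fc1: kernel widths 8, 16, 16, 22, stride 2,
# no padding, so W_out = (W_in - 1) * 2 + k.
w = 3
for k in (8, 16, 16, 22):
    w = (w - 1) * 2 + k
    print(w)   # 12, 38, 90, 200 -- matching the assert_size_stride shapes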
NgramCombined
import torch import torch.nn as nn import torch.nn.functional as F import torch.cuda import torch.distributed class NgramCombined(nn.Module): def __init__(self, n_gram): super(NgramCombined, self).__init__() self.n_gram = n_gram def forward(self, x): out = x if self.n_gram > 1: for i_gram in range(1, self.n_gram): out = F.pad(x.transpose(-1, -2), [i_gram, 0], mode= 'constant', value=0).transpose(-1, -2)[:, :-i_gram, :] + x return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_gram': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 4 x2 = xindex y3 = yindex y1 = yindex // 4 tmp4 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp0 = -3 + y0 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-12 + x2 + 4 * y3), tmp2 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp5, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(16, 4)](arg0_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class NgramCombinedNew(nn.Module): def __init__(self, n_gram): super(NgramCombinedNew, self).__init__() self.n_gram = n_gram def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
phuongnm-bkhn/OpenNMT-py
NgramCombined
false
10,630
[ "MIT" ]
0
554a826139f1bfc55f4ea6a3e7491858c2afec4c
https://github.com/phuongnm-bkhn/OpenNMT-py/tree/554a826139f1bfc55f4ea6a3e7491858c2afec4c
SoftDiceLoss
import torch import torch.nn as nn class SoftDiceLoss(nn.Module): """ Soft Dice Loss """ def __init__(self, weight=None, size_average=True): super(SoftDiceLoss, self).__init__() def forward(self, logits, targets): smooth = 1.0 logits = torch.sigmoid(logits) iflat = logits.view(-1) tflat = targets.view(-1) intersection = (iflat * tflat).sum() return 1 - (2.0 * intersection + smooth) / (iflat.sum() + tflat.sum () + smooth) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tl.broadcast_to(tmp1, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.broadcast_to(tmp2, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 2.0 tmp14 = tmp6 * tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = tmp9 + tmp12 tmp18 = tmp17 + tmp15 tmp19 = tmp16 / tmp18 tmp20 = tmp15 - tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class SoftDiceLossNew(nn.Module): """ Soft Dice Loss """ def __init__(self, weight=None, size_average=True): super(SoftDiceLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
prateekstark/unet.pytorch
SoftDiceLoss
false
10,631
[ "MIT" ]
0
b6ef6302f35ca93c6c818215c915e05b7f3055dc
https://github.com/prateekstark/unet.pytorch/tree/b6ef6302f35ca93c6c818215c915e05b7f3055dc
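The SoftDiceLoss row above evaluates 1 - (2 * intersection + smooth) / (sum of predictions + sum of targets + smooth) with smooth = 1. As a sanity check of the formula itself, on made-up binary tensors and bypassing the sigmoid, a perfect prediction drives the loss to zero:

import torch

# Dice formula from the row above on a perfect, already-binary prediction:
# intersection = 2, both sums = 2, so 1 - (2*2 + 1) / (2 + 2 + 1) = 0.
targets = torch.tensor([0.0, 1.0, 1.0, 0.0])
probs = targets.clone()                       # stand-in for sigmoid(logits)
intersection = (probs * targets).sum()
dice = (2.0 * intersection + 1.0) / (probs.sum() + targets.sum() + 1.0)
assert torch.isclose(1 - dice, torch.tensor(0.0))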
HSwish
import torch import torch.utils.data from torch import nn class HSwish(nn.Module): """Hard Swish activation function. See: https://arxiv.org/abs/1905.02244 """ def forward(self, x): return x * nn.functional.relu6(x + 3).div_(6) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tmp9 = tmp0 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HSwishNew(nn.Module): """Hard Swish activation function. See: https://arxiv.org/abs/1905.02244 """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
prabhum456/determined
HSwish
false
10,632
[ "Apache-2.0" ]
0
7e8017df0f62d80d21f5483578e2d5abd0e30935
https://github.com/prabhum456/determined/tree/7e8017df0f62d80d21f5483578e2d5abd0e30935
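The fused Triton kernel in the HSwish row above folds the division by 6 into a multiply by 0.16666666666666666. A small eager-mode check that this matches the hard-swish expression from the original module, x * relu6(x + 3) / 6:

import torch
import torch.nn.functional as F

# Hard swish two ways: the module's relu6 form and the kernel's folded
# multiply-by-1/6 form. Both should agree to float precision.
x = torch.linspace(-5.0, 5.0, steps=11)
eager = x * F.relu6(x + 3) / 6
folded = x * torch.clamp(x + 3, min=0.0, max=6.0) * 0.16666666666666666
assert torch.allclose(eager, folded)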
RewardEstimator
import math import torch import torch.nn as nn import torch.nn.functional as F def reset_parameters_util_x(model): for module in model.modules(): if isinstance(module, nn.Linear): nn.init.xavier_normal_(module.weight.data, 1) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.GRUCell): for mm in module.parameters(): if mm.data.ndimension() == 2: nn.init.xavier_normal_(mm.data, 1) elif mm.data.ndimension() == 1: mm.data.zero_() class RewardEstimator(nn.Module): """Estimates the reward the agent will receive. Value used as a baseline in REINFORCE loss""" def __init__(self, hid_dim): super(RewardEstimator, self).__init__() self.hid_dim = hid_dim self.v1 = nn.Linear(hid_dim, math.ceil(hid_dim / 2)) self.v2 = nn.Linear(math.ceil(hid_dim / 2), 1) self.reset_parameters() def reset_parameters(self): reset_parameters_util_x(self) def forward(self, x): x = x.detach() x = F.relu(self.v1(x)) x = self.v2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hid_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (1, 2), (2, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1, primals_3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), ( 2, 1), 0), reinterpret_tensor(primals_4, (2, 1), (1, 2), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), primals_4, buf4 def reset_parameters_util_x(model): for module in model.modules(): if isinstance(module, nn.Linear): nn.init.xavier_normal_(module.weight.data, 1) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.GRUCell): for mm in module.parameters(): if mm.data.ndimension() == 2: nn.init.xavier_normal_(mm.data, 1) elif mm.data.ndimension() == 1: mm.data.zero_() class RewardEstimatorNew(nn.Module): """Estimates the reward the agent will receive. Value used as a baseline in REINFORCE loss""" def __init__(self, hid_dim): super(RewardEstimatorNew, self).__init__() self.hid_dim = hid_dim self.v1 = nn.Linear(hid_dim, math.ceil(hid_dim / 2)) self.v2 = nn.Linear(math.ceil(hid_dim / 2), 1) self.reset_parameters() def reset_parameters(self): reset_parameters_util_x(self) def forward(self, input_0): primals_2 = self.v1.weight primals_3 = self.v1.bias primals_4 = self.v2.weight primals_5 = self.v2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
olipinski/MultimodalGame
RewardEstimator
false
10,633
[ "BSD-3-Clause" ]
0
cfacc66baebfadb6ed6a8b44b3dd71a298285d68
https://github.com/olipinski/MultimodalGame/tree/cfacc66baebfadb6ed6a8b44b3dd71a298285d68
TextProcessor
import torch
import torch.nn as nn
import torch.nn.functional as F


def reset_parameters_util_x(model):
    for module in model.modules():
        if isinstance(module, nn.Linear):
            nn.init.xavier_normal_(module.weight.data, 1)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.GRUCell):
            for mm in module.parameters():
                if mm.data.ndimension() == 2:
                    nn.init.xavier_normal_(mm.data, 1)
                elif mm.data.ndimension() == 1:
                    mm.data.zero_()


class TextProcessor(nn.Module):
    """Processes sentence representations to the correct hidden dimension"""

    def __init__(self, desc_dim, hid_dim):
        super(TextProcessor, self).__init__()
        self.desc_dim = desc_dim
        self.hid_dim = hid_dim
        self.transform = nn.Linear(desc_dim, hid_dim)
        self.reset_parameters()

    def reset_parameters(self):
        reset_parameters_util_x(self)

    def forward(self, desc):
        bs, num_classes, desc_dim = desc.size()
        desc = desc.view(-1, desc_dim)
        out = self.transform(desc)
        out = out.view(bs, num_classes, -1)
        return F.relu(out)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'desc_dim': 4, 'hid_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
            primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf1, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf2


def reset_parameters_util_x(model):
    for module in model.modules():
        if isinstance(module, nn.Linear):
            nn.init.xavier_normal_(module.weight.data, 1)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.GRUCell):
            for mm in module.parameters():
                if mm.data.ndimension() == 2:
                    nn.init.xavier_normal_(mm.data, 1)
                elif mm.data.ndimension() == 1:
                    mm.data.zero_()


class TextProcessorNew(nn.Module):
    """Processes sentence representations to the correct hidden dimension"""

    def __init__(self, desc_dim, hid_dim):
        super(TextProcessorNew, self).__init__()
        self.desc_dim = desc_dim
        self.hid_dim = hid_dim
        self.transform = nn.Linear(desc_dim, hid_dim)
        self.reset_parameters()

    def reset_parameters(self):
        reset_parameters_util_x(self)

    def forward(self, input_0):
        primals_2 = self.transform.weight
        primals_3 = self.transform.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
olipinski/MultimodalGame
TextProcessor
false
10,634
[ "BSD-3-Clause" ]
0
cfacc66baebfadb6ed6a8b44b3dd71a298285d68
https://github.com/olipinski/MultimodalGame/tree/cfacc66baebfadb6ed6a8b44b3dd71a298285d68
BinLinear
import torch
from itertools import product as product
import torch.nn.functional as F
from torch import nn
import torch.optim
import torch.utils.data


class BinQuant(torch.autograd.Function):
    """BinaryConnect quantization.

    Refer:
    https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html
    https://discuss.pytorch.org/t/difference-between-apply-an-call-for-an-autograd-function/13845/3
    """

    @staticmethod
    def forward(ctx, w):
        """Require w be in range of [0, 1].
        Otherwise, it is not in activate range.
        """
        return w.sign()

    @staticmethod
    def backward(ctx, grad_o):
        grad_i = grad_o.clone()
        return grad_i


class BinLinear(nn.Linear):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x, *args):
        self.weight_q = BinQuant.apply(self.weight)
        y = F.linear(x, self.weight_q, self.bias)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
from torch import nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp1 < tmp0
    tmp3 = tmp2.to(tl.int8)
    tmp4 = tmp0 < tmp1
    tmp5 = tmp4.to(tl.int8)
    tmp6 = tmp3 - tmp5
    tmp7 = tmp6.to(tmp0.dtype)
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sign_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del primals_2
    return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0,
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0))


class BinQuant(torch.autograd.Function):
    """BinaryConnect quantization.

    Refer:
    https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html
    https://discuss.pytorch.org/t/difference-between-apply-an-call-for-an-autograd-function/13845/3
    """

    @staticmethod
    def forward(ctx, w):
        """Require w be in range of [0, 1].
        Otherwise, it is not in activate range.
        """
        return w.sign()

    @staticmethod
    def backward(ctx, grad_o):
        grad_i = grad_o.clone()
        return grad_i


class BinLinearNew(nn.Linear):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ninfueng/a-PyTorch-Tutorial-to-Object-Detection
BinLinear
false
10,635
[ "MIT" ]
0
fc7544720a7e939f5a56f4f7214e4965b7775f77
https://github.com/ninfueng/a-PyTorch-Tutorial-to-Object-Detection/tree/fc7544720a7e939f5a56f4f7214e4965b7775f77
Actor
import torch
import torch.nn as nn
import torch.nn.functional as F


class Actor(nn.Module):

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        self.l1 = nn.Linear(state_dim, 4)
        self.l2 = nn.Linear(4, 4)
        self.l3 = nn.Linear(4, action_dim)
        self.max_action = max_action

    def forward(self, state):
        a = F.relu(self.l1(state))
        a = F.relu(self.l2(a))
        return self.max_action * torch.tanh(self.l3(a))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp2 = 4.0
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
            primals_5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, primals_6,
        buf6, primals_4, buf7)


class ActorNew(nn.Module):

    def __init__(self, state_dim, action_dim, max_action):
        super(ActorNew, self).__init__()
        self.l1 = nn.Linear(state_dim, 4)
        self.l2 = nn.Linear(4, 4)
        self.l3 = nn.Linear(4, action_dim)
        self.max_action = max_action

    def forward(self, input_0):
        primals_1 = self.l1.weight
        primals_2 = self.l1.bias
        primals_4 = self.l2.weight
        primals_5 = self.l2.bias
        primals_6 = self.l3.weight
        primals_7 = self.l3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
pkj415/CityLearn
Actor
false
10,636
[ "MIT" ]
0
912d1e28270fba2d11a713dc7f0445d59d620511
https://github.com/pkj415/CityLearn/tree/912d1e28270fba2d11a713dc7f0445d59d620511