Column schema and per-column statistics:

| column | dtype | stats |
| --- | --- | --- |
| entry_point | string | lengths 1 to 65 |
| original_triton_python_code | string | lengths 208 to 619k |
| optimised_triton_code | string | lengths 1.15k to 275k |
| repo_name | string | lengths 7 to 115 |
| module_name | string | lengths 1 to 65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0 to 18.5k |
| licenses | list | lengths 1 to 6 |
| stars | int64 | 0 to 19.8k |
| sha | string | lengths 40 to 40 |
| repo_link | string | lengths 72 to 180 |

---

entry_point: GIoU_loss
original_triton_python_code:

```python
import torch


def Interction_Union(outputs, targets):
    # Boxes are laid out as (cx, cy, w, h) along dim 1.
    width_o = outputs[:, 2]
    width_t = targets[:, 2]
    height_o = outputs[:, 3]
    height_t = targets[:, 3]
    x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
        targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
    x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
        targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
    y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
        targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
    y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
        targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
    Area_o = torch.mul(width_o, height_o)
    Area_t = torch.mul(width_t, height_t)
    Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
    Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
    Inter = torch.mul(Inter_w, Inter_t)
    # Clamp negative (empty) intersections to zero.
    zeros = torch.zeros_like(Inter)
    Inter = torch.where(Inter < 0, zeros, Inter)
    Union = torch.add(Area_o, Area_t).sub(Inter)
    return Inter, Union, x_max, x_min, y_max, y_min


class GIoU_loss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, outputs, targets):
        Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(
            outputs, targets)
        IoU = torch.div(Inter, Union)
        # C is the area of the smallest enclosing box;
        # GIoU = IoU - (C - Union) / C.
        C_width = x_max.sub(x_min)
        C_height = y_max.sub(y_min)
        C = torch.mul(C_width, C_height)
        GIoU = IoU.sub(torch.div(C.sub(Union), C))
        ones = torch.ones_like(GIoU)
        loss = ones.sub(GIoU)
        zeros = torch.zeros_like(loss)
        loss = torch.where(loss < 0, zeros, loss)
        return torch.sum(loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0( in_ptr0, in_ptr1, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 % 4 r0 = rindex % 4 r2 = rindex // 16 r3 = rindex % 16 tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None) tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None) tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None) tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None) tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK, RBLOCK]), tmp4, other=0.0) tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp17 = tmp16 * tmp7 tmp18 = tmp15 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tmp22 = 4 + r1 tmp24 = tmp22 < tmp3 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp27 = tmp26 * tmp7 tmp28 = tmp25 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp22 >= tmp3 tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp35 = tmp34 * tmp7 tmp36 = tmp33 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp31, tmp36, tmp37) tmp39 = tl.where(tmp24, tmp30, tmp38) tmp40 = triton_helpers.maximum(tmp21, tmp39) tmp41 = tmp5 - tmp8 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp4, tmp41, tmp42) tmp44 = tmp15 - tmp17 tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp12, tmp44, tmp45) tmp47 = tl.where(tmp4, tmp43, tmp46) tmp48 = tmp25 - tmp27 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp24, tmp48, tmp49) tmp51 = tmp33 - tmp35 tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp31, tmp51, tmp52) tmp54 = tl.where(tmp24, tmp50, tmp53) tmp55 = triton_helpers.minimum(tmp47, tmp54) tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp58 = tmp57 * tmp7 tmp59 = tmp56 + tmp58 tmp60 = 
tl.full(tmp59.shape, 0.0, tmp59.dtype) tmp61 = tl.where(tmp4, tmp59, tmp60) tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp64 = tmp63 * tmp7 tmp65 = tmp62 + tmp64 tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype) tmp67 = tl.where(tmp12, tmp65, tmp66) tmp68 = tl.where(tmp4, tmp61, tmp67) tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp71 = tmp70 * tmp7 tmp72 = tmp69 + tmp71 tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype) tmp74 = tl.where(tmp24, tmp72, tmp73) tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp77 = tmp76 * tmp7 tmp78 = tmp75 + tmp77 tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype) tmp80 = tl.where(tmp31, tmp78, tmp79) tmp81 = tl.where(tmp24, tmp74, tmp80) tmp82 = triton_helpers.maximum(tmp68, tmp81) tmp83 = tmp56 - tmp58 tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype) tmp85 = tl.where(tmp4, tmp83, tmp84) tmp86 = tmp62 - tmp64 tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype) tmp88 = tl.where(tmp12, tmp86, tmp87) tmp89 = tl.where(tmp4, tmp85, tmp88) tmp90 = tmp69 - tmp71 tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype) tmp92 = tl.where(tmp24, tmp90, tmp91) tmp93 = tmp75 - tmp77 tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype) tmp95 = tl.where(tmp31, tmp93, tmp94) tmp96 = tl.where(tmp24, tmp92, tmp95) tmp97 = triton_helpers.minimum(tmp89, tmp96) tmp100 = tmp98 + tmp99 tmp101 = tmp40 - tmp55 tmp102 = tmp100 - tmp101 tmp105 = tmp103 + tmp104 tmp106 = tmp82 - tmp97 tmp107 = tmp105 - tmp106 tmp108 = tmp102 * tmp107 tmp109 = tmp98 * tmp103 tmp110 = tmp99 * tmp104 tmp111 = tmp109 + tmp110 tmp112 = 0.0 tmp113 = tmp108 < tmp112 tmp114 = tl.where(tmp113, tmp112, tmp108) tmp115 = tmp111 - tmp114 tmp116 = tmp114 / tmp115 tmp117 = tmp101 * tmp106 tmp118 = tmp117 - tmp115 tmp119 = tmp118 / tmp117 tmp120 = tmp116 - tmp119 tmp121 = 1.0 tmp122 = tmp121 - tmp120 tmp123 = tmp122 < tmp112 tmp124 = tl.where(tmp123, tmp112, tmp122) tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK]) tmp127 = tl.sum(tmp125, 1)[:, None] tl.store(out_ptr5 + tl.full([XBLOCK, 1], 0, tl.int32), tmp127, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf7 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_div_lt_max_min_mul_ones_like_sub_sum_where_zeros_like_0[ grid(1)](arg0_1, arg1_1, buf7, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf7, def Interction_Union(outputs, targets): width_o = outputs[:, 2] width_t = targets[:, 2] height_o = outputs[:, 3] height_t = targets[:, 3] x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2, targets[:, 0] + targets[:, 2] / 2), 1), 1)[0] x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2, targets[:, 0] - targets[:, 2] / 2), 1), 1)[0] y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2, targets[:, 1] + targets[:, 3] / 2), 1), 1)[0] y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2, 
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0] Area_o = torch.mul(width_o, height_o) Area_t = torch.mul(width_t, height_t) Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min)) Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min)) Inter = torch.mul(Inter_w, Inter_t) zeros = torch.zeros_like(Inter) Inter = torch.where(Inter < 0, zeros, Inter) Union = torch.add(Area_o, Area_t).sub(Inter) return Inter, Union, x_max, x_min, y_max, y_min class GIoU_lossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: debrouchovea/ReproduceGoturn
module_name: GIoU_loss
synthetic: false
uuid: 3411
licenses: ["MIT"]
stars: 0
sha: d60f13c781ca612cacc17536530bbee989bdfa45
repo_link: https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45
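Since each record pairs the eager module with its Inductor-compiled counterpart, a natural check is that the two agree. A minimal smoke test, assuming the record's code above has been executed in the current session and a CUDA device is available (the generated kernel is CUDA-only and asserts the exact (4, 4, 4, 4) input strides); the tolerances are illustrative:

```python
import torch

if torch.cuda.is_available():
    outs, tgts = [t.cuda() for t in get_inputs()]
    ref = GIoU_loss()(outs, tgts)        # eager reference
    fused = GIoU_lossNew()(outs, tgts)   # single fused reduction kernel
    assert torch.allclose(ref, fused, rtol=1e-4, atol=1e-5)
```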

---

entry_point: IoU_loss
original_triton_python_code:

```python
import torch


def Interction_Union(outputs, targets):
    # Boxes are laid out as (cx, cy, w, h) along dim 1.
    width_o = outputs[:, 2]
    width_t = targets[:, 2]
    height_o = outputs[:, 3]
    height_t = targets[:, 3]
    x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
        targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
    x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
        targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
    y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
        targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
    y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
        targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
    Area_o = torch.mul(width_o, height_o)
    Area_t = torch.mul(width_t, height_t)
    Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
    Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
    Inter = torch.mul(Inter_w, Inter_t)
    zeros = torch.zeros_like(Inter)
    Inter = torch.where(Inter < 0, zeros, Inter)
    Union = torch.add(Area_o, Area_t).sub(Inter)
    return Inter, Union, x_max, x_min, y_max, y_min


class IoU_loss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, outputs, targets):
        Inter, Union, _, _, _, _ = Interction_Union(outputs, targets)
        zeros = torch.zeros_like(Inter)
        # loss = clamp(1 - IoU, min=0), summed over all boxes.
        loss = torch.div(Inter, Union)
        loss = 1 - loss
        loss = torch.where(loss < 0, zeros, loss)
        return torch.sum(loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0( in_ptr0, in_ptr1, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 % 4 r0 = rindex % 4 r2 = rindex // 16 r3 = rindex % 16 tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None) tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None) tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None) tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None) tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK, RBLOCK]), tmp4, other=0.0) tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp17 = tmp16 * tmp7 tmp18 = tmp15 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tmp22 = 4 + r1 tmp24 = tmp22 < tmp3 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp27 = tmp26 * tmp7 tmp28 = tmp25 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp22 >= tmp3 tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp35 = tmp34 * tmp7 tmp36 = tmp33 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp31, tmp36, tmp37) tmp39 = tl.where(tmp24, tmp30, tmp38) tmp40 = triton_helpers.maximum(tmp21, tmp39) tmp41 = tmp5 - tmp8 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp4, tmp41, tmp42) tmp44 = tmp15 - tmp17 tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp12, tmp44, tmp45) tmp47 = tl.where(tmp4, tmp43, tmp46) tmp48 = tmp25 - tmp27 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp24, tmp48, tmp49) tmp51 = tmp33 - tmp35 tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp31, tmp51, tmp52) tmp54 = tl.where(tmp24, tmp50, tmp53) tmp55 = triton_helpers.minimum(tmp47, tmp54) tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp58 = tmp57 * tmp7 tmp59 = tmp56 + tmp58 tmp60 = tl.full(tmp59.shape, 
0.0, tmp59.dtype) tmp61 = tl.where(tmp4, tmp59, tmp60) tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp64 = tmp63 * tmp7 tmp65 = tmp62 + tmp64 tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype) tmp67 = tl.where(tmp12, tmp65, tmp66) tmp68 = tl.where(tmp4, tmp61, tmp67) tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp71 = tmp70 * tmp7 tmp72 = tmp69 + tmp71 tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype) tmp74 = tl.where(tmp24, tmp72, tmp73) tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp77 = tmp76 * tmp7 tmp78 = tmp75 + tmp77 tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype) tmp80 = tl.where(tmp31, tmp78, tmp79) tmp81 = tl.where(tmp24, tmp74, tmp80) tmp82 = triton_helpers.maximum(tmp68, tmp81) tmp83 = tmp56 - tmp58 tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype) tmp85 = tl.where(tmp4, tmp83, tmp84) tmp86 = tmp62 - tmp64 tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype) tmp88 = tl.where(tmp12, tmp86, tmp87) tmp89 = tl.where(tmp4, tmp85, tmp88) tmp90 = tmp69 - tmp71 tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype) tmp92 = tl.where(tmp24, tmp90, tmp91) tmp93 = tmp75 - tmp77 tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype) tmp95 = tl.where(tmp31, tmp93, tmp94) tmp96 = tl.where(tmp24, tmp92, tmp95) tmp97 = triton_helpers.minimum(tmp89, tmp96) tmp100 = tmp98 + tmp99 tmp101 = tmp40 - tmp55 tmp102 = tmp100 - tmp101 tmp105 = tmp103 + tmp104 tmp106 = tmp82 - tmp97 tmp107 = tmp105 - tmp106 tmp108 = tmp102 * tmp107 tmp109 = 0.0 tmp110 = tmp108 < tmp109 tmp111 = tl.where(tmp110, tmp109, tmp108) tmp112 = tmp98 * tmp103 tmp113 = tmp99 * tmp104 tmp114 = tmp112 + tmp113 tmp115 = tmp114 - tmp111 tmp116 = tmp111 / tmp115 tmp117 = 1.0 tmp118 = tmp117 - tmp116 tmp119 = tmp118 < tmp109 tmp120 = tl.where(tmp119, tmp109, tmp118) tmp121 = tl.broadcast_to(tmp120, [XBLOCK, RBLOCK]) tmp123 = tl.sum(tmp121, 1)[:, None] tl.store(out_ptr3 + tl.full([XBLOCK, 1], 0, tl.int32), tmp123, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf6 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_div_lt_max_min_mul_rsub_sub_sum_where_zeros_like_0[ grid(1)](arg0_1, arg1_1, buf6, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf6, def Interction_Union(outputs, targets): width_o = outputs[:, 2] width_t = targets[:, 2] height_o = outputs[:, 3] height_t = targets[:, 3] x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2, targets[:, 0] + targets[:, 2] / 2), 1), 1)[0] x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2, targets[:, 0] - targets[:, 2] / 2), 1), 1)[0] y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2, targets[:, 1] + targets[:, 3] / 2), 1), 1)[0] y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2, targets[:, 1] - targets[:, 3] / 2), 1), 1)[0] Area_o = torch.mul(width_o, height_o) Area_t = torch.mul(width_t, height_t) Inter_w = 
torch.add(width_o, width_t).sub(x_max.sub(x_min)) Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min)) Inter = torch.mul(Inter_w, Inter_t) zeros = torch.zeros_like(Inter) Inter = torch.where(Inter < 0, zeros, Inter) Union = torch.add(Area_o, Area_t).sub(Inter) return Inter, Union, x_max, x_min, y_max, y_min class IoU_lossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: debrouchovea/ReproduceGoturn
module_name: IoU_loss
synthetic: false
uuid: 3412
licenses: ["MIT"]
stars: 0
sha: d60f13c781ca612cacc17536530bbee989bdfa45
repo_link: https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45
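A worked special case makes the loss easy to sanity-check: for two identical boxes the intersection equals the union, so IoU = 1 and the summed loss is zero. A small eager-only sketch, assuming the record's classes above are defined; the box values are illustrative:

```python
import torch

# One identical (cx, cy, w, h) box on each side: IoU = 1, loss = 0.
box = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
loss = IoU_loss()(box, box)
assert torch.allclose(loss, torch.zeros(()))
```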

---

entry_point: GRUCell
original_triton_python_code:

```python
import torch
import numpy as np
import torch.nn.functional as F
from torch import nn


class GRUCell(nn.Module):

    def __init__(self, input_size, hidden_size, bias=True):
        super(GRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # All three gates are packed into one Linear and split with chunk.
        self.x2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)
        self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        std = 1.0 / np.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, x, hidden):
        x = x.view(-1, x.size(-1))
        gate_x = self.x2h(x)
        gate_h = self.h2h(hidden)
        gate_x = gate_x.squeeze()
        gate_h = gate_h.squeeze()
        i_r, i_i, i_n = gate_x.chunk(3, 1)
        h_r, h_i, h_n = gate_h.chunk(3, 1)
        resetgate = F.sigmoid(i_r + h_r)
        inputgate = F.sigmoid(i_i + h_i)
        newgate = F.tanh(i_n + resetgate * h_n)
        hy = newgate + inputgate * (hidden - newgate)
        return hy


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
```

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask) tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask) tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask) tmp19 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp11 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = libdevice.tanh(tmp17) tmp20 = tmp19 - tmp18 tmp21 = tmp5 * tmp20 tmp22 = tmp18 + tmp21 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr2 + x2, tmp18, xmask) tl.store(out_ptr3 + x2, tmp22, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor( primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(16)](buf0, primals_3, buf1, primals_6, buf3, buf2, buf4, buf5, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf0 del primals_3 return buf5, primals_6, primals_1, reinterpret_tensor(buf1, (4, 4), (12, 1), 8), buf2, buf3, buf4 class GRUCellNew(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super(GRUCellNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): 
std = 1.0 / np.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, input_0, input_1): primals_2 = self.x2h.weight primals_3 = self.x2h.bias primals_4 = self.h2h.weight primals_5 = self.h2h.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
repo_name: deutschmn/PM2.5-GNN
module_name: GRUCell
synthetic: false
uuid: 3413
licenses: ["MIT"]
stars: 0
sha: 82e3fe2f25465451cbbdd6350c91a0242ecaa1c1
repo_link: https://github.com/deutschmn/PM2.5-GNN/tree/82e3fe2f25465451cbbdd6350c91a0242ecaa1c1
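In equations, the cell computes r = sigmoid(i_r + h_r), z = sigmoid(i_i + h_i), n = tanh(i_n + r * h_n), and h' = n + z * (h - n), which is algebraically (1 - z) * n + z * h, the standard GRU update. A minimal usage sketch, assuming the GRUCell class above is defined:

```python
import torch

torch.manual_seed(0)
cell = GRUCell(input_size=4, hidden_size=4)
x, h = torch.rand(4, 4), torch.rand(4, 4)
h_next = cell(x, h)              # one recurrence step
assert h_next.shape == (4, 4)
```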

---

entry_point: CIoU_loss
original_triton_python_code:

```python
import torch
import numpy as np


def Interction_Union(outputs, targets):
    # Boxes are laid out as (cx, cy, w, h) along dim 1.
    width_o = outputs[:, 2]
    width_t = targets[:, 2]
    height_o = outputs[:, 3]
    height_t = targets[:, 3]
    x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
        targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
    x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
        targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
    y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
        targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
    y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
        targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
    Area_o = torch.mul(width_o, height_o)
    Area_t = torch.mul(width_t, height_t)
    Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
    Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
    Inter = torch.mul(Inter_w, Inter_t)
    zeros = torch.zeros_like(Inter)
    Inter = torch.where(Inter < 0, zeros, Inter)
    Union = torch.add(Area_o, Area_t).sub(Inter)
    return Inter, Union, x_max, x_min, y_max, y_min


def Center_points(outputs, targets):
    x_o = outputs[:, 0]
    y_o = outputs[:, 1]
    x_t = targets[:, 0]
    y_t = targets[:, 1]
    return x_o, y_o, x_t, y_t


class DIoU_loss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, outputs, targets):
        Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(
            outputs, targets)
        IoU = torch.div(Inter, Union)
        C_width = x_max.sub(x_min)
        C_height = y_max.sub(y_min)
        C = torch.mul(C_width, C_height)
        x_o, y_o, x_t, y_t = Center_points(outputs, targets)
        # Squared center distance, normalised by the enclosing-box term.
        dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(
            y_t), 2))
        R_DIoU = torch.div(dis, torch.pow(C, 2))
        ones = torch.ones_like(IoU)
        loss = torch.add(ones.sub(IoU), R_DIoU)
        zeros = torch.zeros_like(loss)
        loss = torch.where(loss < 0, zeros, loss)
        return torch.sum(loss)


class CIoU_loss(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, outputs, targets):
        Inter, Union, _x_max, _x_min, _y_max, _y_min = Interction_Union(
            outputs, targets)
        IoU = torch.div(Inter, Union)
        loss_DIoU = DIoU_loss()
        loss = loss_DIoU(outputs, targets)
        width_o = outputs[:, 2]
        width_t = targets[:, 2]
        height_o = outputs[:, 3]
        height_t = targets[:, 3]
        # Aspect-ratio term: v = (4 / pi^2) * (atan(w_t/h_t) - atan(w_o/h_o))^2
        v = torch.pow(torch.arctan(torch.div(width_t, height_t)).sub(
            torch.arctan(torch.div(width_o, height_o))), 2) * 4 / (np.pi *
            np.pi)
        alpha = torch.div(v, 1 + v.sub(IoU))
        R_CIoU = torch.mul(alpha, v)
        loss = torch.add(loss, R_CIoU)
        zeros = torch.zeros_like(loss)
        loss = torch.where(loss < 0, zeros, loss)
        return torch.sum(loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0( in_ptr0, in_ptr1, out_ptr8, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 % 4 r0 = rindex % 4 r2 = rindex // 16 r3 = rindex % 16 tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None) tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None) tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None) tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None) tmp119 = tl.load(in_ptr0 + (r3 + 64 * r2), None) tmp120 = tl.load(in_ptr1 + (r3 + 64 * r2), None) tmp123 = tl.load(in_ptr0 + (16 + r3 + 64 * r2), None) tmp124 = tl.load(in_ptr1 + (16 + r3 + 64 * r2), None) tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK, RBLOCK]), tmp4, other=0.0) tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp17 = tmp16 * tmp7 tmp18 = tmp15 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tmp22 = 4 + r1 tmp24 = tmp22 < tmp3 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp27 = tmp26 * tmp7 tmp28 = tmp25 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp22 >= tmp3 tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp35 = tmp34 * tmp7 tmp36 = tmp33 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp31, tmp36, tmp37) tmp39 = tl.where(tmp24, tmp30, tmp38) tmp40 = triton_helpers.maximum(tmp21, tmp39) tmp41 = tmp5 - tmp8 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp4, tmp41, tmp42) tmp44 = tmp15 - tmp17 tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp12, tmp44, tmp45) tmp47 = tl.where(tmp4, tmp43, tmp46) tmp48 = tmp25 - tmp27 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp24, tmp48, tmp49) tmp51 = tmp33 - tmp35 tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp31, tmp51, tmp52) tmp54 = tl.where(tmp24, tmp50, tmp53) tmp55 = triton_helpers.minimum(tmp47, tmp54) tmp56 
= tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp58 = tmp57 * tmp7 tmp59 = tmp56 + tmp58 tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype) tmp61 = tl.where(tmp4, tmp59, tmp60) tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp64 = tmp63 * tmp7 tmp65 = tmp62 + tmp64 tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype) tmp67 = tl.where(tmp12, tmp65, tmp66) tmp68 = tl.where(tmp4, tmp61, tmp67) tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp71 = tmp70 * tmp7 tmp72 = tmp69 + tmp71 tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype) tmp74 = tl.where(tmp24, tmp72, tmp73) tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp77 = tmp76 * tmp7 tmp78 = tmp75 + tmp77 tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype) tmp80 = tl.where(tmp31, tmp78, tmp79) tmp81 = tl.where(tmp24, tmp74, tmp80) tmp82 = triton_helpers.maximum(tmp68, tmp81) tmp83 = tmp56 - tmp58 tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype) tmp85 = tl.where(tmp4, tmp83, tmp84) tmp86 = tmp62 - tmp64 tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype) tmp88 = tl.where(tmp12, tmp86, tmp87) tmp89 = tl.where(tmp4, tmp85, tmp88) tmp90 = tmp69 - tmp71 tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype) tmp92 = tl.where(tmp24, tmp90, tmp91) tmp93 = tmp75 - tmp77 tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype) tmp95 = tl.where(tmp31, tmp93, tmp94) tmp96 = tl.where(tmp24, tmp92, tmp95) tmp97 = triton_helpers.minimum(tmp89, tmp96) tmp100 = tmp98 + tmp99 tmp101 = tmp40 - tmp55 tmp102 = tmp100 - tmp101 tmp105 = tmp103 + tmp104 tmp106 = tmp82 - tmp97 tmp107 = tmp105 - tmp106 tmp108 = tmp102 * tmp107 tmp109 = 0.0 tmp110 = tmp108 < tmp109 tmp111 = tl.where(tmp110, tmp109, tmp108) tmp112 = tmp98 * tmp103 tmp113 = tmp99 * tmp104 tmp114 = tmp112 + tmp113 tmp115 = tmp114 - tmp111 tmp116 = tmp111 / tmp115 tmp117 = 1.0 tmp118 = tmp117 - tmp116 tmp121 = tmp119 - tmp120 tmp122 = tmp121 * tmp121 tmp125 = tmp123 - tmp124 tmp126 = tmp125 * tmp125 tmp127 = tmp122 + tmp126 tmp128 = tmp101 * tmp106 tmp129 = tmp128 * tmp128 tmp130 = tmp127 / tmp129 tmp131 = tmp118 + tmp130 tmp132 = tmp131 < tmp109 tmp133 = tl.where(tmp132, tmp109, tmp131) tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = tl.sum(tmp134, 1)[:, None] tmp137 = tmp99 / tmp104 tmp138 = libdevice.atan(tmp137) tmp139 = tmp98 / tmp103 tmp140 = libdevice.atan(tmp139) tmp141 = tmp138 - tmp140 tmp142 = tmp141 * tmp141 tmp143 = 4.0 tmp144 = tmp142 * tmp143 tmp145 = 0.10132118364233778 tmp146 = tmp144 * tmp145 tmp147 = tmp146 - tmp116 tmp148 = tmp147 + tmp117 tmp149 = tmp146 / tmp148 tmp150 = tmp149 * tmp146 tmp151 = tmp136 + tmp150 tmp152 = tmp151 < tmp109 tmp153 = tl.where(tmp152, tmp109, tmp151) tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = tl.sum(tmp154, 1)[:, None] tl.store(out_ptr8 + tl.full([XBLOCK, 1], 0, tl.int32), tmp156, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf13 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_atan_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0[ grid(1)](arg0_1, arg1_1, buf13, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf13, def Interction_Union(outputs, targets): width_o = outputs[:, 2] width_t = targets[:, 2] height_o = outputs[:, 3] height_t = targets[:, 3] x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2, targets[:, 0] + targets[:, 2] / 2), 1), 1)[0] x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2, targets[:, 0] - targets[:, 2] / 2), 1), 1)[0] y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2, targets[:, 1] + targets[:, 3] / 2), 1), 1)[0] y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2, targets[:, 1] - targets[:, 3] / 2), 1), 1)[0] Area_o = torch.mul(width_o, height_o) Area_t = torch.mul(width_t, height_t) Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min)) Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min)) Inter = torch.mul(Inter_w, Inter_t) zeros = torch.zeros_like(Inter) Inter = torch.where(Inter < 0, zeros, Inter) Union = torch.add(Area_o, Area_t).sub(Inter) return Inter, Union, x_max, x_min, y_max, y_min def Center_points(outputs, targets): x_o = outputs[:, 0] y_o = outputs[:, 1] x_t = targets[:, 0] y_t = targets[:, 1] return x_o, y_o, x_t, y_t class DIoU_loss(torch.nn.Module): def __init__(self): super().__init__() def forward(self, outputs, targets): Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs, targets) IoU = torch.div(Inter, Union) C_width = x_max.sub(x_min) C_height = y_max.sub(y_min) C = torch.mul(C_width, C_height) x_o, y_o, x_t, y_t = Center_points(outputs, targets) dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2)) R_DIoU = torch.div(dis, torch.pow(C, 2)) ones = torch.ones_like(IoU) loss = torch.add(ones.sub(IoU), R_DIoU) zeros = torch.zeros_like(loss) loss = torch.where(loss < 0, zeros, loss) return torch.sum(loss) class CIoU_lossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: debrouchovea/ReproduceGoturn
module_name: CIoU_loss
synthetic: false
uuid: 3414
licenses: ["MIT"]
stars: 0
sha: d60f13c781ca612cacc17536530bbee989bdfa45
repo_link: https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45
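CIoU extends DIoU with the aspect-ratio penalty alpha * v, where alpha = v / (1 - IoU + v); the constant 0.10132118364233778 visible in the fused kernel is 1 / pi^2, matching the 4 / pi^2 factor in v. A minimal eager smoke test, assuming the record's classes above are defined:

```python
import torch

torch.manual_seed(0)
outs, tgts = get_inputs()
loss = CIoU_loss()(outs, tgts)   # DIoU term plus alpha * v
# Scalar output; finite for these random positive inputs.
assert loss.ndim == 0 and bool(torch.isfinite(loss))
```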

---

entry_point: MidNet4
original_triton_python_code:

```python
import torch
import torch.nn as nn


class MidNet4(nn.Module):

    def forward(self, x_in):
        """Network with dilation rate 4

        :param x_in: input convolutional features
        :returns: processed convolutional features
        :rtype: Tensor
        """
        x = self.lrelu(self.conv1(x_in))
        x = self.lrelu(self.conv2(x))
        x = self.lrelu(self.conv3(x))
        x = self.conv4(x)
        return x

    def __init__(self, in_channels=16):
        """FIXME! briefly describe function

        :param in_channels: Input channels
        :returns: N/A
        :rtype: N/A
        """
        super(MidNet4, self).__init__()
        self.lrelu = nn.LeakyReLU()
        # Conv2d(in, out, kernel=3, stride=1, padding=4, dilation=4):
        # padding matches dilation * (kernel - 1) / 2, so sizes are preserved.
        self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4)
        self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4)
        self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4)
        self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4)


def get_inputs():
    return [torch.rand([4, 16, 64, 64])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf0, primals_2, buf1, buf2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf3, primals_5, buf4, buf5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) buf8 = buf3 del buf3 triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf6, primals_7, buf7, buf8, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf6 del 
primals_7 buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf10 = buf9 del buf9 triton_poi_fused_convolution_1[grid(1048576)](buf10, primals_9, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf5, buf7, buf8) class MidNet4New(nn.Module): def __init__(self, in_channels=16): """FIXME! briefly describe function :param in_channels: Input channels :returns: N/A :rtype: N/A """ super(MidNet4New, self).__init__() self.lrelu = nn.LeakyReLU() self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4) self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4) self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4) self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
repo_name: deshwalmahesh/CURL---cpu-gpu
module_name: MidNet4
synthetic: false
uuid: 3415
licenses: ["BSD-3-Clause"]
stars: 0
sha: f4e87275b6cce556b9e04a188cf7ae13d810d82a
repo_link: https://github.com/deshwalmahesh/CURL---cpu-gpu/tree/f4e87275b6cce556b9e04a188cf7ae13d810d82a
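With kernel 3, dilation 4, and padding 4, each layer has an effective receptive field of 9 but preserves the spatial size, so only the channel count changes. A shape sketch, assuming the MidNet4 class above is defined:

```python
import torch

net = MidNet4(in_channels=16)
# H_out = H + 2*4 - 4*(3 - 1) - 1 + 1 = H for every conv in the stack.
y = net(torch.rand(1, 16, 32, 32))
assert y.shape == (1, 64, 32, 32)
```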

---

entry_point: ParallelLinear
original_triton_python_code:

```python
import torch
import numpy as np
import torch.nn as nn


class ParallelLinear(nn.Module):

    def __init__(self, n_parallel, in_features, out_features, act=None,
        random_bias=False):
        super().__init__()
        self.act = act
        self.weight = nn.Parameter(torch.Tensor(n_parallel, in_features,
            out_features))
        self.bias = nn.Parameter(torch.Tensor(n_parallel, out_features))
        with torch.no_grad():
            # He-style init, scaled by fan-in.
            self.weight.normal_(0.0, np.sqrt(2.0 / in_features))
            if random_bias:
                self.bias.normal_(0.0, np.sqrt(2.0 / in_features))
            else:
                self.bias.zero_()

    def forward(self, x):
        # One batched matmul evaluates all n_parallel linear layers at once.
        x = torch.bmm(x, self.weight) + self.bias[:, None, :]
        if self.act:
            x = self.act(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_parallel': 4, 'in_features': 4, 'out_features': 4}]
```

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_2, primals_1, out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0) class ParallelLinearNew(nn.Module): def __init__(self, n_parallel, in_features, out_features, act=None, random_bias=False): super().__init__() self.act = act self.weight = nn.Parameter(torch.Tensor(n_parallel, in_features, out_features)) self.bias = nn.Parameter(torch.Tensor(n_parallel, out_features)) with torch.no_grad(): self.weight.normal_(0.0, np.sqrt(2.0 / in_features)) if random_bias: self.bias.normal_(0.0, np.sqrt(2.0 / in_features)) else: self.bias.zero_() def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: dholzmueller/nn_inconsistency
module_name: ParallelLinear
synthetic: false
uuid: 3416
licenses: ["Apache-2.0"]
stars: 0
sha: 67954d71cdbbc61fda7da1f624c19985b0e51708
repo_link: https://github.com/dholzmueller/nn_inconsistency/tree/67954d71cdbbc61fda7da1f624c19985b0e51708
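The bmm contracts x of shape (n_parallel, batch, in_features) against weights of shape (n_parallel, in_features, out_features), with the bias broadcast over the batch dimension; accordingly, the compiled version above reduces to an extern bmm call plus one small add kernel. A usage sketch, assuming the class above is defined:

```python
import torch

layer = ParallelLinear(n_parallel=4, in_features=4, out_features=4)
out = layer(torch.rand(4, 8, 4))   # (n_parallel, batch, out_features)
assert out.shape == (4, 8, 4)
```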

---

entry_point: CPUForgetMult
original_triton_python_code:

```python
import torch


class CPUForgetMult(torch.nn.Module):

    def __init__(self):
        super(CPUForgetMult, self).__init__()

    def forward(self, f, x, hidden_init=None):
        # Scan along dim 0: h_t = f_t * x_t + (1 - f_t) * h_{t-1}.
        result = []
        forgets = f.split(1, dim=0)
        prev_h = hidden_init
        for i, h in enumerate((f * x).split(1, dim=0)):
            if prev_h is not None:
                h = h + (1 - forgets[i]) * prev_h
            h = h.view(h.size()[1:])
            result.append(h)
            prev_h = h
        return torch.stack(result)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_stack_0(in_ptr0, in_ptr1, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (128 + x0), xmask) tmp1 = tl.load(in_ptr1 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (64 + x0), xmask) tmp6 = tl.load(in_ptr1 + (64 + x0), xmask) tmp9 = tl.load(in_ptr0 + x0, xmask) tmp10 = tl.load(in_ptr1 + x0, xmask) tmp16 = tl.load(in_ptr0 + (192 + x0), xmask) tmp17 = tl.load(in_ptr1 + (192 + x0), xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp7 = tmp5 * tmp6 tmp8 = tmp3 - tmp5 tmp11 = tmp9 * tmp10 tmp12 = tmp8 * tmp11 tmp13 = tmp7 + tmp12 tmp14 = tmp4 * tmp13 tmp15 = tmp2 + tmp14 tmp18 = tmp16 * tmp17 tmp19 = tmp3 - tmp16 tmp20 = tmp19 * tmp15 tmp21 = tmp18 + tmp20 tl.store(out_ptr1 + x0, tmp13, xmask) tl.store(out_ptr2 + x0, tmp11, xmask) tl.store(out_ptr3 + x0, tmp15, xmask) tl.store(out_ptr4 + x0, tmp21, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 64) buf1 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) buf3 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 128) buf4 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 192) get_raw_stream(0) triton_poi_fused_add_mul_rsub_stack_0[grid(64)](arg0_1, arg1_1, buf2, buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), class CPUForgetMultNew(torch.nn.Module): def __init__(self): super(CPUForgetMultNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: dido1998/cruxeval
module_name: CPUForgetMult
synthetic: false
uuid: 3417
licenses: ["BSD-3-Clause"]
stars: 0
sha: 229f7562c3f5e0da6432728e1c42402f51473a84
repo_link: https://github.com/dido1998/cruxeval/tree/229f7562c3f5e0da6432728e1c42402f51473a84
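With hidden_init=None the first step is h_1 = f_1 * x_1, i.e. the recurrence effectively starts from a zero hidden state; the compiled kernel above simply unrolls the four time steps. A check against a hand-written loop, assuming the class above is defined:

```python
import torch

torch.manual_seed(0)
f, x = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = CPUForgetMult()(f, x)
prev = torch.zeros(4, 4, 4)
for t in range(4):
    prev = f[t] * x[t] + (1 - f[t]) * prev
assert torch.allclose(out[-1], prev)
```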

---

entry_point: SurnameClassifier
original_triton_python_code:

```python
from torch.nn import Module
import torch
from torch.nn import Linear
from torch.nn.functional import softmax
from torch.nn.functional import relu
from torch.nn.functional import dropout


class SurnameClassifier(Module):

    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int
        ) -> None:
        super().__init__()
        self.fc1 = Linear(input_dim, hidden_dim)
        self.fc2 = Linear(hidden_dim, output_dim)

    def forward(self, x_in, apply_activator: bool=False):
        intermediate_vector = relu(self.fc1(x_in))
        # NB: functional dropout defaults to training=True, so it is
        # applied on every forward pass, even under model.eval().
        prediction_vector = self.fc2(dropout(intermediate_vector, p=0.5))
        if apply_activator:
            prediction_vector = softmax(prediction_vector, dim=1)
        return prediction_vector


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
```

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_5 return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_4, buf6 class SurnameClassifierNew(Module): def __init__(self, input_dim: 'int', hidden_dim: 'int', output_dim: 'int' ) ->None: super().__init__() self.fc1 = Linear(input_dim, hidden_dim) self.fc2 = Linear(hidden_dim, output_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
repo_name: dbradf/nlp-pytorch
module_name: SurnameClassifier
synthetic: false
uuid: 3418
licenses: ["Apache-2.0"]
stars: 0
sha: 957e3c5a1edf1f2ae9a8e281729395bed886bc87
repo_link: https://github.com/dbradf/nlp-pytorch/tree/957e3c5a1edf1f2ae9a8e281729395bed886bc87
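The always-on dropout noted in the comment above is observable directly: two forward passes on the same input will, almost surely, differ even in eval mode. A sketch of that effect, assuming the class above is defined (the inequality holds with overwhelming probability, not by guarantee):

```python
import torch

torch.manual_seed(0)
clf = SurnameClassifier(input_dim=4, hidden_dim=4, output_dim=4)
clf.eval()                       # no effect on functional dropout
x = torch.rand(4, 4, 4, 4)
assert not torch.equal(clf(x), clf(x))
```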

---

entry_point: LeafClassifier
original_triton_python_code:

```python
import torch
import torch.utils.data
from torch import nn


class LeafClassifier(nn.Module):

    def __init__(self, feature_size, hidden_size):
        super(LeafClassifier, self).__init__()
        self.mlp1 = nn.Linear(feature_size, hidden_size)
        # Single raw logit per leaf; pair with a logits-based loss.
        self.mlp2 = nn.Linear(hidden_size, 1)

    def forward(self, input_feature):
        output = torch.relu(self.mlp1(input_feature))
        output = self.mlp2(output)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'feature_size': 4, 'hidden_size': 4}]
```

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf4 class LeafClassifierNew(nn.Module): def __init__(self, feature_size, hidden_size): super(LeafClassifierNew, self).__init__() self.mlp1 = nn.Linear(feature_size, hidden_size) self.mlp2 = nn.Linear(hidden_size, 1) def forward(self, input_0): primals_1 = self.mlp1.weight primals_2 = self.mlp1.bias primals_4 = self.mlp2.weight primals_5 = self.mlp2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
repo_name: dips4717/ui-hier-net
module_name: LeafClassifier
synthetic: false
uuid: 3419
licenses: ["MIT"]
stars: 0
sha: 7c93168b6150ea00e15638504cf561eda98de5c6
repo_link: https://github.com/dips4717/ui-hier-net/tree/7c93168b6150ea00e15638504cf561eda98de5c6
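Because the head emits one raw logit per leaf, it is meant to feed a logit-based criterion rather than apply a sigmoid internally. A usage sketch, assuming the class above is defined; the all-ones target is purely illustrative:

```python
import torch

clf = LeafClassifier(feature_size=4, hidden_size=4)
logits = clf(torch.rand(4, 4, 4, 4))     # (..., 1) raw logits
assert logits.shape == (4, 4, 4, 1)
loss = torch.nn.BCEWithLogitsLoss()(logits, torch.ones_like(logits))
```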

---

entry_point: ConvolutionModule
original_triton_python_code:

```python
import torch
from torch import Tensor
from torch import nn


class Swish(torch.nn.Module):
    """Construct a Swish object."""

    def forward(self, x: Tensor) -> Tensor:
        """Return Swish activation function."""
        return x * torch.sigmoid(x)


class ConvolutionModule(nn.Module):
    """ConvolutionModule in Conformer model.

    Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py

    Args:
        channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of conv layers.
        bias (bool): Whether to use bias in conv layers (default=True).
    """

    def __init__(self, channels: int, kernel_size: int, bias: bool=True
        ) -> None:
        """Construct a ConvolutionModule object."""
        super(ConvolutionModule, self).__init__()
        assert (kernel_size - 1) % 2 == 0
        self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
            kernel_size=1, stride=1, padding=0, bias=bias)
        self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
            stride=1, padding=(kernel_size - 1) // 2, groups=channels,
            bias=bias)
        self.norm = nn.LayerNorm(channels)
        self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
            stride=1, padding=0, bias=bias)
        self.activation = Swish()

    def forward(self, x: Tensor) -> Tensor:
        """Compute convolution module.

        Args:
            x: Input tensor (#time, batch, channels).

        Returns:
            Tensor: Output tensor (#time, batch, channels).
        """
        x = x.permute(1, 2, 0)
        x = self.pointwise_conv1(x)
        x = nn.functional.glu(x, dim=1)
        x = self.depthwise_conv(x)
        x = x.permute(0, 2, 1)
        x = self.norm(x)
        x = x.permute(0, 2, 1)
        x = self.activation(x)
        x = self.pointwise_conv2(x)
        return x.permute(2, 0, 1)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'kernel_size': 1}]
```

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_glu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * 
YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y1 = yindex // 4 y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.sigmoid(tmp8) tmp10 = tmp8 * tmp9 tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp10, xmask & ymask) tl.store(out_ptr2 + (x2 + 4 * y3), tmp10, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 4), (32, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(128)](buf2, primals_3, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = buf0 del buf0 triton_poi_fused_glu_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=4, bias=None) assert_size_stride(buf4, (4, 4, 4), (16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_3[grid(64)](buf5, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_4[grid(16)](buf5, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_mul_native_layer_norm_sigmoid_5[grid( 16, 4)](buf5, buf6, buf7, primals_6, primals_7, buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf6 del buf7 buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4), (16, 4, 1)) del buf10 buf12 = buf11 del buf11 triton_poi_fused_convolution_3[grid(64)](buf12, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 return (reinterpret_tensor(buf12, (4, 4, 4), (1, 16, 4), 0), primals_2, primals_4, primals_6, primals_7, primals_8, reinterpret_tensor( primals_1, (4, 4, 4), 
(4, 1, 16), 0), buf2, buf3, buf5, buf9) class Swish(torch.nn.Module): """Construct a Swish object.""" def forward(self, x: 'Tensor') ->Tensor: """Return the Swish activation function.""" return x * torch.sigmoid(x) class ConvolutionModuleNew(nn.Module): """ConvolutionModule in Conformer model. Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py Args: channels (int): The number of channels of conv layers. kernel_size (int): Kernel size of conv layers. bias (bool): Whether to use bias in conv layers (default=True). """ def __init__(self, channels: 'int', kernel_size: 'int', bias: 'bool'=True ) ->None: """Construct a ConvolutionModule object.""" super(ConvolutionModuleNew, self).__init__() assert (kernel_size - 1) % 2 == 0 self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias) self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias =bias) self.norm = nn.LayerNorm(channels) self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=bias) self.activation = Swish() def forward(self, input_0): primals_2 = self.pointwise_conv1.weight primals_3 = self.pointwise_conv1.bias primals_4 = self.depthwise_conv.weight primals_5 = self.depthwise_conv.bias primals_6 = self.norm.weight primals_7 = self.norm.bias primals_8 = self.pointwise_conv2.weight primals_9 = self.pointwise_conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
desh2608/icefall
ConvolutionModule
false
3,420
[ "Apache-2.0" ]
0
1603744469d167d848e074f2ea98c587153205fa
https://github.com/desh2608/icefall/tree/1603744469d167d848e074f2ea98c587153205fa
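A quick usage sketch for the ConvolutionModule record above (assumptions: a CUDA device, and the channels=4/kernel_size=1 configuration from get_init_inputs; variable names are illustrative):

import torch

m = ConvolutionModuleNew(channels=4, kernel_size=1).cuda()
x = torch.rand(4, 4, 4, device='cuda')  # (time, batch, channels), as the docstring states
y = m(x)  # same (4, 4, 4) shape; kernel_size=1 keeps the time axis intact
print(y.shape)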
LocalNet
import torch import torch.nn as nn class LocalNet(nn.Module): def forward(self, x_in): """Defines a double convolution :param x_in: input convolutional features :returns: convolutional features :rtype: Tensor """ x = self.lrelu(self.conv1(self.refpad(x_in))) x = self.lrelu(self.conv2(self.refpad(x))) return x def __init__(self, in_channels=16, out_channels=64): """Initialisation function :param in_channels: number of input channels :param out_channels: number of output channels :returns: N/A :rtype: N/A """ super(LocalNet, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1) self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1) self.lrelu = nn.LeakyReLU() self.refpad = nn.ReflectionPad2d(1) def get_inputs(): return [torch.rand([4, 16, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 0.01 tmp5 = tmp3 * tmp4 tmp6 = tl.where(tmp0, tmp3, tmp5) tl.store(out_ptr0 + x5, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_2, (64, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(2304)](primals_1, buf0, 2304, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1)) buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_1[grid(4096)](buf1, primals_3, buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch. float32) triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2[grid(9216)]( buf2, buf1, primals_3, buf3, 9216, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1)) buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool) buf6 = buf1 del buf1 triton_poi_fused_convolution_leaky_relu_3[grid(4096)](buf4, primals_5, buf5, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5 class LocalNetNew(nn.Module): def __init__(self, in_channels=16, out_channels=64): """Initialisation function :param in_channels: number of input channels :param out_channels: number of output channels :returns: N/A :rtype: N/A """ super(LocalNetNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1) self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1) self.lrelu = nn.LeakyReLU() self.refpad = nn.ReflectionPad2d(1) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
deshwalmahesh/CURL---cpu-gpu
LocalNet
false
3,421
[ "BSD-3-Clause" ]
0
f4e87275b6cce556b9e04a188cf7ae13d810d82a
https://github.com/deshwalmahesh/CURL---cpu-gpu/tree/f4e87275b6cce556b9e04a188cf7ae13d810d82a
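A usage sketch for the LocalNet record above (assumes a CUDA device; the default in_channels=16/out_channels=64 match the (4, 16, 4, 4) input from get_inputs):

import torch

net = LocalNetNew(in_channels=16, out_channels=64).cuda()
x = torch.rand(4, 16, 4, 4, device='cuda')
y = net(x)  # (4, 64, 4, 4): each ReflectionPad2d(1) + 3x3 conv pair preserves spatial size
print(y.shape)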
DIoU_loss
import torch def Interction_Union(outputs, targets): width_o = outputs[:, 2] width_t = targets[:, 2] height_o = outputs[:, 3] height_t = targets[:, 3] x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2, targets[:, 0] + targets[:, 2] / 2), 1), 1)[0] x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2, targets[:, 0] - targets[:, 2] / 2), 1), 1)[0] y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2, targets[:, 1] + targets[:, 3] / 2), 1), 1)[0] y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2, targets[:, 1] - targets[:, 3] / 2), 1), 1)[0] Area_o = torch.mul(width_o, height_o) Area_t = torch.mul(width_t, height_t) Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min)) Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min)) Inter = torch.mul(Inter_w, Inter_t) zeros = torch.zeros_like(Inter) Inter = torch.where(Inter < 0, zeros, Inter) Union = torch.add(Area_o, Area_t).sub(Inter) return Inter, Union, x_max, x_min, y_max, y_min def Center_points(outputs, targets): x_o = outputs[:, 0] y_o = outputs[:, 1] x_t = targets[:, 0] y_t = targets[:, 1] return x_o, y_o, x_t, y_t class DIoU_loss(torch.nn.Module): def __init__(self): super().__init__() def forward(self, outputs, targets): Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs, targets) IoU = torch.div(Inter, Union) C_width = x_max.sub(x_min) C_height = y_max.sub(y_min) C = torch.mul(C_width, C_height) x_o, y_o, x_t, y_t = Center_points(outputs, targets) dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2)) R_DIoU = torch.div(dis, torch.pow(C, 2)) ones = torch.ones_like(IoU) loss = torch.add(ones.sub(IoU), R_DIoU) zeros = torch.zeros_like(loss) loss = torch.where(loss < 0, zeros, loss) return torch.sum(loss) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0( in_ptr0, in_ptr1, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 % 4 r0 = rindex % 4 r2 = rindex // 16 r3 = rindex % 16 tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None) tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None) tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None) tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None) tmp119 = tl.load(in_ptr0 + (r3 + 64 * r2), None) tmp120 = tl.load(in_ptr1 + (r3 + 64 * r2), None) tmp123 = tl.load(in_ptr0 + (16 + r3 + 64 * r2), None) tmp124 = tl.load(in_ptr1 + (16 + r3 + 64 * r2), None) tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK, RBLOCK]), tmp4, other=0.0) tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp17 = tmp16 * tmp7 tmp18 = tmp15 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tmp22 = 4 + r1 tmp24 = tmp22 < tmp3 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp27 = tmp26 * tmp7 tmp28 = tmp25 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp22 >= tmp3 tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp35 = tmp34 * tmp7 tmp36 = tmp33 + tmp35 tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp31, tmp36, tmp37) tmp39 = tl.where(tmp24, tmp30, tmp38) tmp40 = triton_helpers.maximum(tmp21, tmp39) tmp41 = tmp5 - tmp8 tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype) tmp43 = tl.where(tmp4, tmp41, tmp42) tmp44 = tmp15 - tmp17 tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp12, tmp44, tmp45) tmp47 = tl.where(tmp4, tmp43, tmp46) tmp48 = tmp25 - tmp27 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp24, tmp48, tmp49) tmp51 = tmp33 - tmp35 tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp31, tmp51, tmp52) tmp54 = tl.where(tmp24, tmp50, tmp53) tmp55 = triton_helpers.minimum(tmp47, tmp54) tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ 
XBLOCK, RBLOCK]), tmp4, other=0.0) tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp4, other=0.0) tmp58 = tmp57 * tmp7 tmp59 = tmp56 + tmp58 tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype) tmp61 = tl.where(tmp4, tmp59, tmp60) tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp12, other=0.0) tmp64 = tmp63 * tmp7 tmp65 = tmp62 + tmp64 tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype) tmp67 = tl.where(tmp12, tmp65, tmp66) tmp68 = tl.where(tmp4, tmp61, tmp67) tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 * r2, [XBLOCK, RBLOCK]), tmp24, other=0.0) tmp71 = tmp70 * tmp7 tmp72 = tmp69 + tmp71 tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype) tmp74 = tl.where(tmp24, tmp72, tmp73) tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [ XBLOCK, RBLOCK]), tmp31, other=0.0) tmp77 = tmp76 * tmp7 tmp78 = tmp75 + tmp77 tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype) tmp80 = tl.where(tmp31, tmp78, tmp79) tmp81 = tl.where(tmp24, tmp74, tmp80) tmp82 = triton_helpers.maximum(tmp68, tmp81) tmp83 = tmp56 - tmp58 tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype) tmp85 = tl.where(tmp4, tmp83, tmp84) tmp86 = tmp62 - tmp64 tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype) tmp88 = tl.where(tmp12, tmp86, tmp87) tmp89 = tl.where(tmp4, tmp85, tmp88) tmp90 = tmp69 - tmp71 tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype) tmp92 = tl.where(tmp24, tmp90, tmp91) tmp93 = tmp75 - tmp77 tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype) tmp95 = tl.where(tmp31, tmp93, tmp94) tmp96 = tl.where(tmp24, tmp92, tmp95) tmp97 = triton_helpers.minimum(tmp89, tmp96) tmp100 = tmp98 + tmp99 tmp101 = tmp40 - tmp55 tmp102 = tmp100 - tmp101 tmp105 = tmp103 + tmp104 tmp106 = tmp82 - tmp97 tmp107 = tmp105 - tmp106 tmp108 = tmp102 * tmp107 tmp109 = 0.0 tmp110 = tmp108 < tmp109 tmp111 = tl.where(tmp110, tmp109, tmp108) tmp112 = tmp98 * tmp103 tmp113 = tmp99 * tmp104 tmp114 = tmp112 + tmp113 tmp115 = tmp114 - tmp111 tmp116 = tmp111 / tmp115 tmp117 = 1.0 tmp118 = tmp117 - tmp116 tmp121 = tmp119 - tmp120 tmp122 = tmp121 * tmp121 tmp125 = tmp123 - tmp124 tmp126 = tmp125 * tmp125 tmp127 = tmp122 + tmp126 tmp128 = tmp101 * tmp106 tmp129 = tmp128 * tmp128 tmp130 = tmp127 / tmp129 tmp131 = tmp118 + tmp130 tmp132 = tmp131 < tmp109 tmp133 = tl.where(tmp132, tmp109, tmp131) tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = tl.sum(tmp134, 1)[:, None] tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp136, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf6 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0[ grid(1)](arg0_1, arg1_1, buf6, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf6, def Interction_Union(outputs, targets): width_o = outputs[:, 2] width_t = targets[:, 2] height_o = outputs[:, 3] height_t = targets[:, 3] x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2, targets[:, 0] + 
targets[:, 2] / 2), 1), 1)[0] x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2, targets[:, 0] - targets[:, 2] / 2), 1), 1)[0] y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2, targets[:, 1] + targets[:, 3] / 2), 1), 1)[0] y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2, targets[:, 1] - targets[:, 3] / 2), 1), 1)[0] Area_o = torch.mul(width_o, height_o) Area_t = torch.mul(width_t, height_t) Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min)) Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min)) Inter = torch.mul(Inter_w, Inter_t) zeros = torch.zeros_like(Inter) Inter = torch.where(Inter < 0, zeros, Inter) Union = torch.add(Area_o, Area_t).sub(Inter) return Inter, Union, x_max, x_min, y_max, y_min def Center_points(outputs, targets): x_o = outputs[:, 0] y_o = outputs[:, 1] x_t = targets[:, 0] y_t = targets[:, 1] return x_o, y_o, x_t, y_t class DIoU_lossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
debrouchovea/ReproduceGoturn
DIoU_loss
false
3,422
[ "MIT" ]
0
d60f13c781ca612cacc17536530bbee989bdfa45
https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45
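A usage sketch for the DIoU_loss record above (assumes a CUDA device; the module has no parameters, so only the inputs need to live on the GPU):

import torch

crit = DIoU_lossNew()
boxes_a = torch.rand(4, 4, 4, 4, device='cuda')
boxes_b = torch.rand(4, 4, 4, 4, device='cuda')
loss = crit(boxes_a, boxes_b)  # 0-dim tensor: the fused kernel sums all 64 per-position losses
print(loss.item())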
ContrastiveLoss
import torch import torch.nn as nn class ContrastiveLoss(nn.Module): def __init__(self, margin=0.2): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, imgs, caps): scores = torch.mm(imgs, caps.t()) diag = scores.diag() cost_s = torch.clamp((self.margin - diag).expand_as(scores) + scores, min=0) cost_im = torch.clamp((self.margin - diag.view(-1, 1)).expand_as( scores) + scores, min=0) diag_s = torch.diag(cost_s.diag()) diag_im = torch.diag(cost_im.diag()) cost_s = cost_s - diag_s cost_im = cost_im - diag_im return cost_s.sum() + cost_im.sum() def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_clamp_diag_embed_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + r2, None) tmp17 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last') tmp1 = 0.2 tmp2 = tmp1 - tmp0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = r0 tmp8 = r1 tmp9 = tmp7 == tmp8 tmp10 = tmp2 + tmp0 tmp11 = triton_helpers.maximum(tmp10, tmp5) tmp12 = tl.where(tmp9, tmp11, tmp5) tmp13 = tmp6 - tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp18 = tmp1 - tmp17 tmp19 = tmp18 + tmp3 tmp20 = triton_helpers.maximum(tmp19, tmp5) tmp21 = tmp20 - tmp12 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = tmp16 + tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_clamp_diag_embed_sub_sum_0[grid(1)](buf3, buf0, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf3, class ContrastiveLossNew(nn.Module): def __init__(self, margin=0.2): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
debayan/dsve-loc
ContrastiveLoss
false
3,423
[ "BSD-3-Clause-Clear" ]
0
21b1e1837668b6daa0881514d0756e9bec039fcb
https://github.com/debayan/dsve-loc/tree/21b1e1837668b6daa0881514d0756e9bec039fcb
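A usage sketch for the ContrastiveLoss record above (assumes a CUDA device). Note that the fused kernel bakes the traced default margin into the constant tmp1 = 0.2, so a different margin passed at construction would not reach the compiled path:

import torch

crit = ContrastiveLossNew(margin=0.2)
imgs = torch.rand(4, 4, device='cuda')
caps = torch.rand(4, 4, device='cuda')
loss = crit(imgs, caps)  # scalar: cost_s.sum() + cost_im.sum() with the diagonals removed
print(loss.item())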
SimpleFloorModule
import torch import torch.jit import torch.onnx import torch.nn class SimpleFloorModule(torch.nn.Module): def forward(self, a, b): c = a + b return torch.floor(c) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_floor_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.floor(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_floor_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleFloorModuleNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
andreas-hommel/glow
SimpleFloorModule
false
3,424
[ "Apache-2.0" ]
0
2bbbf8188a2a941e85677c83f2146bbd076a262e
https://github.com/andreas-hommel/glow/tree/2bbbf8188a2a941e85677c83f2146bbd076a262e
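A quick equivalence check for the SimpleFloorModule record above (a sketch; assumes a CUDA device):

import torch

m = SimpleFloorModuleNew()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
out = m(a, b)
torch.testing.assert_close(out, torch.floor(a + b))  # the fused add+floor kernel matches eager mode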
biaffine_mapping
import torch import torch.nn as nn import torch.utils.data.dataloader import torch.nn class biaffine_mapping(nn.Module): def __init__(self, input_size_x, input_size_y, output_size, bias_x, bias_y, initializer=None): super(biaffine_mapping, self).__init__() self.bias_x = bias_x self.bias_y = bias_y self.output_size = output_size self.initilizer = None if self.bias_x: input_size1 = input_size_x + 1 input_size2 = input_size_y + 1 self.biaffine_map = nn.Parameter(torch.Tensor(input_size1, output_size, input_size2)) self.initialize() def initialize(self): if self.initilizer is None: torch.nn.init.orthogonal_(self.biaffine_map) else: self.initilizer(self.biaffine_map) def forward(self, x, y): batch_size, bucket_size = x.shape[0], x.shape[1] if self.bias_x: x = torch.cat([x, torch.ones([batch_size, bucket_size, 1], device=x.device)], axis=2) if self.bias_y: y = torch.cat([y, torch.ones([batch_size, bucket_size, 1], device=y.device)], axis=2) x_set_size, y_set_size = x.shape[-1], y.shape[-1] x = x.reshape(-1, x_set_size) biaffine_map = self.biaffine_map.reshape(x_set_size, -1) biaffine_mapping = torch.matmul(x, biaffine_map).reshape(batch_size, -1, y_set_size) biaffine_mapping = biaffine_mapping.bmm(torch.transpose(y, 1, 2) ).reshape(batch_size, bucket_size, self.output_size, bucket_size) biaffine_mapping = biaffine_mapping.transpose(2, 3) return biaffine_mapping def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size_x': 4, 'input_size_y': 4, 'output_size': 4, 'bias_x': 4, 'bias_y': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = 1.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (5, 4, 5), (20, 5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_view_0[grid(80)](primals_1, buf0, 80, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 20), (20, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (5, 20), (20, 1), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused_cat_view_0[grid(80)](primals_2, buf2, 80, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 5), (80, 5, 1), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3) del buf1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 1, 4), 0 ), buf2, reinterpret_tensor(buf0, (5, 16), (1, 5), 0) class biaffine_mappingNew(nn.Module): def __init__(self, input_size_x, input_size_y, output_size, bias_x, bias_y, initializer=None): super(biaffine_mappingNew, self).__init__() self.bias_x = bias_x self.bias_y = bias_y self.output_size = output_size self.initilizer = None if self.bias_x: input_size1 = input_size_x + 1 input_size2 = input_size_y + 1 self.biaffine_map = nn.Parameter(torch.Tensor(input_size1, output_size, input_size2)) self.initialize() def initialize(self): if self.initilizer is None: torch.nn.init.orthogonal_(self.biaffine_map) else: self.initilizer(self.biaffine_map) def forward(self, input_0, input_1): primals_3 = self.biaffine_map primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
ciaochiaociao/CLNER
biaffine_mapping
false
3,425
[ "MIT" ]
0
a31fb1c3bfdaa5d62147dc892489d29a85e6b385
https://github.com/ciaochiaociao/CLNER/tree/a31fb1c3bfdaa5d62147dc892489d29a85e6b385
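A usage sketch for the biaffine_mapping record above (assumes a CUDA device; get_init_inputs passes bias_x=bias_y=4, which is merely truthy, so True is used here):

import torch

m = biaffine_mappingNew(input_size_x=4, input_size_y=4, output_size=4,
    bias_x=True, bias_y=True).cuda()
x = torch.rand(4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, device='cuda')
scores = m(x, y)  # (4, 4, 4, 4): (batch, bucket, bucket, output_size)
print(scores.shape)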
MultiHeadAttention
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data class MultiHeadAttention(nn.Module): def __init__(self, channels, out_channels, n_heads, p_dropout=0.0, window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.p_dropout = p_dropout self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.proximal_init = proximal_init self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) nn.init.xavier_uniform_(self.conv_v.weight) if proximal_init: with torch.no_grad(): self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query / math.sqrt(self.k_channels), key. transpose(-2, -1)) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query / math.sqrt( self.k_channels), key_relative_embeddings) scores_local = self._relative_position_to_absolute_position( rel_logits) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: assert t_s == t_t, 'Local attention is only available for self-attention.' block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores.masked_fill(block_mask == 0, -10000.0) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. 
emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, [0, 0, pad_length, pad_length]) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, [0, 1]) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, [0, length - 1]) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, [0, length - 1]) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = F.pad(x_flat, [length, 0]) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) 
assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf3, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf4 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = buf2 del buf2 triton_poi_fused_convolution_1[grid(64)](buf8, primals_8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_1[grid(64)](buf11, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class MultiHeadAttentionNew(nn.Module): def __init__(self, channels, out_channels, n_heads, p_dropout=0.0, window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.p_dropout = p_dropout self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.proximal_init = proximal_init self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = 
nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) nn.init.xavier_uniform_(self.conv_v.weight) if proximal_init: with torch.no_grad(): self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query / math.sqrt(self.k_channels), key. transpose(-2, -1)) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query / math.sqrt( self.k_channels), key_relative_embeddings) scores_local = self._relative_position_to_absolute_position( rel_logits) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: assert t_s == t_t, 'Local attention is only available for self-attention.' block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores.masked_fill(block_mask == 0, -10000.0) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. 
emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, [0, 0, pad_length, pad_length]) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, [0, 1]) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, [0, length - 1]) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, [0, length - 1]) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = F.pad(x_flat, [length, 0]) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def forward(self, input_0, input_1): primals_1 = self.conv_q.weight primals_2 = self.conv_q.bias primals_4 = self.conv_k.weight primals_5 = self.conv_k.bias primals_7 = self.conv_v.weight primals_8 = self.conv_v.bias primals_9 = self.conv_o.weight primals_10 = self.conv_o.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
dimitrijejankov/vits
MultiHeadAttention
false
3,426
[ "MIT" ]
0
d2f6385c7946c2355433804796b541ffae0a3d9f
https://github.com/dimitrijejankov/vits/tree/d2f6385c7946c2355433804796b541ffae0a3d9f
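A usage sketch for the MultiHeadAttention record above (assumes a CUDA device). The compiled call() covers only the traced configuration: window_size=None, no attention mask, no proximal bias and no block-length masking, so those branches of the original forward are absent from the Triton path:

import torch

attn = MultiHeadAttentionNew(channels=4, out_channels=4, n_heads=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')  # (batch, channels, time)
c = torch.rand(4, 4, 4, device='cuda')
out = attn(x, c)  # (4, 4, 4); the softmax weights live in buf7 inside call()
print(out.shape)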
ResBlock
import torch from torch import nn class ResBlock(nn.Module): def __init__(self, in_chans, out_chans, drop_prob, same='False'): super().__init__() self.in_chans = in_chans self.out_chans = out_chans self.drop_prob = drop_prob self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=1) self.conv1 = nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1) self.in1 = nn.InstanceNorm2d(out_chans) self.relu = nn.PReLU() self.conv2 = nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1) self.in2 = nn.InstanceNorm2d(out_chans) self.same = same def forward(self, input): shortcuts = self.conv(input) if self.same == 'True': shortcuts = input out = self.conv1(input) out = self.in1(out) out = self.relu(out) out = self.conv2(out) out = self.in2(out) out += shortcuts out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_chans': 4, 'out_chans': 4, 'drop_prob': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + 0) tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp2 - tmp12 tmp25 = tmp24 * tmp23 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp30 = tmp29 * tmp25 tmp31 = tl.where(tmp27, tmp25, tmp30) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(out_ptr1 + (r2 + 16 * x3), tmp31, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1( in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp24 = tl.load(in_out_ptr2 + (r2 + 16 * x3), xmask, other=0.0) tmp25 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + 0) tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp26 = tmp24 + tmp25 tmp27 = tmp2 - tmp12 tmp28 = tmp27 * tmp23 tmp29 = tmp28 + tmp26 tmp30 = 0.0 tmp31 = 
tmp29 > tmp30 tmp34 = tmp33 * tmp29 tmp35 = tl.where(tmp31, tmp29, tmp34) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(in_out_ptr2 + (r2 + 16 * x3), tmp26, xmask) tl.store(out_ptr1 + (r2 + 16 * x3), tmp35, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf7 = reinterpret_tensor(buf5, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf5 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_0[ grid(16)](buf3, buf7, primals_5, primals_6, buf4, buf8, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) buf14 = reinterpret_tensor(buf12, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf12 buf1 = buf0 del buf0 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__native_batch_norm_legit__prelu_kernel_convolution_1[ grid(16)](buf10, buf14, buf1, primals_8, primals_2, primals_6, buf11, buf15, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_8 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_7, buf1, buf3, buf4, buf7, buf8, buf10, buf11, buf14) class ResBlockNew(nn.Module): def __init__(self, in_chans, out_chans, drop_prob, same='False'): super().__init__() self.in_chans = in_chans self.out_chans = out_chans self.drop_prob = drop_prob self.conv = nn.Conv2d(in_chans, out_chans, kernel_size=1) self.conv1 = nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1) self.in1 = nn.InstanceNorm2d(out_chans) self.relu = nn.PReLU() self.conv2 = nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1) self.in2 = nn.InstanceNorm2d(out_chans) self.same = same def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.relu.weight primals_7 = self.conv2.weight primals_8 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
divelab/mri
ResBlock
false
3427
[ "MIT" ]
0
e181b446acfc6f9ac3f42657f710dd583e77d1aa
https://github.com/divelab/mri/tree/e181b446acfc6f9ac3f42657f710dd583e77d1aa
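A quick way to exercise the compiled ResBlockNew above is to instantiate it with the shapes guarded by assert_size_stride in call(). This is a minimal sketch, assuming a CUDA device with Triton available; drop_prob=0.0 is an arbitrary value (the constructor only stores it).

import torch

block = ResBlockNew(in_chans=4, out_chans=4, drop_prob=0.0).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # matches assert_size_stride(primals_3, ...)
y = block(x)
print(y.shape)  # torch.Size([4, 4, 4, 4]) -- buf15, returned as output[0]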
MiniBatchDiscrimination
import torch import torch.nn as nn from torch.nn import init class MiniBatchDiscrimination(nn.Module): """ source: https://gist.github.com/t-ae/732f78671643de97bbe2c46519972491 paper: Salimans et al. 2016. Improved Methods for Training GANs """ def __init__(self, in_features, out_features, kernel_dims, mean=False): super().__init__() self.in_features = in_features self.out_features = out_features self.kernel_dims = kernel_dims self.mean = mean self.T = nn.Parameter(torch.Tensor(in_features, out_features, kernel_dims), requires_grad=True) init.normal_(self.T, 0, 1) def forward(self, x): matrices = x.mm(self.T.view(self.in_features, -1)) matrices = matrices.view(-1, self.out_features, self.kernel_dims) M = matrices.unsqueeze(0) M_T = M.permute(1, 0, 2, 3) norm = torch.abs(M - M_T).sum(3) expnorm = torch.exp(-norm) o_b = expnorm.sum(0) - 1 if self.mean: o_b /= x.size(0) - 1 x = torch.cat([x, o_b], 1) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4, 'kernel_dims': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_abs_exp_neg_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (16 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr0 + (17 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (18 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (19 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr0 + (32 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr0 + (33 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp46 = tl.load(in_ptr0 + (34 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (35 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp57 = tl.load(in_ptr0 + (48 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp60 = tl.load(in_ptr0 + (49 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp64 = tl.load(in_ptr0 + (50 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp68 = tl.load(in_ptr0 + (51 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl_math.abs(tmp16) tmp18 = tmp13 + tmp17 tmp19 = -tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp0 - tmp21 tmp23 = tl_math.abs(tmp22) tmp25 = tmp4 - tmp24 tmp26 = tl_math.abs(tmp25) tmp27 = tmp23 + tmp26 tmp29 = tmp9 - tmp28 tmp30 = tl_math.abs(tmp29) tmp31 = tmp27 + tmp30 tmp33 = tmp14 - tmp32 tmp34 = tl_math.abs(tmp33) tmp35 = tmp31 + tmp34 tmp36 = -tmp35 tmp37 = tl_math.exp(tmp36) tmp38 = tmp20 + tmp37 tmp40 = tmp0 - tmp39 tmp41 = tl_math.abs(tmp40) tmp43 = tmp4 - tmp42 tmp44 = tl_math.abs(tmp43) tmp45 = tmp41 + tmp44 tmp47 = tmp9 - tmp46 tmp48 = tl_math.abs(tmp47) tmp49 = tmp45 + tmp48 tmp51 = tmp14 - tmp50 tmp52 = tl_math.abs(tmp51) tmp53 = tmp49 + tmp52 tmp54 = -tmp53 tmp55 = tl_math.exp(tmp54) tmp56 = tmp38 + tmp55 tmp58 = tmp0 - tmp57 tmp59 = tl_math.abs(tmp58) tmp61 = tmp4 - tmp60 tmp62 = tl_math.abs(tmp61) tmp63 = tmp59 + tmp62 tmp65 = tmp9 - tmp64 tmp66 = tl_math.abs(tmp65) tmp67 = tmp63 + tmp66 tmp69 = tmp14 - tmp68 tmp70 = tl_math.abs(tmp69) tmp71 = tmp67 + tmp70 tmp72 = 
-tmp71 tmp73 = tl_math.exp(tmp72) tmp74 = tmp56 + tmp73 tl.store(out_ptr0 + x2, tmp74, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = 1.0 tmp11 = tmp9 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16), (16, 1), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_exp_neg_sub_sum_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, buf1, buf2, 32, XBLOCK= 32, num_warps=1, num_stages=1) del buf1 return buf2, buf0, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class MiniBatchDiscriminationNew(nn.Module): """ source: https://gist.github.com/t-ae/732f78671643de97bbe2c46519972491 paper: Salimans et al. 2016. Improved Methods for Training GANs """ def __init__(self, in_features, out_features, kernel_dims, mean=False): super().__init__() self.in_features = in_features self.out_features = out_features self.kernel_dims = kernel_dims self.mean = mean self.T = nn.Parameter(torch.Tensor(in_features, out_features, kernel_dims), requires_grad=True) init.normal_(self.T, 0, 1) def forward(self, input_0): primals_2 = self.T primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
danielnflam/GAN-Tests
MiniBatchDiscrimination
false
3428
[ "BSD-3-Clause" ]
0
f112e27b802d717f64a8f2cfa79b9898667da14c
https://github.com/danielnflam/GAN-Tests/tree/f112e27b802d717f64a8f2cfa79b9898667da14c
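One way to sanity-check a row like this is to copy the eager module's parameters into the compiled variant and compare outputs on the inputs described by get_inputs(). A minimal sketch, assuming a CUDA device:

import torch

eager = MiniBatchDiscrimination(in_features=4, out_features=4, kernel_dims=4).cuda()
fused = MiniBatchDiscriminationNew(in_features=4, out_features=4, kernel_dims=4).cuda()
with torch.no_grad():
    fused.T.copy_(eager.T)  # share the randomly initialized kernel tensor

x = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x), rtol=1e-4, atol=1e-4)
# both return shape (4, 8): the input concatenated with the o_b similarity features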
BetaMish
import torch import torch.nn as nn class BetaMish(nn.Module): def __init__(self): super().__init__() def forward(self, x): beta = 1.5 return x * torch.tanh(torch.log(torch.pow(1 + torch.exp(x), beta))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_log_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 1.5 tmp5 = libdevice.pow(tmp3, tmp4) tmp6 = tl_math.log(tmp5) tmp7 = libdevice.tanh(tmp6) tmp8 = tmp0 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_log_mul_pow_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class BetaMishNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dcrmg/Efficient-Segmentation-Networks
BetaMish
false
3429
[ "MIT" ]
0
e2f2d90d69e4e9af464678b0f02bc754c28f643d
https://github.com/dcrmg/Efficient-Segmentation-Networks/tree/e2f2d90d69e4e9af464678b0f02bc754c28f643d
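The fused kernel above evaluates x * tanh(log((1 + exp(x))**1.5)) elementwise, and since log((1 + e^x)^1.5) = 1.5 * log(1 + e^x) = 1.5 * softplus(x), the activation is x * tanh(1.5 * softplus(x)) (plain Mish when beta = 1). A sketch of an eager cross-check, assuming CUDA; note F.softplus thresholds large inputs for stability, so agreement is exact only away from extreme values:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = x * torch.tanh(1.5 * F.softplus(x))
torch.testing.assert_close(BetaMishNew()(x), ref, rtol=1e-5, atol=1e-5)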
BasicMotionEncoder
from _paritybench_helpers import _mock_config import torch import torch.nn.functional as F import torch.nn as nn class BasicMotionEncoder(nn.Module): def __init__(self, args): super(BasicMotionEncoder, self).__init__() self.args = args cor_planes = args.corr_levels * (2 * args.corr_radius + 1) self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0) self.convc2 = nn.Conv2d(64, 64, 3, padding=1) self.convf1 = nn.Conv2d(2, 64, 7, padding=3) self.convf2 = nn.Conv2d(64, 64, 3, padding=1) self.conv = nn.Conv2d(64 + 64, 128 - 2, 3, padding=1) def forward(self, flow, corr): cor = F.relu(self.convc1(corr)) cor = F.relu(self.convc2(cor)) flo = F.relu(self.convf1(flow)) flo = F.relu(self.convf2(flo)) cor_flo = torch.cat([cor, flo], dim=1) out = F.relu(self.conv(cor_flo)) return torch.cat([out, flow], dim=1) def get_inputs(): return [torch.rand([4, 2, 64, 64]), torch.rand([4, 36, 64, 64])] def get_init_inputs(): return [[], {'args': _mock_config(corr_levels=4, corr_radius=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 144 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 36 y1 = yindex // 36 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 36 * x2 + 147456 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 2 y1 = yindex // 2 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 2 * x2 + 98 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 8 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 2 y1 = yindex // 2 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 2 * x2 + 8192 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr3 + (-64 + x0), tmp12, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x2, tmp21, None) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 128 x0 = xindex % 4096 x2 = xindex // 524288 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 126, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (126 * x0 + 516096 * x2 + x1), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (2 * x0 + 8192 * x2 + (-126 + x1)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 126 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (64, 36, 1, 1), (36, 1, 1, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 36, 64, 64), (147456, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (4, 2, 64, 64), (8192, 4096, 64, 1)) assert_size_stride(primals_9, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_10, (64,), (1,)) assert_size_stride(primals_11, (126, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_12, (126,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 36, 64, 64), (147456, 1, 2304, 36), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(144, 4096)](primals_3, buf0, 144, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_1[grid(4096, 9)](primals_4, buf1, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((64, 2, 7, 7), (98, 1, 14, 2), torch.float32) triton_poi_fused_2[grid(128, 49)](primals_6, buf2, 128, 49, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((4, 2, 64, 64), (8192, 1, 128, 2), torch. float32) triton_poi_fused_3[grid(8, 4096)](primals_8, buf3, 8, 4096, XBLOCK= 128, YBLOCK=8, num_warps=4, num_stages=1) del primals_8 buf4 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. 
float32) triton_poi_fused_1[grid(4096, 9)](primals_9, buf4, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_9 buf5 = empty_strided_cuda((126, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16128, 9)](primals_11, buf5, 16128, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_11 buf6 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(1048576)](buf7, primals_2, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf8 = extern_kernels.convolution(buf7, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf9 = extern_kernels.convolution(buf3, buf2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_5[grid(1048576)](buf10, primals_7, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf12 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.float32) triton_poi_fused_cat_6[grid(2097152)](buf8, primals_5, buf11, primals_10, buf12, 2097152, XBLOCK=512, num_warps=8, num_stages=1) buf13 = extern_kernels.convolution(buf12, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 126, 64, 64), (516096, 1, 8064, 126)) buf14 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_7[grid(2097152)](buf13, primals_12, buf3, buf14, 2097152, XBLOCK=512, num_warps=8, num_stages=1) buf15 = empty_strided_cuda((4, 126, 64, 64), (516096, 1, 8064, 126), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(2064384)]( buf13, primals_12, buf15, 2064384, XBLOCK=1024, num_warps=4, num_stages=1) del buf13 del primals_12 buf16 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_9[grid(1048576)]( buf11, primals_10, buf16, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf11 del primals_10 buf17 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_9[grid(1048576)]( buf8, primals_5, buf17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf8 del primals_5 return (buf14, primals_1, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf10, buf12, buf15, buf16, buf17) class BasicMotionEncoderNew(nn.Module): def __init__(self, args): super(BasicMotionEncoderNew, self).__init__() self.args = args cor_planes = args.corr_levels * (2 * args.corr_radius + 1) self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0) self.convc2 = nn.Conv2d(64, 64, 3, padding=1) self.convf1 = nn.Conv2d(2, 64, 7, padding=3) self.convf2 = nn.Conv2d(64, 64, 3, padding=1) self.conv = nn.Conv2d(64 + 64, 128 - 2, 3, padding=1) def forward(self, input_0, input_1): primals_1 = self.convc1.weight primals_2 = 
self.convc1.bias primals_4 = self.convc2.weight primals_5 = self.convc2.bias primals_6 = self.convf1.weight primals_7 = self.convf1.bias primals_9 = self.convf2.weight primals_10 = self.convf2.bias primals_11 = self.conv.weight primals_12 = self.conv.bias primals_8 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
BrianPugh/RAFT-Stereo
BasicMotionEncoder
false
3430
[ "MIT" ]
0
494dd79545411eee56e32540bfd6f45a16c74a19
https://github.com/BrianPugh/RAFT-Stereo/tree/494dd79545411eee56e32540bfd6f45a16c74a19
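This record's module takes a config object; the harness supplies it via _mock_config, but any attribute holder exposing corr_levels and corr_radius works. A sketch reproducing the shapes asserted in call(), assuming CUDA:

import torch
from types import SimpleNamespace

args = SimpleNamespace(corr_levels=4, corr_radius=4)  # cor_planes = 4 * (2*4 + 1) = 36
enc = BasicMotionEncoderNew(args).cuda()
flow = torch.rand(4, 2, 64, 64, device='cuda')   # input_0 -> primals_8
corr = torch.rand(4, 36, 64, 64, device='cuda')  # input_1 -> primals_3
out = enc(flow, corr)
print(out.shape)  # torch.Size([4, 128, 64, 64]): 126 conv channels + the 2 flow channels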
Critic
import torch import torch.nn as nn import torch.nn.functional as F class Encoder(nn.Module): """Encodes the static & dynamic states using 1d Convolution.""" def __init__(self, input_size, hidden_size): super(Encoder, self).__init__() self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1) def forward(self, input): output = self.conv(input) return output class Critic(nn.Module): """Estimates the problem complexity. This is a basic module that just looks at the log-probabilities predicted by the encoder + decoder, and returns an estimate of complexity """ def __init__(self, dynamic_size, hidden_size): super(Critic, self).__init__() self.dynamic_encoder = Encoder(dynamic_size, hidden_size) self.fc1 = nn.Conv1d(hidden_size, 20, kernel_size=1) self.fc2 = nn.Conv1d(20, 20, kernel_size=1) self.fc3 = nn.Conv1d(20, 1, kernel_size=1) for p in self.parameters(): if len(p.shape) > 1: nn.init.xavier_uniform_(p) def forward(self, dynamic): dynamic_hidden = self.dynamic_encoder(dynamic) output = F.relu(self.fc1(dynamic_hidden)) output = F.relu(self.fc2(output)) output = self.fc3(output).sum(dim=2) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dynamic_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp4 + tmp2 tmp6 = tmp3 + tmp5 tmp8 = tmp7 + tmp2 tmp9 = tmp6 + tmp8 tmp11 = tmp10 + tmp2 tmp12 = tmp9 + tmp11 tl.store(out_ptr0 + x0, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (20, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (20, 20, 1), (20, 1, 1)) assert_size_stride(primals_7, (20,), (1,)) assert_size_stride(primals_8, (1, 20, 1), (20, 1, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 20, 4), (80, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(320)](buf3, primals_5, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1,), padding=(0,), 
dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf4, (4, 20, 4), (80, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_1[grid(320)](buf5, primals_7, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf6, (4, 1, 4), (4, 4, 1)) buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused_convolution_sum_2[grid(4)](buf6, primals_9, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf6 del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5) class Encoder(nn.Module): """Encodes the static & dynamic states using 1d Convolution.""" def __init__(self, input_size, hidden_size): super(Encoder, self).__init__() self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1) def forward(self, input): output = self.conv(input) return output class CriticNew(nn.Module): """Estimates the problem complexity. This is a basic module that just looks at the log-probabilities predicted by the encoder + decoder, and returns an estimate of complexity """ def __init__(self, dynamic_size, hidden_size): super(CriticNew, self).__init__() self.dynamic_encoder = Encoder(dynamic_size, hidden_size) self.fc1 = nn.Conv1d(hidden_size, 20, kernel_size=1) self.fc2 = nn.Conv1d(20, 20, kernel_size=1) self.fc3 = nn.Conv1d(20, 1, kernel_size=1) for p in self.parameters(): if len(p.shape) > 1: nn.init.xavier_uniform_(p) def forward(self, input_0): primals_1 = self.dynamic_encoder.conv.weight primals_2 = self.dynamic_encoder.conv.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
dimichai/City-Metro-Network-Expansion-with-RL
Critic
false
3431
[ "MIT" ]
0
54cfec74d89b4e4fc912d480a3025e4c75e3b196
https://github.com/dimichai/City-Metro-Network-Expansion-with-RL/tree/54cfec74d89b4e4fc912d480a3025e4c75e3b196
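Because CriticNew keeps the same submodules and parameter names as Critic, load_state_dict can align the two for a numerical comparison. A sketch, assuming CUDA:

import torch

eager = Critic(dynamic_size=4, hidden_size=4).cuda()
fused = CriticNew(dynamic_size=4, hidden_size=4).cuda()
fused.load_state_dict(eager.state_dict())

dynamic = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(eager(dynamic), fused(dynamic), rtol=1e-4, atol=1e-4)
# output shape (4, 1): one complexity estimate per sample after sum(dim=2)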
ActNorm2D
import torch import torch.nn as nn from torch.nn import Parameter from torch.nn.parameter import Parameter class ActNorm2D(nn.Module): def __init__(self, num_channels, eps=1e-05): super(ActNorm2D, self).__init__() self.eps = eps self.num_channels = num_channels self._log_scale = Parameter(torch.Tensor(num_channels)) self._shift = Parameter(torch.Tensor(num_channels)) self._init = False def log_scale(self): return self._log_scale[None, :, None, None] def shift(self): return self._shift[None, :, None, None] def forward(self, x): if not self._init: with torch.no_grad(): assert self.num_channels == x.size(1) mean = torch.transpose(x, 0, 1).contiguous().view(self. num_channels, -1).mean(dim=1) zero_mean = x - mean[None, :, None, None] var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view( self.num_channels, -1).mean(dim=1) std = (var + self.eps) ** 0.5 log_scale = torch.log(1.0 / std) self._shift.data = -mean * torch.exp(log_scale) self._log_scale.data = log_scale self._init = True log_scale = self.log_scale() logdet = log_scale.sum() * x.size(2) * x.size(3) return x * torch.exp(log_scale) + self.shift(), logdet def inverse(self, x): return (x - self.shift()) * torch.exp(-self.log_scale()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn import Parameter from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mul_sum_0[grid(1)](buf2, primals_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, primals_1, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, buf2, primals_1, primals_2 class ActNorm2DNew(nn.Module): def __init__(self, num_channels, eps=1e-05): super(ActNorm2DNew, self).__init__() self.eps = eps self.num_channels = num_channels self._log_scale = Parameter(torch.Tensor(num_channels)) self._shift = Parameter(torch.Tensor(num_channels)) self._init = False def log_scale(self): return self._log_scale[None, :, None, None] def shift(self): return self._shift[None, :, None, None] def inverse(self, x): return (x - self.shift()) * torch.exp(-self.log_scale()) def forward(self, input_0): primals_1 = self._log_scale primals_3 = self._shift primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
david-klindt/invertible-resnet
ActNorm2D
false
3432
[ "MIT" ]
0
ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89
https://github.com/david-klindt/invertible-resnet/tree/ac6756a7ba5d0dbcb6b4cec43f8b86079318fd89
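Note that tracing baked in only the post-initialization path: ActNorm2DNew still sets self._init = False but never runs the data-dependent statistics pass, so _log_scale and _shift must be populated before calling it (nn.Parameter(torch.Tensor(n)) is uninitialized memory). A sketch, assuming CUDA:

import torch

norm = ActNorm2DNew(num_channels=4).cuda()
with torch.no_grad():
    norm._log_scale.zero_()  # scale = exp(0) = 1
    norm._shift.zero_()

x = torch.rand(4, 4, 4, 4, device='cuda')
y, logdet = norm(x)
torch.testing.assert_close(y, x)  # identity transform with zeroed parameters
print(logdet.item())              # sum(log_scale) * H * W = 0.0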
CombineContext
import torch from torch import nn class CombineContext(nn.Module): def __init__(self, num_features, num_context_features): super(CombineContext, self).__init__() self.linear = nn.Linear(num_features + num_context_features, num_features) def forward(self, token, prev_context_vector): x = torch.cat((token, prev_context_vector), 1) return self.linear(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_features': 4, 'num_context_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 return buf1, buf0 class CombineContextNew(nn.Module): def __init__(self, num_features, num_context_features): super(CombineContextNew, self).__init__() self.linear = nn.Linear(num_features + num_context_features, num_features) def forward(self, input_0, input_1): primals_3 = self.linear.weight primals_4 = self.linear.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
dmcinerney/Summarization
CombineContext
false
3433
[ "Apache-2.0" ]
0
4d30900757308f7981a6544b4d6890f15133f269
https://github.com/dmcinerney/Summarization/tree/4d30900757308f7981a6544b4d6890f15133f269
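The fused graph here is just the cat kernel feeding a single addmm, so a parameter-for-parameter comparison against the eager module is straightforward. A sketch, assuming CUDA:

import torch

eager = CombineContext(num_features=4, num_context_features=4).cuda()
fused = CombineContextNew(num_features=4, num_context_features=4).cuda()
fused.load_state_dict(eager.state_dict())

token = torch.rand(4, 4, device='cuda')
ctx = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(eager(token, ctx), fused(token, ctx))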
FC_ELU
import torch from torch import nn class FC_ELU(nn.Module): def __init__(self, in_dim, hidden_units): super(FC_ELU, self).__init__() self.fc = nn.Linear(in_dim, hidden_units) self.elu = nn.ELU() def forward(self, x): out = self.fc(x) out = self.elu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'hidden_units': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class FC_ELUNew(nn.Module): def __init__(self, in_dim, hidden_units): super(FC_ELUNew, self).__init__() self.fc = nn.Linear(in_dim, hidden_units) self.elu = nn.ELU() def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
donaldo3/Neural-voice-cloning
FC_ELU
false
3434
[ "MIT" ]
0
a67cb8d34f5674e2c613d131f18182ad56d8f32f
https://github.com/donaldo3/Neural-voice-cloning/tree/a67cb8d34f5674e2c613d131f18182ad56d8f32f
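triton_poi_fused_elu_0 above is the standard ELU(alpha=1) identity: out = x for x > 0, else exp(x) - 1, computed via expm1 for accuracy near zero. The algebra can be checked in eager PyTorch without a GPU:

import torch

x = torch.randn(16)
torch.testing.assert_close(torch.where(x > 0, x, torch.expm1(x)),
                           torch.nn.functional.elu(x))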
Backbone
import torch class Backbone(torch.nn.Module): def __init__(self, input_size=4, hidden_size=10, latent_size=2): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.latent_size = latent_size self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size) self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size) self.activation = torch.nn.Sigmoid() def forward(self, x): x = self.activation(self.dense1(x)) out = self.activation(self.dense2(x)) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 10), (10, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(640)](buf1, primals_2, 640, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 2), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(128)](buf3, primals_5, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_4 class BackboneNew(torch.nn.Module): def __init__(self, input_size=4, hidden_size=10, latent_size=2): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.latent_size = latent_size self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size) self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size) self.activation = torch.nn.Sigmoid() def forward(self, input_0): primals_1 = self.dense1.weight primals_2 = self.dense1.bias primals_4 = self.dense2.weight primals_5 = self.dense2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
dmoebius-dm/prototorch_models
Backbone
false
3435
[ "MIT" ]
0
71602bf38a09148eab13d98c9f89589b345ac570
https://github.com/dmoebius-dm/prototorch_models/tree/71602bf38a09148eab13d98c9f89589b345ac570
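Backbone applies its Linear layers over the trailing dimension, so the 4x4x4x4 test input flows through as a batch of 64 four-vectors. A comparison sketch, assuming CUDA:

import torch

eager = Backbone().cuda()  # defaults: 4 -> 10 -> 2, sigmoid after each layer
fused = BackboneNew().cuda()
fused.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x))  # both yield shape (4, 4, 4, 2)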
RankScaledGaussianPrior
import torch def rank_scaled_gaussian(distances, lambd): order = torch.argsort(distances, dim=1) ranks = torch.argsort(order, dim=1) return torch.exp(-torch.exp(-ranks / lambd) * distances) class RankScaledGaussianPrior(torch.nn.Module): def __init__(self, lambd): super().__init__() self.lambd = lambd def forward(self, distances): return rank_scaled_gaussian(distances, self.lambd) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'lambd': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0) tmp1 = r2 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) _tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tmp7 = tmp6.to(tl.int64) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) _tmp9, tmp10 = triton_helpers.sort_with_index(tmp8, tmp4, None, 1, stable=False, descending=False) tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp6, xmask) tl.store(out_ptr1 + (x0 + 16 * r2 + 64 * x1), tmp10, xmask) @triton.jit def triton_poi_fused_div_exp_mul_neg_sort_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp1 = tmp0.to(tl.int64) tmp2 = -tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = 0.25 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = -tmp6 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) get_raw_stream(0) triton_per_fused_sort_0[grid(64)](arg0_1, buf1, buf3, 64, 4, XBLOCK =1, num_warps=2, num_stages=1) del buf1 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_exp_mul_neg_sort_1[grid(256)](buf3, arg0_1, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf3 return buf4, def rank_scaled_gaussian(distances, lambd): order = torch.argsort(distances, dim=1) ranks = torch.argsort(order, dim=1) return torch.exp(-torch.exp(-ranks / lambd) * distances) class RankScaledGaussianPriorNew(torch.nn.Module): def __init__(self, lambd): super().__init__() self.lambd = lambd def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dmoebius-dm/prototorch_models
RankScaledGaussianPrior
false
3436
[ "MIT" ]
0
71602bf38a09148eab13d98c9f89589b345ac570
https://github.com/dmoebius-dm/prototorch_models/tree/71602bf38a09148eab13d98c9f89589b345ac570
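The double argsort in rank_scaled_gaussian computes each distance's rank along dim 1, so small distances get ranks near 0 and are damped hardest by exp(-exp(-rank/lambd) * d); the 0.25 constant in the second kernel is 1/lambd for lambd=4. Since both the eager function and the compiled class live in this row, they can be cross-checked directly. A sketch, assuming CUDA (ties are effectively impossible with random float inputs, so the unstable sort is safe here):

import torch

d = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(RankScaledGaussianPriorNew(lambd=4)(d),
                           rank_scaled_gaussian(d, lambd=4),
                           rtol=1e-5, atol=1e-5)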
SimpleMultiheadAttention
import torch from torch import nn class SimpleMultiheadAttention(nn.Module): def __init__(self, d_x, d_attn, num_heads): super(SimpleMultiheadAttention, self).__init__() self.single_head_attn = nn.Linear(d_x, d_attn) self.multi_head_attn = nn.Linear(d_attn, num_heads) def forward(self, x): y = self.single_head_attn(x) nn.functional.relu(y) y = self.multi_head_attn(y) y = nn.functional.softmax(y) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_x': 4, 'd_attn': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf3, primals_4 class SimpleMultiheadAttentionNew(nn.Module): def __init__(self, d_x, d_attn, num_heads): super(SimpleMultiheadAttentionNew, self).__init__() 
self.single_head_attn = nn.Linear(d_x, d_attn) self.multi_head_attn = nn.Linear(d_attn, num_heads) def forward(self, input_0): primals_1 = self.single_head_attn.weight primals_2 = self.single_head_attn.bias primals_4 = self.multi_head_attn.weight primals_5 = self.multi_head_attn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
donaldo3/Neural-voice-cloning
SimpleMultiheadAttention
false
3437
[ "MIT" ]
0
a67cb8d34f5674e2c613d131f18182ad56d8f32f
https://github.com/donaldo3/Neural-voice-cloning/tree/a67cb8d34f5674e2c613d131f18182ad56d8f32f
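Two quirks of the eager forward carry straight into the compiled graph: the result of nn.functional.relu(y) is discarded (so no ReLU is ever applied, and indeed no maximum appears in the kernels), and softmax is called without dim, which for a 4-D input resolves to dim=1, exactly the axis the two _softmax kernels reduce over. With that understood, the pair compares as usual; a sketch assuming CUDA:

import torch

eager = SimpleMultiheadAttention(d_x=4, d_attn=4, num_heads=4).cuda()
fused = SimpleMultiheadAttentionNew(d_x=4, d_attn=4, num_heads=4).cuda()
fused.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x), rtol=1e-5, atol=1e-5)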
UpSampleAndHalveChannels
import torch from torch import Tensor import torch.nn as nn class UpSampleAndHalveChannels(nn.Module): """ Doubles the spatial dimensions (H,W) but halves the number of channels. Inverse of the DownSample function in blocks.py From Diakogiannis et al. doi: 10.1016/j.isprsjprs.2020.01.013 """ def __init__(self, _in_channels, _factor=2): super().__init__() self.in_channels = _in_channels self.factor = _factor self.upSample = nn.Upsample(scale_factor=self.factor, mode= 'bilinear', align_corners=None) self.halveChannels = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels // self.factor, kernel_size=(1, 1 ), stride=1, padding=0, dilation=1, bias=False) def forward(self, x: 'Tensor') ->Tensor: out = self.upSample(x) out = self.halveChannels(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'_in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp13 = x0 tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 + tmp2 tmp16 = tmp15 * tmp2 tmp17 = tmp16 - tmp2 tmp18 = triton_helpers.maximum(tmp17, tmp6) tmp19 = tmp18.to(tl.int32) tmp20 = tmp19 + tmp9 tmp21 = triton_helpers.minimum(tmp20, tmp11) tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp12 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (tmp19 + 4 * tmp12 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tmp22 - tmp23 tmp25 = tmp19.to(tl.float32) tmp26 = tmp18 - tmp25 tmp27 = triton_helpers.maximum(tmp26, tmp6) tmp28 = 1.0 tmp29 = triton_helpers.minimum(tmp27, tmp28) tmp30 = tmp24 * tmp29 tmp31 = tmp23 + tmp30 tmp32 = tl.load(in_ptr0 + (tmp19 + 4 * tmp8 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp8 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp29 tmp36 = tmp32 + tmp35 tmp37 = tmp31 - tmp36 tmp38 = tmp8.to(tl.float32) tmp39 = tmp7 - tmp38 tmp40 = triton_helpers.maximum(tmp39, tmp6) tmp41 = triton_helpers.minimum(tmp40, tmp28) tmp42 = tmp37 * tmp41 tmp43 = tmp36 + tmp42 tl.store(in_out_ptr0 + x4, tmp43, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) buf1 = buf0 del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (1024)](buf2, primals_1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 2, 8, 8), (128, 64, 8, 1)) return buf3, primals_2, buf2 class UpSampleAndHalveChannelsNew(nn.Module): """ Doubles the spatial dimensions (H,W) but halves the number of channels. Inverse of the DownSample function in blocks.py From Diakogiannis et al. 
doi: 10.1016/j.isprsjprs.2020.01.013 """ def __init__(self, _in_channels, _factor=2): super().__init__() self.in_channels = _in_channels self.factor = _factor self.upSample = nn.Upsample(scale_factor=self.factor, mode= 'bilinear', align_corners=None) self.halveChannels = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels // self.factor, kernel_size=(1, 1 ), stride=1, padding=0, dilation=1, bias=False) def forward(self, input_0): primals_2 = self.halveChannels.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
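A minimal equivalence sketch (not part of the record): assuming both the eager UpSampleAndHalveChannels and the generated UpSampleAndHalveChannelsNew above are defined in the same CUDA-enabled session, they can be compared on the shapes given by get_inputs()/get_init_inputs():

import torch

torch.manual_seed(0)
eager = UpSampleAndHalveChannels(_in_channels=4).cuda()
fused = UpSampleAndHalveChannelsNew(_in_channels=4).cuda()
fused.load_state_dict(eager.state_dict())  # share the 1x1 conv weight
x = torch.rand(4, 4, 4, 4, device='cuda')
out_ref, out_tri = eager(x), fused(x)
assert out_tri.shape == (4, 2, 8, 8)  # doubled H,W; halved channels
torch.testing.assert_close(out_tri, out_ref, rtol=1e-4, atol=1e-5)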
danielnflam/GAN-Tests
UpSampleAndHalveChannels
false
3,438
[ "BSD-3-Clause" ]
0
f112e27b802d717f64a8f2cfa79b9898667da14c
https://github.com/danielnflam/GAN-Tests/tree/f112e27b802d717f64a8f2cfa79b9898667da14c
Fire
import math import torch import torch.nn as nn class Fire(nn.Module): def __init__(self, inplanes, squeeze_planes, expand_planes): super(Fire, self).__init__() self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1, stride=1) self.relu1 = nn.ELU(inplace=True) self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1, stride=1) self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3, stride=1, padding=1) self.relu2 = nn.ELU(inplace=True) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) def forward(self, x): x = self.conv1(x) x = self.relu1(x) out1 = self.conv2(x) out2 = self.conv3(x) out = torch.cat([out1, out2], 1) out = self.relu2(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_cat_elu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tmp19 = 0.0 tmp20 = tmp18 > tmp19 tmp21 = 1.0 tmp22 = tmp18 * tmp21 tmp23 = libdevice.expm1(tmp22) tmp24 = tmp23 * tmp21 tmp25 = tl.where(tmp20, tmp22, tmp24) tl.store(out_ptr0 + x3, tmp25, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_elu_1[grid(512)](buf2, primals_5, buf3, primals_7, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_5 del primals_7 return buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf4 class FireNew(nn.Module): def __init__(self, inplanes, squeeze_planes, expand_planes): super(FireNew, self).__init__() self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1, stride=1) self.relu1 = nn.ELU(inplace=True) self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1, stride=1) self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3, stride=1, padding=1) self.relu2 = nn.ELU(inplace=True) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
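As a sanity check (illustrative; assumes both Fire and FireNew above are in scope and a CUDA device is available), copying the weights across lets the eager and Triton paths be compared directly; the copy matters because both constructors draw fresh random weights via normal_():

import torch

torch.manual_seed(0)
eager = Fire(inplanes=4, squeeze_planes=4, expand_planes=4).cuda()
fused = FireNew(inplanes=4, squeeze_planes=4, expand_planes=4).cuda()
fused.load_state_dict(eager.state_dict())  # sync the randomly drawn weights
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)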
dcrmg/Efficient-Segmentation-Networks
Fire
false
3,439
[ "MIT" ]
0
e2f2d90d69e4e9af464678b0f02bc754c28f643d
https://github.com/dcrmg/Efficient-Segmentation-Networks/tree/e2f2d90d69e4e9af464678b0f02bc754c28f643d
GenNoise
import torch import torch.nn as nn class GenNoise(nn.Module): def __init__(self, dim2): super(GenNoise, self).__init__() self.dim2 = dim2 def forward(self, input): a = list(input.size()) a[1] = self.dim2 b = torch.zeros(a).type_as(input.data) b.normal_() x = torch.autograd.Variable(b) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim2': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(256)](buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = torch.ops.aten.normal_functional.default(buf0) del buf0 buf2 = buf1 del buf1 return buf2, class GenNoiseNew(nn.Module): def __init__(self, dim2): super(GenNoiseNew, self).__init__() self.dim2 = dim2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
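Because GenNoise draws fresh standard-normal noise on every call, the eager and generated modules cannot be compared value-for-value; only shape, dtype, device and rough statistics are meaningful. A small sketch (assuming the class above is in scope on a CUDA machine; the statistical bounds are my choice):

import torch

gen = GenNoiseNew(dim2=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
noise = gen(x)
assert noise.shape == (4, 4, 4, 4) and noise.dtype == x.dtype
assert noise.device == x.device
# Loose check on 256 draws from N(0, 1); far wider than the standard error.
assert noise.mean().abs() < 0.5 and (noise.std() - 1).abs() < 0.5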
dustlrdk/noise2self
GenNoise
false
3,440
[ "MIT" ]
0
46e8c4650f7ec4f664448417fecd39b4cae477f7
https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7
ParallelDilatedConv
import torch import torch.nn as nn class ParallelDilatedConv(nn.Module): def __init__(self, inplanes, planes): super(ParallelDilatedConv, self).__init__() self.dilated_conv_1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1) self.dilated_conv_2 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=2, dilation=2) self.dilated_conv_3 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=3, dilation=3) self.dilated_conv_4 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=4, dilation=4) self.relu1 = nn.ELU(inplace=True) self.relu2 = nn.ELU(inplace=True) self.relu3 = nn.ELU(inplace=True) self.relu4 = nn.ELU(inplace=True) def forward(self, x): out1 = self.dilated_conv_1(x) out2 = self.dilated_conv_2(x) out3 = self.dilated_conv_3(x) out4 = self.dilated_conv_4(x) out1 = self.relu1(out1) out2 = self.relu2(out2) out3 = self.relu3(out3) out4 = self.relu4(out4) out = out1 + out2 + out3 + out4 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_convolution_elu_0(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr2 + x3, xmask) tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_out_ptr3 + x3, xmask) tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp2 > tmp12 tmp14 = 1.0 tmp15 = tmp2 * tmp14 tmp16 = libdevice.expm1(tmp15) tmp17 = tmp16 * tmp14 tmp18 = tl.where(tmp13, tmp15, tmp17) tmp19 = tmp5 > tmp12 tmp20 = tmp5 * tmp14 tmp21 = libdevice.expm1(tmp20) tmp22 = tmp21 * tmp14 tmp23 = tl.where(tmp19, tmp20, tmp22) tmp24 = tmp18 + tmp23 tmp25 = tmp8 > tmp12 tmp26 = tmp8 * tmp14 tmp27 = libdevice.expm1(tmp26) tmp28 = tmp27 * tmp14 tmp29 = tl.where(tmp25, tmp26, tmp28) tmp30 = tmp24 + tmp29 tmp31 = tmp11 > tmp12 tmp32 = tmp11 * tmp14 tmp33 = libdevice.expm1(tmp32) tmp34 = tmp33 * tmp14 tmp35 = tl.where(tmp31, tmp32, tmp34) tmp36 = tmp30 + tmp35 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(in_out_ptr2 + x3, tmp8, xmask) tl.store(in_out_ptr3 + x3, tmp11, xmask) tl.store(out_ptr0 + x3, tmp36, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = extern_kernels.convolution(primals_3, primals_6, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = extern_kernels.convolution(primals_3, primals_8, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf3 = buf2 del buf2 buf5 = buf4 del buf4 buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_convolution_elu_0[grid(256)](buf1, buf3, buf5, buf7, primals_2, primals_5, primals_7, primals_9, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_5 del primals_7 del primals_9 return (buf8, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf7) class ParallelDilatedConvNew(nn.Module): def __init__(self, inplanes, planes): super(ParallelDilatedConvNew, self).__init__() self.dilated_conv_1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1) self.dilated_conv_2 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=2, dilation=2) self.dilated_conv_3 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=3, dilation=3) self.dilated_conv_4 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=4, dilation=4) self.relu1 = nn.ELU(inplace=True) self.relu2 = nn.ELU(inplace=True) self.relu3 = nn.ELU(inplace=True) self.relu4 = nn.ELU(inplace=True) def forward(self, input_0): primals_1 = self.dilated_conv_1.weight primals_2 = self.dilated_conv_1.bias primals_4 = self.dilated_conv_2.weight primals_5 = self.dilated_conv_2.bias primals_6 = self.dilated_conv_3.weight primals_7 = self.dilated_conv_3.bias primals_8 = self.dilated_conv_4.weight primals_9 = self.dilated_conv_4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
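The fused kernel above folds four bias adds, four ELUs and the three additions into a single pass over the four convolution outputs. A hedged equivalence check (assumes both classes above are defined in one CUDA session):

import torch

torch.manual_seed(0)
eager = ParallelDilatedConv(inplanes=4, planes=4).cuda()
fused = ParallelDilatedConvNew(inplanes=4, planes=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)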
dcrmg/Efficient-Segmentation-Networks
ParallelDilatedConv
false
3,441
[ "MIT" ]
0
e2f2d90d69e4e9af464678b0f02bc754c28f643d
https://github.com/dcrmg/Efficient-Segmentation-Networks/tree/e2f2d90d69e4e9af464678b0f02bc754c28f643d
Tanh
import torch import torch.nn as nn class Tanh(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Tanh, self).__init__() def forward(self, x): return torch.tanh(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class TanhNew(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(TanhNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
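Tanh is stateless, so checking the generated module needs no weight copying (sketch; assumes the class above is in scope and CUDA is available; the eager forward is just torch.tanh):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(TanhNew()(x), torch.tanh(x))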
dustlrdk/noise2self
Tanh
false
3,442
[ "MIT" ]
0
46e8c4650f7ec4f664448417fecd39b4cae477f7
https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7
StdConv2d
import torch import torch.nn as nn import torch.utils import torch.nn.functional as F class StdConv2d(nn.Conv2d): def forward(self, x): w = self.weight v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) w = (w - m) / torch.sqrt(v + 1e-10) return F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-10 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3, primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_1[grid(16)](buf6, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf6, primals_1, primals_3, buf3, buf4 class StdConv2dNew(nn.Conv2d): def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
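Weight standardization is deterministic given the weights, so after syncing parameters the two paths should agree (illustrative sketch; assumes both classes above are defined in a CUDA session; tolerances are my choice):

import torch

torch.manual_seed(0)
eager = StdConv2d(in_channels=4, out_channels=4, kernel_size=4).cuda()
fused = StdConv2dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
out = fused(x)
assert out.shape == (4, 4, 1, 1)  # 4x4 kernel over a 4x4 input, no padding
torch.testing.assert_close(out, eager(x), rtol=1e-4, atol=1e-5)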
dustasa/senior_software_HW
StdConv2d
false
3,443
[ "Apache-2.0" ]
0
767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
StateCritic
import torch import torch.nn as nn import torch.nn.functional as F class Encoder(nn.Module): """Encodes the static & dynamic states using 1d Convolution.""" def __init__(self, input_size, hidden_size): super(Encoder, self).__init__() self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1) def forward(self, input): output = self.conv(input) return output class StateCritic(nn.Module): """Estimates the problem complexity. This is a basic module that just looks at the log-probabilities predicted by the encoder + decoder, and returns an estimate of complexity """ def __init__(self, static_size, dynamic_size, hidden_size): super(StateCritic, self).__init__() self.static_encoder = Encoder(static_size, hidden_size) self.dynamic_encoder = Encoder(dynamic_size, hidden_size) self.fc1 = nn.Conv1d(hidden_size * 2, 20, kernel_size=1) self.fc2 = nn.Conv1d(20, 20, kernel_size=1) self.fc3 = nn.Conv1d(20, 1, kernel_size=1) for p in self.parameters(): if len(p.shape) > 1: nn.init.xavier_uniform_(p) def forward(self, static, dynamic): static_hidden = self.static_encoder(static) dynamic_hidden = self.dynamic_encoder(dynamic) hidden = torch.cat((static_hidden, dynamic_hidden), 1) output = F.relu(self.fc1(hidden)) output = F.relu(self.fc2(output)) output = self.fc3(output).sum() return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'static_size': 4, 'dynamic_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp10 & xmask, other=0.0) tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_per_fused_convolution_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (20, 8, 1), (8, 1, 1)) assert_size_stride(primals_8, (20,), (1,)) assert_size_stride(primals_9, (20, 20, 1), (20, 1, 1)) assert_size_stride(primals_10, (20,), (1,)) assert_size_stride(primals_11, (1, 20, 1), (20, 1, 1)) assert_size_stride(primals_12, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, 
output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](buf0, primals_2, buf1, primals_5, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_2 del primals_5 buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 20, 4), (80, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(320)](buf4, primals_8, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 20, 4), (80, 4, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_1[grid(320)](buf6, primals_10, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf7, (4, 1, 4), (4, 4, 1)) buf8 = empty_strided_cuda((), (), torch.float32) triton_per_fused_convolution_sum_2[grid(1)](buf7, primals_12, buf8, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf7 del primals_12 return (buf8, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_11, buf2, buf4, buf6) class Encoder(nn.Module): """Encodes the static & dynamic states using 1d Convolution.""" def __init__(self, input_size, hidden_size): super(Encoder, self).__init__() self.conv = nn.Conv1d(input_size, hidden_size, kernel_size=1) def forward(self, input): output = self.conv(input) return output class StateCriticNew(nn.Module): """Estimates the problem complexity. This is a basic module that just looks at the log-probabilities predicted by the encoder + decoder, and returns an estimate of complexity """ def __init__(self, static_size, dynamic_size, hidden_size): super(StateCriticNew, self).__init__() self.static_encoder = Encoder(static_size, hidden_size) self.dynamic_encoder = Encoder(dynamic_size, hidden_size) self.fc1 = nn.Conv1d(hidden_size * 2, 20, kernel_size=1) self.fc2 = nn.Conv1d(20, 20, kernel_size=1) self.fc3 = nn.Conv1d(20, 1, kernel_size=1) for p in self.parameters(): if len(p.shape) > 1: nn.init.xavier_uniform_(p) def forward(self, input_0, input_1): primals_1 = self.static_encoder.conv.weight primals_2 = self.static_encoder.conv.bias primals_4 = self.dynamic_encoder.conv.weight primals_5 = self.dynamic_encoder.conv.bias primals_7 = self.fc1.weight primals_8 = self.fc1.bias primals_9 = self.fc2.weight primals_10 = self.fc2.bias primals_11 = self.fc3.weight primals_12 = self.fc3.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
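StateCritic reduces everything to a scalar, so the comparison is on a single value. A sketch (assumes both classes above live in one CUDA session; both constructors call xavier_uniform_, hence the state-dict copy):

import torch

torch.manual_seed(0)
eager = StateCritic(static_size=4, dynamic_size=4, hidden_size=4).cuda()
fused = StateCriticNew(static_size=4, dynamic_size=4, hidden_size=4).cuda()
fused.load_state_dict(eager.state_dict())
static = torch.rand(4, 4, 4, device='cuda')
dynamic = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(fused(static, dynamic), eager(static, dynamic),
                           rtol=1e-4, atol=1e-4)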
dimichai/City-Metro-Network-Expansion-with-RL
StateCritic
false
3,444
[ "MIT" ]
0
54cfec74d89b4e4fc912d480a3025e4c75e3b196
https://github.com/dimichai/City-Metro-Network-Expansion-with-RL/tree/54cfec74d89b4e4fc912d480a3025e4c75e3b196
SIREN_layer
import torch import numpy as np import torch.nn as nn def act(act_fun='LeakyReLU'): """ Either string defining an activation function or module (e.g. nn.ReLU) """ if isinstance(act_fun, str): if act_fun == 'LeakyReLU': return nn.LeakyReLU(0.2, inplace=True) elif act_fun == 'Swish': return Swish() elif act_fun[:3] == 'ELU': if len(act_fun) > 3: param = float(act_fun[3:]) return nn.ELU(param, inplace=True) return nn.ELU(inplace=True) elif act_fun == 'ReLU': return nn.ReLU() elif act_fun == 'tanh': return Tanh() elif act_fun == 'sine': return Sin() elif act_fun == 'soft': return nn.Softplus() elif act_fun == 'none': return nn.Sequential() else: assert False else: return act_fun() class Swish(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Swish, self).__init__() self.s = nn.Sigmoid() def forward(self, x): return x * self.s(x) class Tanh(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Tanh, self).__init__() def forward(self, x): return torch.tanh(x) class Sin(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Sin, self).__init__() def forward(self, x): return torch.sin(x) class SIREN_layer(nn.Module): def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30): super(SIREN_layer, self).__init__() self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias =True) self.act_fun = act(act_fun) self.omega_0 = omega_0 self.in_features = ch_in self.frist = frist self.init() def init(self): with torch.no_grad(): if self.frist: self.conv1.weight.uniform_(-1 / self.in_features, 1 / self. in_features) else: self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) / self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0) def forward(self, x): x = self.conv1(x) return self.act_fun(self.omega_0 * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ch_in': 4, 'ch_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_mul_sin_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 30.0 tmp4 = tmp2 * tmp3 tmp5 = tl_math.sin(tmp4) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_mul_sin_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, buf1 def act(act_fun='LeakyReLU'): """ Either string defining an activation function or module (e.g. nn.ReLU) """ if isinstance(act_fun, str): if act_fun == 'LeakyReLU': return nn.LeakyReLU(0.2, inplace=True) elif act_fun == 'Swish': return Swish() elif act_fun[:3] == 'ELU': if len(act_fun) > 3: param = float(act_fun[3:]) return nn.ELU(param, inplace=True) return nn.ELU(inplace=True) elif act_fun == 'ReLU': return nn.ReLU() elif act_fun == 'tanh': return Tanh() elif act_fun == 'sine': return Sin() elif act_fun == 'soft': return nn.Softplus() elif act_fun == 'none': return nn.Sequential() else: assert False else: return act_fun() class Swish(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Swish, self).__init__() self.s = nn.Sigmoid() def forward(self, x): return x * self.s(x) class Tanh(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Tanh, self).__init__() def forward(self, x): return torch.tanh(x) class Sin(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Sin, self).__init__() def forward(self, x): return torch.sin(x) class SIREN_layerNew(nn.Module): def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30): super(SIREN_layerNew, self).__init__() self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias =True) self.act_fun = act(act_fun) self.omega_0 = omega_0 self.in_features = ch_in self.frist = frist self.init() def init(self): with torch.no_grad(): if self.frist: self.conv1.weight.uniform_(-1 / self.in_features, 1 / self. 
in_features) else: self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) / self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
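Both constructors re-randomize conv1.weight in init(), so a comparison only makes sense after copying state across (sketch; assumes both classes above are in scope on a CUDA machine):

import torch

torch.manual_seed(0)
eager = SIREN_layer(ch_in=4, ch_out=4).cuda()
fused = SIREN_layerNew(ch_in=4, ch_out=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
# Both paths compute sin(30 * conv1x1(x)); the fused kernel folds the bias add in.
torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)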
dustlrdk/noise2self
SIREN_layer
false
3,445
[ "MIT" ]
0
46e8c4650f7ec4f664448417fecd39b4cae477f7
https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7
Net
import torch import torch.nn as nn import torch.utils import torch.nn.functional as F class Net(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1) self.fc1 = nn.Linear(8 * 8 * 8, 32) self.fc2 = nn.Linear(32, 2) def forward(self, x): out = F.max_pool2d(torch.tanh(self.conv1(x)), 2) out = F.max_pool2d(torch.tanh(self.conv2(out)), 2) out = out.view(-1, 8 * 8 * 8) out = torch.tanh(self.fc1(out)) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, 
tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (32, 512), (512, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (2, 32), (32, 1)) assert_size_stride(primals_9, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_tanh_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2, buf3, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 32, 32), (8192, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_tanh_2[grid(32768)](buf5, primals_5, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch .int8) buf7 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch .float32) triton_poi_fused_max_pool2d_with_indices_3[grid(8192)](buf5, buf6, buf7, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (16, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 32), (1, 512), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_tanh_4[grid(512)](buf9, primals_7, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (16, 512), (512, 1), 0), buf9, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1) self.fc1 = nn.Linear(8 * 8 * 8, 32) self.fc2 = nn.Linear(32, 2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = 
self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
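Note the view(-1, 8 * 8 * 8) in the eager forward: with the (4, 3, 64, 64) input from get_inputs(), the post-pool feature map is (4, 8, 16, 16), i.e. 2048 values per sample, so the flatten yields 16 rows rather than 4 and the output is (16, 2); the generated call() mirrors this by reinterpreting buf7 as (16, 512). A hedged equivalence sketch (assumes both classes above are defined in a CUDA session):

import torch

torch.manual_seed(0)
eager = Net().cuda()
fused = NetNew().cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 3, 64, 64, device='cuda')
out_ref, out_tri = eager(x), fused(x)
assert out_tri.shape == (16, 2)  # see the flatten note above
torch.testing.assert_close(out_tri, out_ref, rtol=1e-4, atol=1e-4)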
dustasa/senior_software_HW
Net
false
3,446
[ "Apache-2.0" ]
0
767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
GE2ELoss
import torch import torch.nn.functional as F import torch.nn as nn def calc_loss(sim_matrix): same_idx = list(range(sim_matrix.size(0))) pos = sim_matrix[same_idx, :, same_idx] neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_() per_embedding_loss = -1 * (pos - neg) loss = per_embedding_loss.sum() return loss, per_embedding_loss def get_centroids(embeddings): centroids = embeddings.mean(dim=1) return centroids def get_utterance_centroids(embeddings): """ Returns the centroids for each utterance of a speaker, where the utterance centroid is the speaker centroid without considering this utterance Shape of embeddings should be: (speaker_ct, utterance_per_speaker_ct, embedding_size) """ sum_centroids = embeddings.sum(dim=1) sum_centroids = sum_centroids.reshape(sum_centroids.shape[0], 1, sum_centroids.shape[-1]) num_utterances = embeddings.shape[1] - 1 centroids = (sum_centroids - embeddings) / num_utterances return centroids def get_cossim(embeddings, centroids): num_utterances = embeddings.shape[1] utterance_centroids = get_utterance_centroids(embeddings) utterance_centroids_flat = utterance_centroids.view(utterance_centroids .shape[0] * utterance_centroids.shape[1], -1) embeddings_flat = embeddings.view(embeddings.shape[0] * num_utterances, -1) cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat) centroids_expand = centroids.repeat((num_utterances * embeddings.shape[ 0], 1)) embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings. shape[0], 1) embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] * embeddings_expand.shape[1], embeddings_expand.shape[-1]) cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand) cos_diff = cos_diff.view(embeddings.size(0), num_utterances, centroids. size(0)) same_idx = list(range(embeddings.size(0))) cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0], num_utterances) cos_diff = cos_diff + 1e-06 return cos_diff class GE2ELoss(nn.Module): def __init__(self, device): super(GE2ELoss, self).__init__() self.w = nn.Parameter(torch.tensor(10.0), requires_grad=True) self.b = nn.Parameter(torch.tensor(-5.0), requires_grad=True) self.device = device def forward(self, embeddings): torch.clamp(self.w, 1e-06) centroids = get_centroids(embeddings) cossim = get_cossim(embeddings, centroids) sim_matrix = self.w * cossim + self.b loss, _ = calc_loss(sim_matrix) return loss def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 - tmp7 tmp9 = 0.3333333333333333 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_linalg_vector_norm_mean_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * (x0 % 4), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr0 + (4 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (12 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (1 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (5 + 16 * (x0 % 4)), 
xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (9 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (13 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (2 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (6 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (10 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (14 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (3 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr0 + (7 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr0 + (11 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp35 = tl.load(in_ptr0 + (15 + 16 * (x0 % 4)), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp8 * tmp8 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tmp16 / tmp7 tmp18 = tmp17 * tmp17 tmp19 = tmp9 + tmp18 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp26 = tmp24 + tmp25 tmp27 = tmp26 / tmp7 tmp28 = tmp27 * tmp27 tmp29 = tmp19 + tmp28 tmp32 = tmp30 + tmp31 tmp34 = tmp32 + tmp33 tmp36 = tmp34 + tmp35 tmp37 = tmp36 / tmp7 tmp38 = tmp37 * tmp37 tmp39 = tmp29 + tmp38 tl.store(out_ptr0 + x0, tmp39, xmask) @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3(in_ptr0 , in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask) tmp1 = tl.load(in_ptr0 + 4 * (x1 // 4), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (1 + 4 * (x1 // 4)), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * (x1 // 4)), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * (x1 // 4)), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + (x0 + 16 * (x1 % 4)), xmask) tmp17 = tl.load(in_ptr0 + (4 + x0 + 16 * (x1 % 4)), xmask) tmp19 = tl.load(in_ptr0 + (8 + x0 + 16 * (x1 % 4)), xmask) tmp21 = tl.load(in_ptr0 + (12 + x0 + 16 * (x1 % 4)), xmask) tmp25 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = 4.0 tmp24 = tmp22 / tmp23 tmp26 = libdevice.sqrt(tmp25) tmp27 = triton_helpers.maximum(tmp26, tmp13) tmp28 = tmp24 / tmp27 tmp29 = tmp15 * tmp28 tl.store(out_ptr0 + x2, tmp29, xmask) @triton.jit def triton_poi_fused_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def 
triton_poi_fused_index_put_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex x0 = xindex % 4 tmp11 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp0 = x1 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 0, tl.int64) tmp6 = tl.where(tmp4, tmp5, tmp3) tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.where(tmp8, tmp1, tmp7) tmp10 = tl.where(tmp2, tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tl.store(out_ptr0 + (4 * x0 + 17 * tmp10), tmp17, xmask) @triton.jit def triton_per_fused_add_exp_index_log_mul_sub_sum_6(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 r0 = rindex % 4 r2 = rindex tmp11 = tl.load(in_ptr0 + 0) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp17 = tl.load(in_ptr2 + 0) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.load(in_ptr1 + 4 * r2, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (1 + 4 * r2), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (2 + 4 * r2), None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (3 + 4 * r2), None, eviction_policy='evict_last') tmp0 = r1 tmp1 = tl.full([1, 1], 2, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1, 1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1, 1], 0, tl.int64) tmp6 = tl.where(tmp4, tmp5, tmp3) tmp7 = tl.full([1, 1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.where(tmp8, tmp1, tmp7) tmp10 = tl.where(tmp2, tmp6, tmp9) tmp13 = tl.load(in_ptr1 + (4 * r0 + 17 * tmp10), None, eviction_policy= 'evict_last') tmp14 = 1e-06 tmp15 = tmp13 + tmp14 tmp16 = tmp12 * tmp15 tmp19 = tmp16 + tmp18 tmp21 = tmp20 + tmp14 tmp22 = tmp12 * tmp21 tmp23 = tmp22 + tmp18 tmp24 = tl_math.exp(tmp23) tmp26 = tmp25 + tmp14 tmp27 = tmp12 * tmp26 tmp28 = tmp27 + tmp18 tmp29 = tl_math.exp(tmp28) tmp30 = tmp24 + tmp29 tmp32 = tmp31 + tmp14 tmp33 = tmp12 * tmp32 tmp34 = tmp33 + tmp18 tmp35 = tl_math.exp(tmp34) tmp36 = tmp30 + tmp35 tmp38 = tmp37 + tmp14 tmp39 = tmp12 * tmp38 tmp40 = tmp39 + tmp18 tmp41 = tl_math.exp(tmp40) tmp42 = tmp36 + tmp41 tmp43 = tmp42 + tmp14 tmp44 = tl_math.log(tmp43) tmp45 = tmp19 - tmp44 tmp46 = -1.0 tmp47 = tmp45 * tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.sum(tmp48, 1)[:, None] tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp50, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1[grid(64)]( primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (64, 1), (1, 64), 0) del buf0 triton_poi_fused_linalg_vector_norm_mean_repeat_2[grid(64)](primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_clamp_min_div_linalg_vector_norm_mean_mul_repeat_3[ grid(256)](primals_2, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf2, (64,), (1,), 0) del buf2 triton_poi_fused_sum_4[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 triton_poi_fused_index_put_5[grid(16)](buf1, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 buf7 = empty_strided_cuda((), (), torch.float32) triton_per_fused_add_exp_index_log_mul_sub_sum_6[grid(1)](primals_1, buf4, primals_3, buf7, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) return buf7, primals_1, primals_3, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) def calc_loss(sim_matrix): same_idx = list(range(sim_matrix.size(0))) pos = sim_matrix[same_idx, :, same_idx] neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_() per_embedding_loss = -1 * (pos - neg) loss = per_embedding_loss.sum() return loss, per_embedding_loss def get_centroids(embeddings): centroids = embeddings.mean(dim=1) return centroids def get_utterance_centroids(embeddings): """ Returns the centroids for each utterance of a speaker, where the utterance centroid is the speaker centroid without considering this utterance Shape of embeddings should be: (speaker_ct, utterance_per_speaker_ct, embedding_size) """ sum_centroids = embeddings.sum(dim=1) sum_centroids = sum_centroids.reshape(sum_centroids.shape[0], 1, sum_centroids.shape[-1]) num_utterances = embeddings.shape[1] - 1 centroids = (sum_centroids - embeddings) / num_utterances return centroids def get_cossim(embeddings, centroids): num_utterances = embeddings.shape[1] utterance_centroids = get_utterance_centroids(embeddings) utterance_centroids_flat = utterance_centroids.view(utterance_centroids .shape[0] * utterance_centroids.shape[1], -1) embeddings_flat = embeddings.view(embeddings.shape[0] * num_utterances, -1) cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat) centroids_expand = centroids.repeat((num_utterances * embeddings.shape[ 0], 1)) embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings. shape[0], 1) embeddings_expand = embeddings_expand.view(embeddings_expand.shape[0] * embeddings_expand.shape[1], embeddings_expand.shape[-1]) cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand) cos_diff = cos_diff.view(embeddings.size(0), num_utterances, centroids. size(0)) same_idx = list(range(embeddings.size(0))) cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0], num_utterances) cos_diff = cos_diff + 1e-06 return cos_diff class GE2ELossNew(nn.Module): def __init__(self, device): super(GE2ELossNew, self).__init__() self.w = nn.Parameter(torch.tensor(10.0), requires_grad=True) self.b = nn.Parameter(torch.tensor(-5.0), requires_grad=True) self.device = device def forward(self, input_0): primals_1 = self.w primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dodo0822/PyTorch_Speaker_Verification
GE2ELoss
false
3447
[ "BSD-3-Clause" ]
0
5310f441894e77895de27380d31149629e309d0f
https://github.com/dodo0822/PyTorch_Speaker_Verification/tree/5310f441894e77895de27380d31149629e309d0f
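A minimal eager-mode sketch of the GE2E computation that this record fuses into triton_per_fused_add_exp_index_log_mul_sub_sum_6: scaled cosine similarities against full and utterance-excluded centroids, then the softmax-style contrast from calc_loss. The name ge2e_loss_eager is illustrative, and w=10.0, b=-5.0 mirror the GE2ELossNew parameter initializers; this is a sanity-check sketch, not part of the record.

import torch
import torch.nn.functional as F

def ge2e_loss_eager(emb, w=10.0, b=-5.0):
    # emb: (speakers, utterances, dim), matching the record's (4, 4, 4) input
    n_spk, n_utt, _ = emb.shape
    centroids = emb.mean(dim=1)                                      # get_centroids
    utt_cent = (emb.sum(dim=1, keepdim=True) - emb) / (n_utt - 1)    # get_utterance_centroids
    cos_same = F.cosine_similarity(emb, utt_cent, dim=-1)            # (spk, utt)
    cos_diff = F.cosine_similarity(emb.unsqueeze(2),
                                   centroids.view(1, 1, n_spk, -1), dim=-1)
    idx = torch.arange(n_spk)
    cos_diff[idx, :, idx] = cos_same           # diagonal uses the excluded centroid
    sim = w * (cos_diff + 1e-06) + b           # scale and shift, as in the fused kernel
    pos = sim[idx, :, idx]
    neg = (sim.exp().sum(dim=2) + 1e-06).log() # calc_loss's log-sum-exp term
    return (-(pos - neg)).sum()

print(ge2e_loss_eager(torch.rand(4, 4, 4)))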
ReGLU
import torch import torch.nn as nn class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class ReGLU(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.ReLU( ), True, False, False, False) def forward(self, x: 'torch.Tensor'): return self.ffn(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_relu_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). 
We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class ReGLUNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.ReLU( ), True, False, False, False) def forward(self, input_0): primals_1 = self.ffn.layer1.weight primals_3 = self.ffn.layer2.weight primals_4 = self.ffn.linear_v.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
edchengmoore/pytorch_tabular
ReGLU
false
3448
[ "MIT" ]
0
25f87089fbed95b46f2a1a8a96fba1f581aa8af1
https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1
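What the generated call() above computes, written out in eager PyTorch as a hedged sketch (reglu_eager is an illustrative name; all three projections are bias-free, as in the record): two matmuls feed triton_poi_fused_mul_relu_0, which fuses relu(g) * v, and a final matmul applies layer2.

import torch

def reglu_eager(x, w1, wv, w2):
    g = torch.relu(x @ w1.t())   # buf0 plus the relu inside the fused kernel
    v = x @ wv.t()               # buf1, the linear_v gate projection
    return (g * v) @ w2.t()      # fused elementwise mul, then layer2

x = torch.rand(64, 4)
w1, wv, w2 = (torch.rand(4, 4) for _ in range(3))
print(reglu_eager(x, w1, wv, w2).shape)  # torch.Size([64, 4])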
SwiGLU
import torch import torch.nn as nn class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class SwiGLU(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.SiLU( ), True, False, False, False) def forward(self, x: 'torch.Tensor'): return self.ffn(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_silu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_silu_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). 
We have also implemented experiments on these: * [experiment that uses `labml.configs`](glu_variants/experiment.html) * [simpler version from scratch](glu_variants/simple.html) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = nn.Dropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class SwiGLUNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.SiLU( ), True, False, False, False) def forward(self, input_0): primals_1 = self.ffn.layer1.weight primals_3 = self.ffn.layer2.weight primals_4 = self.ffn.linear_v.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
edchengmoore/pytorch_tabular
SwiGLU
false
3449
[ "MIT" ]
0
25f87089fbed95b46f2a1a8a96fba1f581aa8af1
https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1
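The only change relative to the ReGLU record is the fused elementwise kernel: triton_poi_fused_mul_silu_0 computes sigmoid(a) * a * b instead of relu(a) * b. A small sanity sketch (tensor names a and b are illustrative):

import torch
import torch.nn.functional as F

a, b = torch.rand(256), torch.rand(256)
fused = torch.sigmoid(a) * a * b   # tmp1 = sigmoid(tmp0); tmp2 = tmp0 * tmp1; tmp4 = tmp2 * tmp3
assert torch.allclose(fused, F.silu(a) * b)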
NetDepth
import torch import torch.nn as nn import torch.utils import torch.nn.functional as F class NetDepth(nn.Module): def __init__(self, n_chans1=32): super().__init__() self.n_chans1 = n_chans1 self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3, padding=1) self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32) self.fc2 = nn.Linear(32, 2) def forward(self, x): out = F.max_pool2d(torch.relu(self.conv1(x)), 2) out = F.max_pool2d(torch.relu(self.conv2(out)), 2) out = F.max_pool2d(torch.relu(self.conv3(out)), 2) out = out.view(-1, 4 * 4 * self.n_chans1 // 2) out = torch.relu(self.fc1(out)) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (32, 256), (256, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (2, 32), (32, 1)) assert_size_stride(primals_11, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = 
empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8) buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10, buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (16, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 32), (1, 256), 0), out=buf12) buf13 = buf12 del buf12 triton_poi_fused_relu_6[grid(512)](buf13, primals_9, 512, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_11, buf13, reinterpret_tensor( primals_10, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf14) del primals_11 return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16, 256), (256, 1), 0), buf13, primals_10, primals_8) class NetDepthNew(nn.Module): def __init__(self, n_chans1=32): super().__init__() self.n_chans1 = n_chans1 self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3, padding=1) self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32) self.fc2 = nn.Linear(32, 2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
dustasa/senior_software_HW
NetDepth
false
3450
[ "Apache-2.0" ]
0
767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
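Each triton_poi_fused_max_pool2d_with_indices_* kernel above reads the four elements of a stride-2 2x2 window and emits both the max and an int8 window position in {0, 1, 2, 3}. A hedged eager sketch of the same pooling; note PyTorch returns flat indices rather than local window positions, but both identify the argmax within each window:

import torch
import torch.nn.functional as F

x = torch.rand(4, 32, 64, 64)                   # shape of buf1 in call()
out, idx = F.max_pool2d(x, kernel_size=2, return_indices=True)
print(out.shape, idx.shape)                     # both torch.Size([4, 32, 32, 32])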
NetWidth
import torch import torch.nn as nn import torch.utils import torch.nn.functional as F class NetWidth(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(32, 16, kernel_size=3, padding=1) self.fc1 = nn.Linear(16 * 8 * 8, 32) self.fc2 = nn.Linear(32, 2) def forward(self, x): out = F.max_pool2d(torch.tanh(self.conv1(x)), 2) out = F.max_pool2d(torch.tanh(self.conv2(out)), 2) out = out.view(-1, 16 * 8 * 8) out = torch.tanh(self.fc1(out)) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, 
tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (32, 1024), (1024, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (2, 32), (32, 1)) assert_size_stride(primals_9, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_tanh_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_tanh_2[grid(65536)](buf5, primals_5, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0 ), reinterpret_tensor(primals_6, (1024, 32), (1, 1024), 0), out =buf8) buf9 = buf8 del buf8 triton_poi_fused_tanh_4[grid(512)](buf9, primals_7, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (16, 1024), (1024, 1), 0), buf9, primals_8, primals_6) class NetWidthNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(32, 16, kernel_size=3, padding=1) self.fc1 = nn.Linear(16 * 8 * 8, 32) self.fc2 = nn.Linear(32, 2) def forward(self, input_0): primals_1 = 
self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
dustasa/senior_software_HW
NetWidth
false
3451
[ "Apache-2.0" ]
0
767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
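NetWidth follows the same structure as NetDepth but fuses a tanh epilogue instead of relu: the convolutions run through extern_kernels.convolution with bias=None, and triton_poi_fused_convolution_tanh_* adds the bias and applies tanh in a single elementwise pass. A hedged sketch of that epilogue (tensor names are illustrative):

import torch

y = torch.rand(4, 32, 64, 64)                    # conv output, bias not yet added
bias = torch.rand(32)
fused = torch.tanh(y + bias.view(1, -1, 1, 1))   # tmp2 = tmp0 + tmp1; tmp3 = tanh(tmp2)
print(fused.shape)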
NetRes
import torch import torch.nn as nn import torch.utils import torch.nn.functional as F class NetRes(nn.Module): def __init__(self, n_chans1=32): super().__init__() self.n_chans1 = n_chans1 self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3, padding=1) self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32) self.fc2 = nn.Linear(32, 2) def forward(self, x): out = F.max_pool2d(torch.relu(self.conv1(x)), 2) out = F.max_pool2d(torch.relu(self.conv2(out)), 2) out1 = out out = F.max_pool2d(torch.relu(self.conv3(out)) + out1, 2) out = out.view(-1, 4 * 4 * self.n_chans1 // 2) out = torch.relu(self.fc1(out)) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 16 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, None) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = tmp4 <= tmp7 tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp8, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (32, 256), (256, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (2, 32), (32, 1)) assert_size_stride(primals_11, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(524288)](buf1, 
primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1)) buf9 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_4[grid(16384) ](buf8, primals_7, buf6, buf9, buf15, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf8 del primals_7 buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8) buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10, buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (16, 256), (256, 1), 0), reinterpret_tensor(primals_8, (256, 32), (1, 256), 0), out=buf12) buf13 = buf12 del buf12 triton_poi_fused_relu_6[grid(512)](buf13, primals_9, 512, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_11, buf13, reinterpret_tensor( primals_10, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf14) del primals_11 return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16, 256), (256, 1), 0), buf13, primals_10, primals_8, buf15) class NetResNew(nn.Module): def __init__(self, n_chans1=32): super().__init__() self.n_chans1 = n_chans1 self.conv1 = nn.Conv2d(3, n_chans1, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(n_chans1, n_chans1 // 2, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(n_chans1 // 2, n_chans1 // 2, kernel_size=3, padding=1) self.fc1 = nn.Linear(4 * 4 * n_chans1 // 2, 32) self.fc2 = nn.Linear(32, 2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
dustasa/senior_software_HW
NetRes
false
3452
[ "Apache-2.0" ]
0
767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
https://github.com/dustasa/senior_software_HW/tree/767d1d7bbd5e7d7414c17fa14b92b942e53d84ed
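The one kernel unique to NetRes, triton_poi_fused_add_convolution_relu_threshold_backward_4, fuses the residual branch: it adds the conv bias, applies relu, adds the skip tensor, and also writes the act <= 0 mask (buf15) that inductor retains for the ReLU backward pass. A hedged eager sketch:

import torch

conv_out = torch.rand(4, 16, 16, 16)
bias, skip = torch.rand(16), torch.rand(4, 16, 16, 16)
act = torch.relu(conv_out + bias.view(1, -1, 1, 1))
out = act + skip             # buf9
mask = act <= 0.0            # buf15, consumed by the backward graph
print(out.shape, mask.dtype)  # torch.Size([4, 16, 16, 16]) torch.bool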
L2
import torch import torch.nn as nn class L2(nn.Module): def __init__(self): super(L2, self).__init__() def forward(self, output, target): lossvalue = torch.norm(output - target, p=2, dim=1).mean() return lossvalue def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = 64.0 tmp24 = tmp22 / tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L2New(nn.Module): def __init__(self): super(L2New, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
eight0153/flownet2-pytorch
L2
false
3,453
[ "Apache-2.0" ]
0
cc2964233cd18c8db05d1751281c6ab9d3165da6
https://github.com/eight0153/flownet2-pytorch/tree/cc2964233cd18c8db05d1751281c6ab9d3165da6
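triton_per_fused_linalg_vector_norm_mean_sub_0 collapses the whole L2 module into one persistent reduction: the four loads at offsets 0/16/32/48 unroll dim=1, the sqrt of the summed squares gives the per-position norm, and the final sum/64 is the mean. The eager equivalent is just the original forward:

import torch

out, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = torch.norm(out - tgt, p=2, dim=1).mean()   # one scalar, matching buf1
print(loss)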
SIREN_CONV
import torch import numpy as np import torch.nn as nn def act(act_fun='LeakyReLU'): """ Either string defining an activation function or module (e.g. nn.ReLU) """ if isinstance(act_fun, str): if act_fun == 'LeakyReLU': return nn.LeakyReLU(0.2, inplace=True) elif act_fun == 'Swish': return Swish() elif act_fun[:3] == 'ELU': if len(act_fun) > 3: param = float(act_fun[3:]) return nn.ELU(param, inplace=True) return nn.ELU(inplace=True) elif act_fun == 'ReLU': return nn.ReLU() elif act_fun == 'tanh': return Tanh() elif act_fun == 'sine': return Sin() elif act_fun == 'soft': return nn.Softplus() elif act_fun == 'none': return nn.Sequential() else: assert False else: return act_fun() class Swish(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Swish, self).__init__() self.s = nn.Sigmoid() def forward(self, x): return x * self.s(x) class Tanh(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Tanh, self).__init__() def forward(self, x): return torch.tanh(x) class Sin(nn.Module): """ https://arxiv.org/abs/1710.05941 The hype was so huge that I could not help but try it """ def __init__(self): super(Sin, self).__init__() def forward(self, x): return torch.sin(x) class SIREN_layer(nn.Module): def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30): super(SIREN_layer, self).__init__() self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias =True) self.act_fun = act(act_fun) self.omega_0 = omega_0 self.in_features = ch_in self.frist = frist self.init() def init(self): with torch.no_grad(): if self.frist: self.conv1.weight.uniform_(-1 / self.in_features, 1 / self. in_features) else: self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) / self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0) def forward(self, x): x = self.conv1(x) return self.act_fun(self.omega_0 * x) class SIREN_CONV(nn.Module): def __init__(self, ch_in, ch_out): super(SIREN_CONV, self).__init__() self.conv1 = SIREN_layer(ch_in, 64, frist=True) self.conv2 = SIREN_layer(64, 32) self.conv3 = SIREN_layer(32, ch_out) self.conv = nn.Sequential(self.conv1, self.conv2, self.conv3) def forward(self, x): x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ch_in': 4, 'ch_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_mul_sin_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 30.0 tmp4 = tmp2 * tmp3 tmp5 = tl_math.sin(tmp4) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp5, None) @triton.jit def triton_poi_fused_convolution_mul_sin_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 30.0 tmp4 = tmp2 * tmp3 tmp5 = tl_math.sin(tmp4) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp5, None) @triton.jit def triton_poi_fused_convolution_mul_sin_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 30.0 tmp4 = tmp2 * tmp3 tmp5 = tl_math.sin(tmp4) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch. 
            float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_mul_sin_0[grid(4096)](buf1, primals_2,
            buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
        buf4 = buf3
        del buf3
        buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1),
            torch.float32)
        triton_poi_fused_convolution_mul_sin_1[grid(2048)](buf4, primals_5,
            buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
        buf7 = buf6
        del buf6
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_convolution_mul_sin_2[grid(256)](buf7, primals_7,
            buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
    return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
        buf4, buf5, buf7)


def act(act_fun='LeakyReLU'):
    """
        Either string defining an activation function or module (e.g. nn.ReLU)
    """
    if isinstance(act_fun, str):
        if act_fun == 'LeakyReLU':
            return nn.LeakyReLU(0.2, inplace=True)
        elif act_fun == 'Swish':
            return Swish()
        elif act_fun[:3] == 'ELU':
            if len(act_fun) > 3:
                param = float(act_fun[3:])
                return nn.ELU(param, inplace=True)
            return nn.ELU(inplace=True)
        elif act_fun == 'ReLU':
            return nn.ReLU()
        elif act_fun == 'tanh':
            return Tanh()
        elif act_fun == 'sine':
            return Sin()
        elif act_fun == 'soft':
            return nn.Softplus()
        elif act_fun == 'none':
            return nn.Sequential()
        else:
            assert False, 'unknown activation function: %s' % act_fun
    else:
        return act_fun()


class Swish(nn.Module):
    """
        https://arxiv.org/abs/1710.05941
        The hype was so huge that I could not help but try it
    """

    def __init__(self):
        super(Swish, self).__init__()
        self.s = nn.Sigmoid()

    def forward(self, x):
        return x * self.s(x)


class Tanh(nn.Module):
    """
        Elementwise tanh wrapped as a module
    """

    def __init__(self):
        super(Tanh, self).__init__()

    def forward(self, x):
        return torch.tanh(x)


class Sin(nn.Module):
    """
        Elementwise sine, the SIREN nonlinearity
    """

    def __init__(self):
        super(Sin, self).__init__()

    def forward(self, x):
        return torch.sin(x)


class SIREN_layer(nn.Module):

    def __init__(self, ch_in, ch_out, first=False, act_fun='sine', omega_0=30):
        super(SIREN_layer, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1,
            bias=True)
        self.act_fun = act(act_fun)
        self.omega_0 = omega_0
        self.in_features = ch_in
        self.first = first
        self.init()

    def init(self):
        with torch.no_grad():
            if self.first:
                self.conv1.weight.uniform_(-1 / self.in_features, 1 /
                    self.in_features)
            else:
                self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
                    self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)

    def forward(self, x):
        x = self.conv1(x)
        return self.act_fun(self.omega_0 * x)


class SIREN_CONVNew(nn.Module):

    def __init__(self, ch_in, ch_out):
        super(SIREN_CONVNew, self).__init__()
        self.conv1 = SIREN_layer(ch_in, 64, first=True)
        self.conv2 = SIREN_layer(64, 32)
        self.conv3 = SIREN_layer(32, ch_out)
        self.conv = nn.Sequential(self.conv1, self.conv2, self.conv3)

    def forward(self, input_0):
        primals_1 = self.conv1.conv1.weight
        primals_2 = self.conv1.conv1.bias
        primals_4 = self.conv2.conv1.weight
        primals_5 = self.conv2.conv1.bias
        primals_6 = self.conv3.conv1.weight
        primals_7 = self.conv3.conv1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
dustlrdk/noise2self
SIREN_CONV
false
3,454
[ "MIT" ]
0
46e8c4650f7ec4f664448417fecd39b4cae477f7
https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7
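A note on why the fused kernels above suffice: every SIREN_layer is a 1x1 convolution, which acts per pixel exactly like a Linear over channels, followed by an elementwise sin(30 * x); inductor can therefore lower each layer to an extern convolution plus one pointwise kernel. A minimal sketch of that equivalence (conv/lin here are illustrative stand-ins, not part of the record):

import torch
import torch.nn as nn

conv = nn.Conv2d(4, 8, kernel_size=1)
lin = nn.Linear(4, 8)
with torch.no_grad():
    lin.weight.copy_(conv.weight.view(8, 4))  # 1x1 kernels are just a channel matrix
    lin.bias.copy_(conv.bias)
x = torch.rand(2, 4, 5, 5)
y_conv = torch.sin(30.0 * conv(x))
y_lin = torch.sin(30.0 * lin(x.permute(0, 2, 3, 1))).permute(0, 3, 1, 2)
assert torch.allclose(y_conv, y_lin, atol=1e-5)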
GEGLU
import torch
import torch.nn as nn


class PositionWiseFeedForward(nn.Module):
    """
    title: Position-wise Feed-Forward Network (FFN)
    summary: Documented reusable implementation of the position-wise feedforward network.

    # Position-wise Feed-Forward Network (FFN)

    This is a [PyTorch](https://pytorch.org) implementation
    of the position-wise feedforward network used in transformers.

    FFN consists of two fully connected layers.
    The number of dimensions in the hidden layer, $d_{ff}$, is generally set to around
    four times that of the token embedding $d_{model}$.
    So it is sometimes also called the expand-and-contract network.

    There is an activation at the hidden layer, which is
    usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$

    That is, the FFN function is,
    $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$
    where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.

    Sometimes the
    GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
    $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$

    ### Gated Linear Units

    This is a generic implementation that supports different variants including
    [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
    We have also implemented experiments on these:

    * [experiment that uses `labml.configs`](glu_variants/experiment.html)
    * [simpler version from scratch](glu_variants/simple.html)
    """

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
        activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
        bias2: 'bool'=True, bias_gate: 'bool'=True):
        """
        * `d_model` is the number of features in a token embedding
        * `d_ff` is the number of features in the hidden layer of the FFN
        * `dropout` is dropout probability for the hidden layer
        * `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
        """
        super().__init__()
        self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
        self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        self.is_gated = is_gated
        if is_gated:
            self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)

    def forward(self, x: 'torch.Tensor'):
        g = self.activation(self.layer1(x))
        if self.is_gated:
            x = g * self.linear_v(x)
        else:
            x = g
        x = self.dropout(x)
        return self.layer2(x)


class GEGLU(nn.Module):

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
        super().__init__()
        self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU(
            ), True, False, False, False)

    def forward(self, x: 'torch.Tensor'):
        return self.ffn(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp9 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_mul_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class PositionWiseFeedForward(nn.Module): """ title: Position-wise Feed-Forward Network (FFN) summary: Documented reusable implementation of the position wise feedforward network. # Position-wise Feed-Forward Network (FFN) This is a [PyTorch](https://pytorch.org) implementation of position-wise feedforward network used in transformer. FFN consists of two fully connected layers. Number of dimensions in the hidden layer $d_{ff}$, is generally set to around four times that of the token embedding $d_{model}$. So it is sometime also called the expand-and-contract network. There is an activation at the hidden layer, which is usually set to ReLU (Rectified Linear Unit) activation, $$\\max(0, x)$$ That is, the FFN function is, $$FFN(x, W_1, W_2, b_1, b_2) = \\max(0, x W_1 + b_1) W_2 + b_2$$ where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters. Sometimes the GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU. $$x \\Phi(x)$$ where $\\Phi(x) = P(X \\le x), X \\sim \\mathcal{N}(0,1)$ ### Gated Linear Units This is a generic implementation that supports different variants including [Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU). 
    We have also implemented experiments on these:

    * [experiment that uses `labml.configs`](glu_variants/experiment.html)
    * [simpler version from scratch](glu_variants/simple.html)
    """

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1,
        activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True,
        bias2: 'bool'=True, bias_gate: 'bool'=True):
        """
        * `d_model` is the number of features in a token embedding
        * `d_ff` is the number of features in the hidden layer of the FFN
        * `dropout` is dropout probability for the hidden layer
        * `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
        """
        super().__init__()
        self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
        self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        self.is_gated = is_gated
        if is_gated:
            self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)

    def forward(self, x: 'torch.Tensor'):
        g = self.activation(self.layer1(x))
        if self.is_gated:
            x = g * self.linear_v(x)
        else:
            x = g
        x = self.dropout(x)
        return self.layer2(x)


class GEGLUNew(nn.Module):

    def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1):
        super().__init__()
        self.ffn = PositionWiseFeedForward(d_model, d_ff, dropout, nn.GELU(
            ), True, False, False, False)

    def forward(self, input_0):
        primals_1 = self.ffn.layer1.weight
        primals_3 = self.ffn.layer2.weight
        primals_4 = self.ffn.linear_v.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
edchengmoore/pytorch_tabular
GEGLU
false
3,455
[ "MIT" ]
0
25f87089fbed95b46f2a1a8a96fba1f581aa8af1
https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1
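triton_poi_fused_gelu_mul_0 above evaluates the exact erf-based GELU (the constant 0.7071067811865476 is 1/sqrt(2)) and the gating product in a single pass over the two matmul outputs, i.e. GELU(x W1) * (x V) in the eager module. A hedged eager-mode restatement (geglu_reference is illustrative only, not part of the record):

import math
import torch
import torch.nn.functional as F

def geglu_reference(x, w1, v):
    a = x @ w1.t()
    gelu = 0.5 * a * (1.0 + torch.erf(a / math.sqrt(2.0)))  # erf form, as in the kernel
    return gelu * (x @ v.t())

x = torch.rand(64, 4)
w1, v = torch.rand(4, 4), torch.rand(4, 4)
assert torch.allclose(geglu_reference(x, w1, v),
                      F.gelu(x @ w1.t()) * (x @ v.t()), atol=1e-6)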
ExtremeLinear
import math import torch from torch import nn from torch import autograd from torch.nn import init class ExtremeLinearFunction(autograd.Function): @staticmethod def forward(ctx, input, forward_weight, feedback_weight): ctx.save_for_backward(input, forward_weight, feedback_weight) output = input.mm(forward_weight.t()) ctx.output = output return output @staticmethod def backward(ctx, grad_output): input, forward_weight, _feedback_weight = ctx.saved_tensors grad_input = grad_weight = None if ctx.needs_input_grad[1]: inv_x = torch.pinverse(input) error_weight = inv_x.mm(ctx.output - grad_output) grad_weight = forward_weight - error_weight.t() if ctx.needs_input_grad[0]: grad_input = grad_output.mm(forward_weight) return grad_input, grad_weight, None class ExtremeLinear(nn.Module): def __init__(self, input_features, output_features): super(ExtremeLinear, self).__init__() self.input_features = input_features self.output_features = output_features self.forward_weight = nn.Parameter(torch.Tensor(output_features, input_features)) self.feedback_weight = nn.Parameter(torch.Tensor(output_features, input_features)) self.reset_parameters() def reset_parameters(self) ->None: init.kaiming_uniform_(self.forward_weight, a=math.sqrt(5)) init.kaiming_uniform_(self.feedback_weight, a=math.sqrt(5)) def forward(self, input): return ExtremeLinearFunction.apply(input, self.forward_weight, self .feedback_weight) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_features': 4, 'output_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn from torch import autograd from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_full_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.full([1], 0.0, tl.float64) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None) @triton.jit def triton_poi_fused_full_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.full([1], 1e-15, tl.float64) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((), (), torch.float64) get_raw_stream(0) triton_poi_fused_full_0[grid(1)](buf1, 1, XBLOCK=1, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float64) triton_poi_fused_full_1[grid(1)](buf2, 1, XBLOCK=1, num_warps=1, num_stages=1) buf3 = torch.ops.aten.linalg_pinv.atol_rtol_tensor(primals_3, atol= buf1, rtol=buf2) del buf1 del buf2 del primals_3 buf4 = buf3 del buf3 return buf0, primals_1, buf0, buf4 class ExtremeLinearFunction(autograd.Function): @staticmethod def forward(ctx, input, forward_weight, feedback_weight): ctx.save_for_backward(input, forward_weight, feedback_weight) output = input.mm(forward_weight.t()) ctx.output = output return output @staticmethod def backward(ctx, grad_output): input, forward_weight, _feedback_weight = ctx.saved_tensors grad_input = grad_weight = None if ctx.needs_input_grad[1]: inv_x = torch.pinverse(input) error_weight = inv_x.mm(ctx.output - grad_output) grad_weight = forward_weight - error_weight.t() if ctx.needs_input_grad[0]: grad_input = grad_output.mm(forward_weight) return grad_input, grad_weight, None class ExtremeLinearNew(nn.Module): def __init__(self, input_features, output_features): super(ExtremeLinearNew, self).__init__() self.input_features = input_features self.output_features = output_features self.forward_weight = nn.Parameter(torch.Tensor(output_features, input_features)) self.feedback_weight = nn.Parameter(torch.Tensor(output_features, input_features)) self.reset_parameters() def reset_parameters(self) ->None: init.kaiming_uniform_(self.forward_weight, a=math.sqrt(5)) init.kaiming_uniform_(self.feedback_weight, a=math.sqrt(5)) def forward(self, input_0): primals_1 = self.forward_weight primals_2 = self.feedback_weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
crazyleg/lateral_research
ExtremeLinear
false
3,456
[ "MIT" ]
0
e186d218cd4b3ac3770e9fa375bc57133e4dafe5
https://github.com/crazyleg/lateral_research/tree/e186d218cd4b3ac3770e9fa375bc57133e4dafe5
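The custom backward above does not return the analytic weight gradient: it computes error_weight = pinv(X) (X W^T - grad_output) and returns W - error_weight^T, so a vanilla SGD step with lr=1.0 replaces W outright with that least-squares solution, in the spirit of extreme learning machines. A small sketch of one such step (a hypothetical training loop around the eager ExtremeLinear class from the record, so the custom Function is actually exercised):

import torch
import torch.nn.functional as F

layer = ExtremeLinear(4, 4)
opt = torch.optim.SGD(layer.parameters(), lr=1.0)
x, target = torch.rand(8, 4), torch.rand(8, 4)
loss = F.mse_loss(layer(x), target)
opt.zero_grad()
loss.backward()  # runs ExtremeLinearFunction.backward, not the autograd matmul rule
opt.step()       # lr=1.0 makes W exactly (pinv(x) @ (x @ W_old.T - grad_output)).T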
ClsHead
import torch
import torch.nn as nn
import torch.nn.functional as F


class ClsHead(nn.Module):
    """
    Classification head that predicts the orientation class of the input
    Args:
        in_channels (int): number of channels of the input feature map
        class_dim (int): number of orientation classes to score
    """

    def __init__(self, in_channels, class_dim, **kwargs):
        super(ClsHead, self).__init__()
        self.training = False
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(in_channels, class_dim, bias=True)

    def forward(self, x):
        x = self.pool(x)
        x = torch.reshape(x, shape=[x.shape[0], x.shape[1]])
        x = self.fc(x)
        if not self.training:
            x = F.softmax(x, dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'class_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf2 del 
        buf2
        triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf3
    return buf4, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf4


class ClsHeadNew(nn.Module):
    """
    Classification head that predicts the orientation class of the input
    Args:
        in_channels (int): number of channels of the input feature map
        class_dim (int): number of orientation classes to score
    """

    def __init__(self, in_channels, class_dim, **kwargs):
        super(ClsHeadNew, self).__init__()
        self.training = False
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(in_channels, class_dim, bias=True)

    def forward(self, input_0):
        primals_2 = self.fc.weight
        primals_3 = self.fc.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
eminem171333491/PaddleOCR2Pytorch
ClsHead
false
3,457
[ "Apache-2.0" ]
0
ec466bb3a689eccb9290e9f80812a45301d3b030
https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030
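triton_per_fused_mean_0 above folds AdaptiveAvgPool2d(1) and the subsequent reshape into a single reduction: a mean over the 16 spatial positions per (batch, channel) pair. The eager equivalent (illustrative):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
pooled = torch.reshape(nn.AdaptiveAvgPool2d(1)(x), [x.shape[0], x.shape[1]])
assert torch.allclose(pooled, x.mean(dim=(2, 3)), atol=1e-6)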
CTCHead
import torch import torch.nn as nn import torch.nn.functional as F class CTCHead(nn.Module): def __init__(self, in_channels, out_channels=6625, fc_decay=0.0004, mid_channels=None, **kwargs): super(CTCHead, self).__init__() if mid_channels is None: self.fc = nn.Linear(in_channels, out_channels, bias=True) else: self.fc1 = nn.Linear(in_channels, mid_channels, bias=True) self.fc2 = nn.Linear(mid_channels, out_channels, bias=True) self.out_channels = out_channels self.mid_channels = mid_channels def forward(self, x, labels=None): if self.mid_channels is None: predicts = self.fc(x) else: predicts = self.fc1(x) predicts = self.fc2(predicts) if not self.training: predicts = F.softmax(predicts, dim=2) return predicts def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 424000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6625 x3 = xindex // 6625 x2 = xindex // 26500 tmp0 = tl.load(in_ptr0 + (x0 + 6656 * x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + 26624 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (6656 + x0 + 26624 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (13312 + x0 + 26624 * x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (19968 + x0 + 26624 * x2), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x0 + 6656 * x3), tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 424000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6625 x3 = xindex // 6625 x2 = xindex // 26500 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6656 * x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + 26624 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (6656 + x0 + 26624 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (13312 + x0 + 26624 * x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (19968 + x0 + 26624 * x2), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x4, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6625, 4), (4, 1)) assert_size_stride(primals_2, (6625,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 6625), (6656, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 6625), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 6625), (106496, 26624, 6656, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(424000)](buf0, buf1, 424000, XBLOCK=512, num_warps=8, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 6625), (106000, 26500, 6625, 1), torch.float32) triton_poi_fused__softmax_1[grid(424000)](buf1, buf2, 424000, XBLOCK=512, num_warps=8, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class CTCHeadNew(nn.Module): def __init__(self, in_channels, out_channels=6625, fc_decay=0.0004, mid_channels=None, **kwargs): super(CTCHeadNew, self).__init__() if mid_channels is None: self.fc = nn.Linear(in_channels, out_channels, bias=True) else: self.fc1 = 
nn.Linear(in_channels, mid_channels, bias=True) self.fc2 = nn.Linear(mid_channels, out_channels, bias=True) self.out_channels = out_channels self.mid_channels = mid_channels def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
eminem171333491/PaddleOCR2Pytorch
CTCHead
false
3,458
[ "Apache-2.0" ]
0
ec466bb3a689eccb9290e9f80812a45301d3b030
https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030
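Two details worth noting in the kernels above: buf0 is laid out with row stride 6656 rather than 6625 (the logits row appears to be padded, presumably for memory alignment, which explains the stride-6656 and 26624 index arithmetic), and the softmax runs along dim=2 of the (4, 4, 4, 6625) output rather than the last axis. An eager restatement of that reduction (illustrative):

import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4, 6625)
probs = F.softmax(logits, dim=2)  # dim=2, matching the stride-6656 steps above
assert torch.allclose(probs.sum(dim=2), torch.ones(4, 4, 6625), atol=1e-5)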
MultiHeadAttention
import torch import torch.nn as nn import torch.nn.functional as F class MultiHeadAttention(nn.Module): """ Multi-Head Attention """ def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.d_key = d_key self.d_value = d_value self.d_model = d_model self.dropout_rate = dropout_rate self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.v_fc = torch.nn.Linear(in_features=d_model, out_features= d_value * n_head, bias=False) self.proj_fc = torch.nn.Linear(in_features=d_value * n_head, out_features=d_model, bias=False) def _prepare_qkv(self, queries, keys, values, cache=None): if keys is None: keys, values = queries, queries static_kv = False else: static_kv = True q = self.q_fc(queries) q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self .d_key]) q = q.permute(0, 2, 1, 3) if cache is not None and static_kv and 'static_k' in cache: k = cache['static_k'] v = cache['static_v'] else: k = self.k_fc(keys) v = self.v_fc(values) k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key]) k = k.permute(0, 2, 1, 3) v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value]) v = v.permute(0, 2, 1, 3) if cache is not None: if static_kv and 'static_k' not in cache: cache['static_k'], cache['static_v'] = k, v elif not static_kv: cache_k, cache_v = cache['k'], cache['v'] k = torch.cat([cache_k, k], dim=2) v = torch.cat([cache_v, v], dim=2) cache['k'], cache['v'] = k, v return q, k, v def forward(self, queries, keys, values, attn_bias, cache=None): keys = queries if keys is None else keys values = keys if values is None else values q, k, v = self._prepare_qkv(queries, keys, values, cache) product = torch.matmul(q, k.transpose(2, 3)) product = product * self.d_model ** -0.5 if attn_bias is not None: product += attn_bias weights = F.softmax(product, dim=-1) if self.dropout_rate: weights = F.dropout(weights, p=self.dropout_rate) out = torch.matmul(weights, v) out = out.permute(0, 2, 1, 3) out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape [2] * out.shape[3]]) out = self.proj_fc(out) return out def get_inputs(): return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4]), torch.rand( [4, 4, 1, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_key': 4, 'd_value': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_add_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 16 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x3, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = 
empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32) buf5 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_add_mul_0[grid(16)](buf3, primals_7, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_add_mul_1[grid(64)](buf6, primals_7, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 del buf5 del primals_7 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf8) return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0 ), primals_8, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) class MultiHeadAttentionNew(nn.Module): """ Multi-Head Attention """ def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0): super(MultiHeadAttentionNew, self).__init__() self.n_head = n_head self.d_key = d_key self.d_value = d_value self.d_model = d_model self.dropout_rate = dropout_rate self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.v_fc = torch.nn.Linear(in_features=d_model, out_features= d_value * n_head, bias=False) self.proj_fc = torch.nn.Linear(in_features=d_value * n_head, out_features=d_model, bias=False) def _prepare_qkv(self, queries, keys, values, cache=None): if keys is None: keys, values = queries, queries static_kv = False else: static_kv = True q = self.q_fc(queries) q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self .d_key]) q = q.permute(0, 2, 1, 3) if cache is not None and static_kv and 'static_k' in cache: k = cache['static_k'] v = cache['static_v'] else: k = self.k_fc(keys) v = self.v_fc(values) k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key]) k = k.permute(0, 2, 1, 3) v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value]) v = v.permute(0, 2, 1, 3) if cache is not None: if static_kv and 'static_k' not in cache: cache['static_k'], cache['static_v'] = k, v elif not static_kv: cache_k, cache_v = cache['k'], cache['v'] k = torch.cat([cache_k, k], dim=2) v = torch.cat([cache_v, v], dim=2) cache['k'], cache['v'] = k, v return q, k, v def forward(self, input_0, input_1, input_2, 
input_3): primals_3 = self.q_fc.weight primals_5 = self.k_fc.weight primals_6 = self.v_fc.weight primals_7 = self.proj_fc.weight primals_1 = input_0 primals_2 = input_1 primals_4 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
eminem171333491/PaddleOCR2Pytorch
MultiHeadAttention
false
3,459
[ "Apache-2.0" ]
0
ec466bb3a689eccb9290e9f80812a45301d3b030
https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030
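The traced call above covers only the cache=None path, and the constant 0.5 in both kernels is d_model ** -0.5 for d_model = 4 (note the module scales by d_model rather than d_key as in the usual attention formulation). The static key/value cache path, which the trace never exercises, would be driven like this (a hypothetical decoding sketch with made-up shapes, using the eager class from the record):

import torch

mha = MultiHeadAttention(d_key=4, d_value=4, d_model=4)
memory = torch.rand(2, 5, 4)  # e.g. encoder output, fixed across decoding steps
cache = {}
out1 = mha(torch.rand(2, 1, 4), memory, memory, attn_bias=None, cache=cache)
out2 = mha(torch.rand(2, 1, 4), memory, memory, attn_bias=None, cache=cache)
assert cache['static_k'].shape == (2, 1, 5, 4)  # computed once, reused for out2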
AddNorm
import torch
import torch.nn as nn


class AddNorm(nn.Module):
    """
    Applies Dropout to Y, adds the residual input X, and then applies LayerNorm.
    Standard Add & Norm operation in Transformers
    """

    def __init__(self, input_dim: 'int', dropout: 'float'):
        super(AddNorm, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(input_dim)

    def forward(self, X: 'torch.Tensor', Y: 'torch.Tensor') ->torch.Tensor:
        return self.ln(self.dropout(Y) + X)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'dropout': 0.5}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_1, primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256, 
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del primals_1
        del primals_2
        del primals_3
        del primals_4
    return buf3, buf2


class AddNormNew(nn.Module):
    """
    Applies Dropout to Y, adds the residual input X, and then applies LayerNorm.
    Standard Add & Norm operation in Transformers
    """

    def __init__(self, input_dim: 'int', dropout: 'float'):
        super(AddNormNew, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(input_dim)

    def forward(self, input_0, input_1):
        primals_3 = self.ln.weight
        primals_4 = self.ln.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
edchengmoore/pytorch_tabular
AddNorm
false
3,460
[ "MIT" ]
0
25f87089fbed95b46f2a1a8a96fba1f581aa8af1
https://github.com/edchengmoore/pytorch_tabular/tree/25f87089fbed95b46f2a1a8a96fba1f581aa8af1
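The pair of kernels above is a textbook two-pass LayerNorm over (X + Y): the first computes the per-row mean and biased variance along the size-4 last axis, the second applies (v - mu) * rsqrt(var + 1e-5) * gamma + beta. Note there is no dropout in the trace, consistent with an eval-mode capture of Dropout(0.5). The same computation in eager PyTorch (illustrative):

import torch

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
ln = torch.nn.LayerNorm(4)
s = x + y
mu = s.mean(dim=-1, keepdim=True)
var = s.var(dim=-1, unbiased=False, keepdim=True)  # biased variance, as in the kernel
manual = (s - mu) * torch.rsqrt(var + ln.eps) * ln.weight + ln.bias
assert torch.allclose(manual, ln(s), atol=1e-6)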
ResidualBlock
import torch import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.prelu = nn.PReLU() self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) def forward(self, x): residual = self.conv1(x) residual = self.prelu(residual) residual = self.conv2(residual) return x + residual def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_convolution_1[grid(256)](buf4, primals_3, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf4, primals_1, primals_3, primals_4, primals_5, buf1, buf2 class ResidualBlockNew(nn.Module): def __init__(self, channels): super(ResidualBlockNew, self).__init__() self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.prelu = nn.PReLU() self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.prelu.weight primals_5 = self.conv2.weight primals_6 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
elaina03/Single-Image-Dehazing
ResidualBlock
false
3,463
[ "MIT" ]
0
a6a29cb5591204f8066729df4053db0ea2b54aff
https://github.com/elaina03/Single-Image-Dehazing/tree/a6a29cb5591204f8066729df4053db0ea2b54aff
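triton_poi_fused__prelu_kernel_convolution_0 above fuses the bias add with PReLU evaluated as where(x > 0, x, a * x), where a is the single shared slope of nn.PReLU() (broadcast as tmp6). An eager check of that identity (illustrative):

import torch

prelu = torch.nn.PReLU()  # one learnable slope, default 0.25
x = torch.randn(4, 4, 4, 4)
manual = torch.where(x > 0, x, prelu.weight * x)
assert torch.allclose(manual, prelu(x), atol=1e-6)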
Encoder
import torch import torch.nn as nn import torch.nn.functional as F class Encoder(nn.Module): def __init__(self, sample_size, condition_size, hidden_size): super().__init__() self.fc1 = nn.Linear(sample_size + condition_size, hidden_size) self.fc2 = nn.Dropout(p=0.5) self.fc3 = nn.Linear(hidden_size, hidden_size) def forward(self, x, c): x = torch.cat([x, c], 1) p_x = F.relu(self.fc1(x)) p_x = self.fc2(p_x) p_x = F.relu(self.fc3(p_x)) return p_x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'sample_size': 4, 'condition_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(16)](buf4, primals_6, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 return buf4, buf0, buf2, buf5, primals_5 class 
EncoderNew(nn.Module): def __init__(self, sample_size, condition_size, hidden_size): super().__init__() self.fc1 = nn.Linear(sample_size + condition_size, hidden_size) self.fc2 = nn.Dropout(p=0.5) self.fc3 = nn.Linear(hidden_size, hidden_size) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_1 = self.fc3.weight primals_6 = self.fc3.bias primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
ekrell/learn-planning-space
Encoder
false
3,464
[ "MIT" ]
0
730e448bffa4996b2b1ef3a5b00500dc172962ec
https://github.com/ekrell/learn-planning-space/tree/730e448bffa4996b2b1ef3a5b00500dc172962ec
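Note that triton_poi_fused_relu_threshold_backward_2 emits, besides the activation itself, a bool mask (output <= 0) saved as buf5: exactly what ReLU's backward needs to zero incoming gradients. A minimal sketch of how such a mask is used (illustrative, not the inductor backward itself):

import torch

z = torch.randn(4, 4)
out = torch.relu(z)
mask = out <= 0  # what buf5 stores
grad_in = torch.ones_like(out).masked_fill(mask, 0.0)
assert torch.equal(grad_in, (z > 0).float())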
LatentZ
import torch import torch.nn as nn class LatentZ(nn.Module): def __init__(self, hidden_size, latent_size): super().__init__() self.mu = nn.Linear(hidden_size, latent_size) self.logvar = nn.Linear(hidden_size, latent_size) def forward(self, p_x): mu = self.mu(p_x) logvar = self.logvar(p_x) std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return std * eps + mu, logvar, mu def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'latent_size': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp3 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_0[grid(256)](buf1, buf3, buf0, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf4, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3 class LatentZNew(nn.Module): def __init__(self, hidden_size, latent_size): super().__init__() self.mu = nn.Linear(hidden_size, latent_size) self.logvar = nn.Linear(hidden_size, latent_size) def forward(self, input_0): primals_1 = self.mu.weight primals_2 = self.mu.bias primals_4 = self.logvar.weight primals_5 = self.logvar.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1], output[2]
ekrell/learn-planning-space
LatentZ
false
3,467
[ "MIT" ]
0
730e448bffa4996b2b1ef3a5b00500dc172962ec
https://github.com/ekrell/learn-planning-space/tree/730e448bffa4996b2b1ef3a5b00500dc172962ec
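The fused kernel above is the VAE reparameterisation trick, z = mu + exp(0.5 * logvar) * eps with eps ~ N(0, I); the randn stays a separate aten op since it is stateful. A small eager sanity check (illustrative):

import torch

mu = torch.zeros(1000, 4)
logvar = torch.zeros(1000, 4)  # log-variance 0 -> std 1
eps = torch.randn_like(mu)
z = torch.exp(0.5 * logvar) * eps + mu  # the exact expression the kernel fuses
assert abs(z.mean().item()) < 0.2 and abs(z.std().item() - 1.0) < 0.2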
UpConv2D
import torch import torch.nn as nn class UpConv2D(nn.Module): def __init__(self, in_channels=3, out_channels=3, kernel_size=5, ratio=2): super(UpConv2D, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels * ratio ** 2, kernel_size, padding=kernel_size // 2) self.upscale = nn.PixelShuffle(ratio) def forward(self, input_): x = self.conv(input_) output = self.upscale(x) return output def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pixel_shuffle_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 2 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y0 = yindex % 64 y1 = yindex // 64 % 2 y2 = yindex // 128 % 64 y6 = yindex // 8192 y3 = yindex // 8192 % 3 y7 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * y2 + 4096 * x5 + 8192 * y1 + 16384 * y6), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 2 * y1 + 4 * y3), xmask, eviction_policy ='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x5 + 2 * y7), tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (12, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 3, 64, 2, 64, 2), (49152, 16384, 256, 128, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pixel_shuffle_0[grid(98304, 2)](buf0, primals_2, buf1, 98304, 2, XBLOCK=2, YBLOCK=512, num_warps=4, num_stages=1) del buf0 del primals_2 return reinterpret_tensor(buf1, (4, 3, 128, 128), (49152, 16384, 128, 1), 0 ), primals_1, primals_3 class UpConv2DNew(nn.Module): def __init__(self, in_channels=3, out_channels=3, kernel_size=5, ratio=2): super(UpConv2DNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels * ratio ** 2, kernel_size, padding=kernel_size // 2) self.upscale = nn.PixelShuffle(ratio) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
emirkonuk/defocus
UpConv2D
false
3468
[ "Apache-2.0" ]
0
da2977d2698eb20e9ab2a3bcd1fa4d05e1dd9b50
https://github.com/emirkonuk/defocus/tree/da2977d2698eb20e9ab2a3bcd1fa4d05e1dd9b50
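# Editorial note (illustrative sketch, not part of the dataset record):
# triton_poi_fused_pixel_shuffle_0 above fuses the convolution bias add with the
# PixelShuffle index rearrangement, writing directly into the upscaled
# (B, C, H*r, W*r) layout. PixelShuffle itself is a pure reshape + permute,
# which is what lets the bias add be folded into the same pass. A minimal eager
# check of that semantics:
import torch
import torch.nn as nn

B, C, H, W, r = 4, 3, 8, 8, 2
x = torch.rand(B, C * r ** 2, H, W)      # stands in for the conv output
bias = torch.rand(C * r ** 2)

ref = nn.PixelShuffle(r)(x + bias.view(1, -1, 1, 1))
# Equivalent explicit rearrangement: split channels into (C, r, r), then
# interleave the two r factors into the spatial dimensions.
manual = (x + bias.view(1, -1, 1, 1)).reshape(B, C, r, r, H, W) \
    .permute(0, 1, 4, 2, 5, 3).reshape(B, C, H * r, W * r)
assert torch.equal(ref, manual)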
PrototypicalNetwork
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.optim import torch.nn.parallel def L2SquareDist(A, B, average=True): assert A.dim() == 3 assert B.dim() == 3 assert A.size(0) == B.size(0) and A.size(2) == B.size(2) nB = A.size(0) Na = A.size(1) Nb = B.size(1) nC = A.size(2) AB = torch.bmm(A, B.transpose(1, 2)) AA = (A * A).sum(dim=2, keepdim=True).view(nB, Na, 1) BB = (B * B).sum(dim=2, keepdim=True).view(nB, 1, Nb) dist = AA.expand_as(AB) + BB.expand_as(AB) - 2 * AB if average: dist = dist / nC return dist class PrototypicalNetwork(nn.Module): def __init__(self, opt): super(PrototypicalNetwork, self).__init__() scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 1.0 self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls), requires_grad=True) def forward(self, features_test, features_train, labels_train): """Recognize novel categories based on the Prototypical Nets approach. Classify the test examples (i.e., `features_test`) using the available training examples (i.e., `features_test` and `labels_train`) using the Prototypical Nets approach. Args: features_test: A 3D tensor with shape [batch_size x num_test_examples x num_channels] that represents the test features of each training episode in the batch. features_train: A 3D tensor with shape [batch_size x num_train_examples x num_channels] that represents the train features of each training episode in the batch. labels_train: A 3D tensor with shape [batch_size x num_train_examples x nKnovel] that represents the train labels (encoded as 1-hot vectors) of each training episode in the batch. Return: scores_cls: A 3D tensor with shape [batch_size x num_test_examples x nKnovel] that represents the classification scores of the test feature vectors for the nKnovel novel categories. """ assert features_train.dim() == 3 assert labels_train.dim() == 3 assert features_test.dim() == 3 assert features_train.size(0) == labels_train.size(0) assert features_train.size(0) == features_test.size(0) assert features_train.size(1) == labels_train.size(1) assert features_train.size(2) == features_test.size(2) labels_train_transposed = labels_train.transpose(1, 2) prototypes = torch.bmm(labels_train_transposed, features_train) prototypes = prototypes.div(labels_train_transposed.sum(dim=2, keepdim=True).expand_as(prototypes)) scores_cls = -self.scale_cls * L2SquareDist(features_test, prototypes) return scores_cls def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'opt': _mock_config(scale_cls=1.0)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_add_div_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + 4 * x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x4), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + x3, xmask) tmp29 = tl.load(in_ptr2 + 0) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tmp27 = 0.25 tmp28 = tmp26 * tmp27 tmp31 = -tmp30 tmp32 = tmp31 * tmp28 tl.store(in_out_ptr0 + x3, tmp28, xmask) tl.store(out_ptr0 + x3, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), primals_1, out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
extern_kernels.bmm(primals_3, reinterpret_tensor(buf1, (4, 4, 4), ( 16, 1, 4), 0), out=buf2) buf3 = buf2 del buf2 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_neg_sub_1[grid(64)](buf4, primals_3, buf1, primals_4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del primals_3 del primals_4 return buf5, buf4 def L2SquareDist(A, B, average=True): assert A.dim() == 3 assert B.dim() == 3 assert A.size(0) == B.size(0) and A.size(2) == B.size(2) nB = A.size(0) Na = A.size(1) Nb = B.size(1) nC = A.size(2) AB = torch.bmm(A, B.transpose(1, 2)) AA = (A * A).sum(dim=2, keepdim=True).view(nB, Na, 1) BB = (B * B).sum(dim=2, keepdim=True).view(nB, 1, Nb) dist = AA.expand_as(AB) + BB.expand_as(AB) - 2 * AB if average: dist = dist / nC return dist class PrototypicalNetworkNew(nn.Module): def __init__(self, opt): super(PrototypicalNetworkNew, self).__init__() scale_cls = opt['scale_cls'] if 'scale_cls' in opt else 1.0 self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls), requires_grad=True) def forward(self, input_0, input_1, input_2): primals_4 = self.scale_cls primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Basasuya/FewShotWithoutForgetting
PrototypicalNetwork
false
3469
[ "MIT" ]
0
eecc70e416ed82999124ddfca1b145f6dbcd74a6
https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6
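# Editorial note (illustrative check, not part of the dataset record):
# L2SquareDist above expands ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, and
# triton_poi_fused_add_div_mul_neg_sub_1 fuses that expansion with the division
# by nC and the final -scale_cls multiply. The expansion agrees with a direct
# squared-distance computation:
import torch

def l2_square_dist(A, B, average=True):
    AB = torch.bmm(A, B.transpose(1, 2))
    AA = (A * A).sum(dim=2, keepdim=True)
    BB = (B * B).sum(dim=2, keepdim=True).transpose(1, 2)
    dist = AA + BB - 2 * AB                  # broadcasts over the AB shape
    return dist / A.size(2) if average else dist

A, B = torch.rand(4, 5, 8), torch.rand(4, 6, 8)
ref = torch.cdist(A, B, p=2) ** 2 / A.size(2)
assert torch.allclose(l2_square_dist(A, B), ref, atol=1e-5)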
EncoderLayer
import torch import torch.nn as nn import torch.nn.functional as F class Lambda(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x): return self.func(x) class FFN(nn.Module): """ Feed-Forward Network """ def __init__(self, d_inner_hid, d_model, dropout_rate): super(FFN, self).__init__() self.dropout_rate = dropout_rate self.fc1 = torch.nn.Linear(in_features=d_model, out_features= d_inner_hid) self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features= d_model) def forward(self, x): hidden = self.fc1(x) hidden = F.relu(hidden) if self.dropout_rate: hidden = F.dropout(hidden, p=self.dropout_rate) out = self.fc2(hidden) return out class MultiHeadAttention(nn.Module): """ Multi-Head Attention """ def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.d_key = d_key self.d_value = d_value self.d_model = d_model self.dropout_rate = dropout_rate self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.v_fc = torch.nn.Linear(in_features=d_model, out_features= d_value * n_head, bias=False) self.proj_fc = torch.nn.Linear(in_features=d_value * n_head, out_features=d_model, bias=False) def _prepare_qkv(self, queries, keys, values, cache=None): if keys is None: keys, values = queries, queries static_kv = False else: static_kv = True q = self.q_fc(queries) q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self .d_key]) q = q.permute(0, 2, 1, 3) if cache is not None and static_kv and 'static_k' in cache: k = cache['static_k'] v = cache['static_v'] else: k = self.k_fc(keys) v = self.v_fc(values) k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key]) k = k.permute(0, 2, 1, 3) v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value]) v = v.permute(0, 2, 1, 3) if cache is not None: if static_kv and 'static_k' not in cache: cache['static_k'], cache['static_v'] = k, v elif not static_kv: cache_k, cache_v = cache['k'], cache['v'] k = torch.cat([cache_k, k], dim=2) v = torch.cat([cache_v, v], dim=2) cache['k'], cache['v'] = k, v return q, k, v def forward(self, queries, keys, values, attn_bias, cache=None): keys = queries if keys is None else keys values = keys if values is None else values q, k, v = self._prepare_qkv(queries, keys, values, cache) product = torch.matmul(q, k.transpose(2, 3)) product = product * self.d_model ** -0.5 if attn_bias is not None: product += attn_bias weights = F.softmax(product, dim=-1) if self.dropout_rate: weights = F.dropout(weights, p=self.dropout_rate) out = torch.matmul(weights, v) out = out.permute(0, 2, 1, 3) out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape [2] * out.shape[3]]) out = self.proj_fc(out) return out class LambdaXY(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x, y): return self.func(x, y) class PrePostProcessLayer(nn.Module): """ PrePostProcessLayer """ def __init__(self, process_cmd, d_model, dropout_rate): super(PrePostProcessLayer, self).__init__() self.process_cmd = process_cmd self.functors = nn.ModuleList() cur_a_len = 0 cur_n_len = 0 cur_d_len = 0 
for cmd in self.process_cmd: if cmd == 'a': self.functors.add_module('add_res_connect_{}'.format( cur_a_len), LambdaXY(lambda x, y: x + y if y is not None else x)) cur_a_len += 1 elif cmd == 'n': layerNorm = torch.nn.LayerNorm(normalized_shape=d_model, elementwise_affine=True, eps=1e-05) self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm ) cur_n_len += 1 elif cmd == 'd': self.functors.add_module('add_drop_{}'.format(cur_d_len), Lambda(lambda x: F.dropout(x, p=dropout_rate) if dropout_rate else x)) cur_d_len += 1 def forward(self, x, residual=None): for i, (cmd, functor) in enumerate(zip(self.process_cmd, self.functors) ): if cmd == 'a': x = functor(x, residual) else: x = functor(x) return x class EncoderLayer(nn.Module): """ EncoderLayer """ def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'): super(EncoderLayer, self).__init__() self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, attention_dropout) self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.ffn = FFN(d_inner_hid, d_model, relu_dropout) self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) def forward(self, enc_input, attn_bias): attn_output = self.self_attn(self.preprocesser1(enc_input), None, None, attn_bias) attn_output = self.postprocesser1(attn_output, enc_input) ffn_output = self.ffn(self.preprocesser2(attn_output)) ffn_output = self.postprocesser2(ffn_output, attn_output) return ffn_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'd_key': 4, 'd_value': 4, 'd_model': 4, 'd_inner_hid': 4, 'prepostprocess_dropout': 0.5, 'attention_dropout': 0.5, 'relu_dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, 
xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 16), (16, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_mul_4[grid(64)](buf8, primals_7, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_add_mul_5[grid(256)](buf11, primals_7, buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf12 = torch.ops.aten.native_dropout.default(buf11, 0.5, True) buf13 = buf12[0] buf14 = buf12[1] del buf12 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf5, buf15, 256, XBLOCK=256, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16 ) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) triton_poi_fused_clone_2[grid(256)](buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf16 buf18 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf18) buf19 = torch.ops.aten.native_dropout.default(reinterpret_tensor( buf18, (4, 4, 4), (16, 4, 1), 0), 0.5, True) buf20 = buf19[0] buf21 = buf19[1] del buf19 buf22 = buf20 del buf20 triton_poi_fused_add_6[grid(64)](buf22, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf23 = buf1 del buf1 buf24 = buf0 del buf0 triton_poi_fused_native_layer_norm_0[grid(16)](buf22, buf23, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_native_layer_norm_1[grid(64)](buf22, buf23, buf24, primals_9, primals_10, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf23 del buf24 del primals_10 buf26 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0) del buf26 buf36 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf27, primals_12, buf36, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf28 = torch.ops.aten.native_dropout.default(buf27, 0.5, True) buf29 = buf28[0] buf30 = buf28[1] del buf28 buf31 = reinterpret_tensor(buf27, (16, 4), (4, 1), 0) del buf27 extern_kernels.addmm(primals_14, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf31) del primals_14 buf32 = torch.ops.aten.native_dropout.default(reinterpret_tensor( buf31, (4, 4, 4), (16, 4, 1), 0), 0.5, True) del buf31 buf33 = buf32[0] buf34 = buf32[1] del buf32 buf35 = buf33 del buf33 triton_poi_fused_add_6[grid(64)](buf35, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf35, primals_3, primals_9, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), buf11, buf14, reinterpret_tensor(buf17, (16, 16), (16, 1), 0 ), buf21, buf22, reinterpret_tensor(buf25, (16, 4), (4, 1), 0 ), buf30, reinterpret_tensor(buf29, (16, 4), (4, 1), 0 ), buf34, primals_13, buf36, primals_11, primals_8, reinterpret_tensor( buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0 ), primals_6, primals_5, primals_4 class Lambda(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x): return self.func(x) class FFN(nn.Module): """ Feed-Forward Network """ def __init__(self, d_inner_hid, d_model, dropout_rate): super(FFN, self).__init__() self.dropout_rate = dropout_rate self.fc1 = torch.nn.Linear(in_features=d_model, out_features= d_inner_hid) self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features= d_model) def forward(self, x): hidden = self.fc1(x) hidden = F.relu(hidden) if self.dropout_rate: hidden = F.dropout(hidden, p=self.dropout_rate) out = self.fc2(hidden) return out class MultiHeadAttention(nn.Module): """ Multi-Head Attention """ def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0): super(MultiHeadAttention, self).__init__() 
self.n_head = n_head self.d_key = d_key self.d_value = d_value self.d_model = d_model self.dropout_rate = dropout_rate self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.v_fc = torch.nn.Linear(in_features=d_model, out_features= d_value * n_head, bias=False) self.proj_fc = torch.nn.Linear(in_features=d_value * n_head, out_features=d_model, bias=False) def _prepare_qkv(self, queries, keys, values, cache=None): if keys is None: keys, values = queries, queries static_kv = False else: static_kv = True q = self.q_fc(queries) q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self .d_key]) q = q.permute(0, 2, 1, 3) if cache is not None and static_kv and 'static_k' in cache: k = cache['static_k'] v = cache['static_v'] else: k = self.k_fc(keys) v = self.v_fc(values) k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key]) k = k.permute(0, 2, 1, 3) v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value]) v = v.permute(0, 2, 1, 3) if cache is not None: if static_kv and 'static_k' not in cache: cache['static_k'], cache['static_v'] = k, v elif not static_kv: cache_k, cache_v = cache['k'], cache['v'] k = torch.cat([cache_k, k], dim=2) v = torch.cat([cache_v, v], dim=2) cache['k'], cache['v'] = k, v return q, k, v def forward(self, queries, keys, values, attn_bias, cache=None): keys = queries if keys is None else keys values = keys if values is None else values q, k, v = self._prepare_qkv(queries, keys, values, cache) product = torch.matmul(q, k.transpose(2, 3)) product = product * self.d_model ** -0.5 if attn_bias is not None: product += attn_bias weights = F.softmax(product, dim=-1) if self.dropout_rate: weights = F.dropout(weights, p=self.dropout_rate) out = torch.matmul(weights, v) out = out.permute(0, 2, 1, 3) out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape [2] * out.shape[3]]) out = self.proj_fc(out) return out class LambdaXY(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x, y): return self.func(x, y) class PrePostProcessLayer(nn.Module): """ PrePostProcessLayer """ def __init__(self, process_cmd, d_model, dropout_rate): super(PrePostProcessLayer, self).__init__() self.process_cmd = process_cmd self.functors = nn.ModuleList() cur_a_len = 0 cur_n_len = 0 cur_d_len = 0 for cmd in self.process_cmd: if cmd == 'a': self.functors.add_module('add_res_connect_{}'.format( cur_a_len), LambdaXY(lambda x, y: x + y if y is not None else x)) cur_a_len += 1 elif cmd == 'n': layerNorm = torch.nn.LayerNorm(normalized_shape=d_model, elementwise_affine=True, eps=1e-05) self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm ) cur_n_len += 1 elif cmd == 'd': self.functors.add_module('add_drop_{}'.format(cur_d_len), Lambda(lambda x: F.dropout(x, p=dropout_rate) if dropout_rate else x)) cur_d_len += 1 def forward(self, x, residual=None): for i, (cmd, functor) in enumerate(zip(self.process_cmd, self.functors) ): if cmd == 'a': x = functor(x, residual) else: x = functor(x) return x class EncoderLayerNew(nn.Module): """ EncoderLayer """ def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'): super(EncoderLayerNew, self).__init__() 
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, attention_dropout) self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.ffn = FFN(d_inner_hid, d_model, relu_dropout) self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) def forward(self, input_0, input_1): primals_1 = self.preprocesser1.functors.layer_norm_0.weight primals_2 = self.preprocesser1.functors.layer_norm_0.bias primals_4 = self.self_attn.q_fc.weight primals_5 = self.self_attn.k_fc.weight primals_6 = self.self_attn.v_fc.weight primals_8 = self.self_attn.proj_fc.weight primals_9 = self.preprocesser2.functors.layer_norm_0.weight primals_10 = self.preprocesser2.functors.layer_norm_0.bias primals_11 = self.ffn.fc1.weight primals_12 = self.ffn.fc1.bias primals_13 = self.ffn.fc2.weight primals_14 = self.ffn.fc2.bias primals_3 = input_0 primals_7 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
eminem171333491/PaddleOCR2Pytorch
EncoderLayer
false
3470
[ "Apache-2.0" ]
0
ec466bb3a689eccb9290e9f80812a45301d3b030
https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030
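# Editorial note (illustrative sketch, not part of the dataset record): the pair
# triton_poi_fused__softmax_add_mul_4 / _5 above implements
# softmax(q @ k.T * d_model**-0.5 + attn_bias) -- with d_model = 4 the scale is
# the literal 0.5 seen in the kernels -- using the standard max-subtraction for
# numerical stability (tmp19 is the row max, tmp30 the row sum of exponentials).
# A minimal eager restatement of that pattern:
import torch

def stable_softmax(scores: torch.Tensor) -> torch.Tensor:
    m = scores.max(dim=-1, keepdim=True).values  # row max (tmp19)
    e = torch.exp(scores - m)                    # shifted exponentials
    return e / e.sum(dim=-1, keepdim=True)       # normalize by row sum (tmp30)

scores = torch.rand(4, 4, 4, 4) * 0.5 + torch.rand(4, 4, 4, 4)  # product*scale + bias
assert torch.allclose(stable_softmax(scores), torch.softmax(scores, dim=-1),
                      atol=1e-6)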
Encoder
import torch import torch.nn as nn import torch.nn.functional as F class Lambda(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x): return self.func(x) class FFN(nn.Module): """ Feed-Forward Network """ def __init__(self, d_inner_hid, d_model, dropout_rate): super(FFN, self).__init__() self.dropout_rate = dropout_rate self.fc1 = torch.nn.Linear(in_features=d_model, out_features= d_inner_hid) self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features= d_model) def forward(self, x): hidden = self.fc1(x) hidden = F.relu(hidden) if self.dropout_rate: hidden = F.dropout(hidden, p=self.dropout_rate) out = self.fc2(hidden) return out class MultiHeadAttention(nn.Module): """ Multi-Head Attention """ def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.d_key = d_key self.d_value = d_value self.d_model = d_model self.dropout_rate = dropout_rate self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.v_fc = torch.nn.Linear(in_features=d_model, out_features= d_value * n_head, bias=False) self.proj_fc = torch.nn.Linear(in_features=d_value * n_head, out_features=d_model, bias=False) def _prepare_qkv(self, queries, keys, values, cache=None): if keys is None: keys, values = queries, queries static_kv = False else: static_kv = True q = self.q_fc(queries) q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self .d_key]) q = q.permute(0, 2, 1, 3) if cache is not None and static_kv and 'static_k' in cache: k = cache['static_k'] v = cache['static_v'] else: k = self.k_fc(keys) v = self.v_fc(values) k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key]) k = k.permute(0, 2, 1, 3) v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value]) v = v.permute(0, 2, 1, 3) if cache is not None: if static_kv and 'static_k' not in cache: cache['static_k'], cache['static_v'] = k, v elif not static_kv: cache_k, cache_v = cache['k'], cache['v'] k = torch.cat([cache_k, k], dim=2) v = torch.cat([cache_v, v], dim=2) cache['k'], cache['v'] = k, v return q, k, v def forward(self, queries, keys, values, attn_bias, cache=None): keys = queries if keys is None else keys values = keys if values is None else values q, k, v = self._prepare_qkv(queries, keys, values, cache) product = torch.matmul(q, k.transpose(2, 3)) product = product * self.d_model ** -0.5 if attn_bias is not None: product += attn_bias weights = F.softmax(product, dim=-1) if self.dropout_rate: weights = F.dropout(weights, p=self.dropout_rate) out = torch.matmul(weights, v) out = out.permute(0, 2, 1, 3) out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape [2] * out.shape[3]]) out = self.proj_fc(out) return out class LambdaXY(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x, y): return self.func(x, y) class PrePostProcessLayer(nn.Module): """ PrePostProcessLayer """ def __init__(self, process_cmd, d_model, dropout_rate): super(PrePostProcessLayer, self).__init__() self.process_cmd = process_cmd self.functors = nn.ModuleList() cur_a_len = 0 cur_n_len = 0 cur_d_len = 0 
for cmd in self.process_cmd: if cmd == 'a': self.functors.add_module('add_res_connect_{}'.format( cur_a_len), LambdaXY(lambda x, y: x + y if y is not None else x)) cur_a_len += 1 elif cmd == 'n': layerNorm = torch.nn.LayerNorm(normalized_shape=d_model, elementwise_affine=True, eps=1e-05) self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm ) cur_n_len += 1 elif cmd == 'd': self.functors.add_module('add_drop_{}'.format(cur_d_len), Lambda(lambda x: F.dropout(x, p=dropout_rate) if dropout_rate else x)) cur_d_len += 1 def forward(self, x, residual=None): for i, (cmd, functor) in enumerate(zip(self.process_cmd, self.functors) ): if cmd == 'a': x = functor(x, residual) else: x = functor(x) return x class EncoderLayer(nn.Module): """ EncoderLayer """ def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'): super(EncoderLayer, self).__init__() self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, attention_dropout) self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.ffn = FFN(d_inner_hid, d_model, relu_dropout) self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) def forward(self, enc_input, attn_bias): attn_output = self.self_attn(self.preprocesser1(enc_input), None, None, attn_bias) attn_output = self.postprocesser1(attn_output, enc_input) ffn_output = self.ffn(self.preprocesser2(attn_output)) ffn_output = self.postprocesser2(ffn_output, attn_output) return ffn_output class Encoder(nn.Module): """ encoder """ def __init__(self, n_layer, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'): super(Encoder, self).__init__() self.encoder_layers = nn.ModuleList() for i in range(n_layer): encoderLayer = EncoderLayer(n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd) self.encoder_layers.add_module('layer_%d' % i, encoderLayer) self.processer = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) def forward(self, enc_input, attn_bias): for encoder_layer in self.encoder_layers: enc_output = encoder_layer(enc_input, attn_bias) enc_input = enc_output enc_output = self.processer(enc_output) return enc_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_layer': 1, 'n_head': 4, 'd_key': 4, 'd_value': 4, 'd_model': 4, 'd_inner_hid': 4, 'prepostprocess_dropout': 0.5, 'attention_dropout': 0.5, 'relu_dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, 
xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 16), (16, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_clone_3[grid(64, 4)](buf4, buf7, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_mul_4[grid(64)](buf8, primals_7, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_add_mul_5[grid(256)](buf11, primals_7, buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf12 = torch.ops.aten.native_dropout.default(buf11, 0.5, True) buf13 = buf12[0] buf14 = buf12[1] del buf12 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf5, buf15, 256, XBLOCK=256, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, 
(16, 4, 4), (16, 4, 1), 0), out=buf16 ) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(256)](buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf16 buf18 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf18) buf19 = torch.ops.aten.native_dropout.default(reinterpret_tensor( buf18, (4, 4, 4), (16, 4, 1), 0), 0.5, True) buf20 = buf19[0] buf21 = buf19[1] del buf19 buf22 = buf20 del buf20 triton_poi_fused_add_6[grid(64)](buf22, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf23 = buf1 del buf1 buf24 = buf0 del buf0 triton_poi_fused_native_layer_norm_0[grid(16)](buf22, buf23, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_native_layer_norm_1[grid(64)](buf22, buf23, buf24, primals_9, primals_10, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf26 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf26) buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0) del buf26 buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf27, primals_12, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf28 = torch.ops.aten.native_dropout.default(buf27, 0.5, True) buf29 = buf28[0] buf30 = buf28[1] del buf28 buf31 = reinterpret_tensor(buf27, (16, 4), (4, 1), 0) del buf27 extern_kernels.addmm(primals_14, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf31) del primals_14 buf32 = torch.ops.aten.native_dropout.default(reinterpret_tensor( buf31, (4, 4, 4), (16, 4, 1), 0), 0.5, True) buf33 = buf32[0] buf34 = buf32[1] del buf32 buf35 = buf33 del buf33 triton_poi_fused_add_6[grid(64)](buf35, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) buf36 = buf24 del buf24 buf37 = buf23 del buf23 triton_poi_fused_native_layer_norm_0[grid(16)](buf35, buf36, buf37, 16, XBLOCK=16, num_warps=1, num_stages=1) buf38 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0) del buf31 triton_poi_fused_native_layer_norm_1[grid(64)](buf35, buf36, buf37, primals_15, primals_16, buf38, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf36 del buf37 del primals_16 return (buf38, primals_3, primals_9, primals_15, reinterpret_tensor( buf2, (16, 4), (4, 1), 0), buf11, buf14, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), buf21, buf22, reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf30, reinterpret_tensor(buf29, (16, 4), (4, 1), 0 ), buf34, buf35, primals_13, buf39, primals_11, primals_8, reinterpret_tensor(buf13, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_6, primals_5, primals_4) class Lambda(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x): return self.func(x) class FFN(nn.Module): """ Feed-Forward Network """ def __init__(self, d_inner_hid, d_model, dropout_rate): super(FFN, self).__init__() self.dropout_rate = dropout_rate self.fc1 = 
torch.nn.Linear(in_features=d_model, out_features= d_inner_hid) self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features= d_model) def forward(self, x): hidden = self.fc1(x) hidden = F.relu(hidden) if self.dropout_rate: hidden = F.dropout(hidden, p=self.dropout_rate) out = self.fc2(hidden) return out class MultiHeadAttention(nn.Module): """ Multi-Head Attention """ def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0): super(MultiHeadAttention, self).__init__() self.n_head = n_head self.d_key = d_key self.d_value = d_value self.d_model = d_model self.dropout_rate = dropout_rate self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False) self.v_fc = torch.nn.Linear(in_features=d_model, out_features= d_value * n_head, bias=False) self.proj_fc = torch.nn.Linear(in_features=d_value * n_head, out_features=d_model, bias=False) def _prepare_qkv(self, queries, keys, values, cache=None): if keys is None: keys, values = queries, queries static_kv = False else: static_kv = True q = self.q_fc(queries) q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self .d_key]) q = q.permute(0, 2, 1, 3) if cache is not None and static_kv and 'static_k' in cache: k = cache['static_k'] v = cache['static_v'] else: k = self.k_fc(keys) v = self.v_fc(values) k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key]) k = k.permute(0, 2, 1, 3) v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value]) v = v.permute(0, 2, 1, 3) if cache is not None: if static_kv and 'static_k' not in cache: cache['static_k'], cache['static_v'] = k, v elif not static_kv: cache_k, cache_v = cache['k'], cache['v'] k = torch.cat([cache_k, k], dim=2) v = torch.cat([cache_v, v], dim=2) cache['k'], cache['v'] = k, v return q, k, v def forward(self, queries, keys, values, attn_bias, cache=None): keys = queries if keys is None else keys values = keys if values is None else values q, k, v = self._prepare_qkv(queries, keys, values, cache) product = torch.matmul(q, k.transpose(2, 3)) product = product * self.d_model ** -0.5 if attn_bias is not None: product += attn_bias weights = F.softmax(product, dim=-1) if self.dropout_rate: weights = F.dropout(weights, p=self.dropout_rate) out = torch.matmul(weights, v) out = out.permute(0, 2, 1, 3) out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape [2] * out.shape[3]]) out = self.proj_fc(out) return out class LambdaXY(nn.Module): """An easy way to create a pytorch layer for a simple `func`.""" def __init__(self, func): """create a layer that simply calls `func` with `x`""" super().__init__() self.func = func def forward(self, x, y): return self.func(x, y) class PrePostProcessLayer(nn.Module): """ PrePostProcessLayer """ def __init__(self, process_cmd, d_model, dropout_rate): super(PrePostProcessLayer, self).__init__() self.process_cmd = process_cmd self.functors = nn.ModuleList() cur_a_len = 0 cur_n_len = 0 cur_d_len = 0 for cmd in self.process_cmd: if cmd == 'a': self.functors.add_module('add_res_connect_{}'.format( cur_a_len), LambdaXY(lambda x, y: x + y if y is not None else x)) cur_a_len += 1 elif cmd == 'n': layerNorm = torch.nn.LayerNorm(normalized_shape=d_model, elementwise_affine=True, eps=1e-05) self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm ) cur_n_len += 1 elif cmd == 'd': self.functors.add_module('add_drop_{}'.format(cur_d_len), Lambda(lambda x: F.dropout(x, p=dropout_rate) if 
dropout_rate else x)) cur_d_len += 1 def forward(self, x, residual=None): for i, (cmd, functor) in enumerate(zip(self.process_cmd, self.functors) ): if cmd == 'a': x = functor(x, residual) else: x = functor(x) return x class EncoderLayer(nn.Module): """ EncoderLayer """ def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'): super(EncoderLayer, self).__init__() self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, attention_dropout) self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) self.ffn = FFN(d_inner_hid, d_model, relu_dropout) self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout) def forward(self, enc_input, attn_bias): attn_output = self.self_attn(self.preprocesser1(enc_input), None, None, attn_bias) attn_output = self.postprocesser1(attn_output, enc_input) ffn_output = self.ffn(self.preprocesser2(attn_output)) ffn_output = self.postprocesser2(ffn_output, attn_output) return ffn_output class EncoderNew(nn.Module): """ encoder """ def __init__(self, n_layer, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'): super(EncoderNew, self).__init__() self.encoder_layers = nn.ModuleList() for i in range(n_layer): encoderLayer = EncoderLayer(n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd) self.encoder_layers.add_module('layer_%d' % i, encoderLayer) self.processer = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout) def forward(self, input_0, input_1): primals_1 = (self.encoder_layers.layer_0.preprocesser1.functors. layer_norm_0.weight) primals_2 = (self.encoder_layers.layer_0.preprocesser1.functors. layer_norm_0.bias) primals_4 = self.encoder_layers.layer_0.self_attn.q_fc.weight primals_5 = self.encoder_layers.layer_0.self_attn.k_fc.weight primals_6 = self.encoder_layers.layer_0.self_attn.v_fc.weight primals_8 = self.encoder_layers.layer_0.self_attn.proj_fc.weight primals_9 = (self.encoder_layers.layer_0.preprocesser2.functors. layer_norm_0.weight) primals_10 = (self.encoder_layers.layer_0.preprocesser2.functors. layer_norm_0.bias) primals_11 = self.encoder_layers.layer_0.ffn.fc1.weight primals_12 = self.encoder_layers.layer_0.ffn.fc1.bias primals_13 = self.encoder_layers.layer_0.ffn.fc2.weight primals_14 = self.encoder_layers.layer_0.ffn.fc2.bias primals_15 = self.processer.functors.layer_norm_0.weight primals_16 = self.processer.functors.layer_norm_0.bias primals_3 = input_0 primals_7 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0]
eminem171333491/PaddleOCR2Pytorch
Encoder
false
3,471
[ "Apache-2.0" ]
0
ec466bb3a689eccb9290e9f80812a45301d3b030
https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030
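A minimal usage sketch for the MultiHeadAttention module in the entry above (illustrative only, not part of the dataset record; the batch size, sequence length, and hyperparameters are assumptions, and the module's own imports are assumed in scope):

import torch

attn = MultiHeadAttention(d_key=8, d_value=8, d_model=16, n_head=2)
x = torch.rand(4, 5, 16)           # (batch, seq_len, d_model)
out = attn(x, None, None, None)    # keys/values default to the queries
print(out.shape)                   # torch.Size([4, 5, 16])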
ConcatenatedAttention
import torch
import torch.optim
import torch.utils.data
from torch import nn


class ConcatenatedAttention(nn.Module):
    """
    ConcatenatedAttention module which uses concatenation of encoder and
    decoder attention vectors instead of summing them up
    """

    def __init__(self, encoder_dim, decoder_dim, attention_dim):
        """
        @param encoder_dim: feature size of encoded images
        @param decoder_dim: size of decoder's RNN
        @param attention_dim: size of the attention network
        """
        super().__init__()
        self.encoder_att = nn.Linear(encoder_dim, attention_dim)
        self.decoder_att = nn.Linear(decoder_dim, attention_dim)
        self.full_att = nn.Linear(attention_dim * 2, 1)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, encoder_out, decoder_hidden):
        """
        Forward propagation.
        :param encoder_out: encoded images, a tensor of dimension
            (batch_size, num_pixels, encoder_dim)
        :param decoder_hidden: previous decoder output, a tensor of dimension
            (batch_size, decoder_dim)
        :return: attention weighted encoding, weights
        """
        att1 = self.encoder_att(encoder_out)
        att2 = self.decoder_att(decoder_hidden).unsqueeze(1)
        att2_expanded = att2.expand_as(att1)
        att = self.full_att(self.relu(torch.cat([att1, att2_expanded],
            dim=2))).squeeze(2)
        alpha = self.softmax(att)
        attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim=1)
        return attention_weighted_encoding, alpha


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'encoder_dim': 4, 'decoder_dim': 4, 'attention_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.optim import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x4 = xindex // 8 x2 = xindex // 32 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.full([1], 0, tl.int32) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp13 = 0.0 tmp14 = tmp12 <= tmp13 tl.store(out_ptr0 + x3, tmp12, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), 
xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1, 8), (8, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor( primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.bool) get_raw_stream(0) triton_poi_fused_cat_relu_threshold_backward_0[grid(128)](buf0, buf1, buf2, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_7, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused_mul_sum_3[grid(16)](primals_3, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf7, buf6, primals_3, primals_6, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), buf6, primals_7, buf8 class ConcatenatedAttentionNew(nn.Module): """ ConcatenatedAttention module which uses concatenation of encoder and decoder attention vectors instead of summing them up """ def __init__(self, encoder_dim, decoder_dim, attention_dim): """ @param encoder_dim: feature size of encoded images @param decoder_dim: size of decoder's RNN @param attention_dim: size of the attention network """ super().__init__() self.encoder_att = nn.Linear(encoder_dim, attention_dim) self.decoder_att = nn.Linear(decoder_dim, attention_dim) self.full_att = nn.Linear(attention_dim * 2, 1) self.relu = nn.ReLU() self.softmax = nn.Softmax(dim=1) def forward(self, input_0, input_1): primals_1 = self.encoder_att.weight primals_2 = self.encoder_att.bias primals_4 = self.decoder_att.weight primals_5 = self.decoder_att.bias primals_7 = self.full_att.weight primals_8 = self.full_att.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
enesmsahin/ShowAttendTell
ConcatenatedAttention
false
3,472
[ "MIT" ]
0
ae94b9a61c3b7e6f2302b9fd4477b6a3e14a33fe
https://github.com/enesmsahin/ShowAttendTell/tree/ae94b9a61c3b7e6f2302b9fd4477b6a3e14a33fe
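A minimal sketch exercising the ConcatenatedAttention module above with the shapes from its get_inputs() (illustrative only, not part of the dataset record):

import torch

att = ConcatenatedAttention(encoder_dim=4, decoder_dim=4, attention_dim=4)
enc = torch.rand(4, 4, 4)   # (batch, num_pixels, encoder_dim)
dec = torch.rand(4, 4)      # (batch, decoder_dim)
weighted, alpha = att(enc, dec)
print(weighted.shape, alpha.shape)  # torch.Size([4, 4]) torch.Size([4, 4])
assert torch.allclose(alpha.sum(dim=1), torch.ones(4))  # softmax over pixels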
CVAE
import torch
import torch.nn as nn
import torch.nn.functional as F


class Encoder(nn.Module):

    def __init__(self, sample_size, condition_size, hidden_size):
        super().__init__()
        self.fc1 = nn.Linear(sample_size + condition_size, hidden_size)
        self.fc2 = nn.Dropout(p=0.5)
        self.fc3 = nn.Linear(hidden_size, hidden_size)

    def forward(self, x, c):
        x = torch.cat([x, c], 1)
        p_x = F.relu(self.fc1(x))
        p_x = self.fc2(p_x)
        p_x = F.relu(self.fc3(p_x))
        return p_x


class LatentZ(nn.Module):

    def __init__(self, hidden_size, latent_size):
        super().__init__()
        self.mu = nn.Linear(hidden_size, latent_size)
        self.logvar = nn.Linear(hidden_size, latent_size)

    def forward(self, p_x):
        mu = self.mu(p_x)
        logvar = self.logvar(p_x)
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return std * eps + mu, logvar, mu


class Decoder(nn.Module):

    def __init__(self, latent_size, condition_size, hidden_size_1,
                 hidden_size_2, sample_size):
        super().__init__()
        self.fc1 = nn.Linear(latent_size + condition_size, hidden_size_1)
        self.fc2 = nn.Dropout(p=0.5)
        self.fc3 = nn.Linear(hidden_size_1, hidden_size_2)
        self.fc4 = nn.Linear(hidden_size_2, sample_size)

    def forward(self, z_x, c):
        z = torch.cat([z_x, c], 1)
        q_x = F.relu(self.fc1(z))
        q_x = self.fc2(q_x)
        q_x = F.relu(self.fc3(q_x))
        q_x = self.fc4(q_x)
        return q_x


class CVAE(nn.Module):

    def __init__(self, sample_size, condition_size, hidden_encoder_size,
                 hidden_decoder_1_size, hidden_decoder_2_size, latent_size=2):
        super().__init__()
        self.sample_size = sample_size
        self.condition_size = condition_size
        self.hidden_encoder_size = hidden_encoder_size
        self.hidden_decoder_1__size = hidden_decoder_1_size
        self.hidden_decoder_2__size = hidden_decoder_2_size
        self.latent_size = latent_size
        self.encoder = Encoder(sample_size, condition_size, hidden_encoder_size)
        self.latent_z = LatentZ(hidden_encoder_size, latent_size)
        self.decoder = Decoder(latent_size, condition_size,
            hidden_decoder_1_size, hidden_decoder_2_size, sample_size)

    def forward(self, x, c):
        p_x = self.encoder(x, c)
        z, logvar, mu = self.latent_z(p_x)
        q_z = self.decoder(z, c)
        return q_z, logvar, mu, z


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'sample_size': 4, 'condition_size': 4,
        'hidden_encoder_size': 4, 'hidden_decoder_1_size': 4,
        'hidden_decoder_2_size': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp3 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 6, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-2 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (2, 4), (4, 1)) assert_size_stride(primals_8, (2,), (1,)) assert_size_stride(primals_9, (2, 4), (4, 1)) assert_size_stride(primals_10, (2,), (1,)) assert_size_stride(primals_11, (4, 6), (6, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_10 buf7 = torch.ops.aten.randn.default([4, 2], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf8 = buf7 del buf7 buf9 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused_add_exp_mul_2[grid(8)](buf6, buf8, buf5, buf9, 8, XBLOCK=8, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 6), (6, 1), torch.float32) triton_poi_fused_cat_3[grid(24)](buf9, primals_2, buf10, 24, XBLOCK =32, num_warps=1, num_stages=1) del primals_2 buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_11, (6, 4), (1, 6), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_relu_1[grid(16)](buf12, primals_12, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_12 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf12, 
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_relu_1[grid(16)](buf14, primals_14, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_14 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_16, buf14, reinterpret_tensor( primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_16 return (buf15, buf6, buf5, buf9, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, primals_15, primals_13, primals_11, primals_9, primals_7, primals_5) class Encoder(nn.Module): def __init__(self, sample_size, condition_size, hidden_size): super().__init__() self.fc1 = nn.Linear(sample_size + condition_size, hidden_size) self.fc2 = nn.Dropout(p=0.5) self.fc3 = nn.Linear(hidden_size, hidden_size) def forward(self, x, c): x = torch.cat([x, c], 1) p_x = F.relu(self.fc1(x)) p_x = self.fc2(p_x) p_x = F.relu(self.fc3(p_x)) return p_x class LatentZ(nn.Module): def __init__(self, hidden_size, latent_size): super().__init__() self.mu = nn.Linear(hidden_size, latent_size) self.logvar = nn.Linear(hidden_size, latent_size) def forward(self, p_x): mu = self.mu(p_x) logvar = self.logvar(p_x) std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return std * eps + mu, logvar, mu class Decoder(nn.Module): def __init__(self, latent_size, condition_size, hidden_size_1, hidden_size_2, sample_size): super().__init__() self.fc1 = nn.Linear(latent_size + condition_size, hidden_size_1) self.fc2 = nn.Dropout(p=0.5) self.fc3 = nn.Linear(hidden_size_1, hidden_size_2) self.fc4 = nn.Linear(hidden_size_2, sample_size) def forward(self, z_x, c): z = torch.cat([z_x, c], 1) q_x = F.relu(self.fc1(z)) q_x = self.fc2(q_x) q_x = F.relu(self.fc3(q_x)) q_x = self.fc4(q_x) return q_x class CVAENew(nn.Module): def __init__(self, sample_size, condition_size, hidden_encoder_size, hidden_decoder_1_size, hidden_decoder_2_size, latent_size=2): super().__init__() self.sample_size = sample_size self.condition_size = condition_size self.hidden_encoder_size = hidden_encoder_size self.hidden_decoder_1__size = hidden_decoder_1_size self.hidden_decoder_2__size = hidden_decoder_2_size self.latent_size = latent_size self.encoder = Encoder(sample_size, condition_size, hidden_encoder_size ) self.latent_z = LatentZ(hidden_encoder_size, latent_size) self.decoder = Decoder(latent_size, condition_size, hidden_decoder_1_size, hidden_decoder_2_size, sample_size) def forward(self, input_0, input_1): primals_3 = self.encoder.fc1.weight primals_4 = self.encoder.fc1.bias primals_1 = self.encoder.fc3.weight primals_6 = self.encoder.fc3.bias primals_7 = self.latent_z.mu.weight primals_8 = self.latent_z.mu.bias primals_9 = self.latent_z.logvar.weight primals_10 = self.latent_z.logvar.bias primals_11 = self.decoder.fc1.weight primals_12 = self.decoder.fc1.bias primals_2 = self.decoder.fc3.weight primals_14 = self.decoder.fc3.bias primals_5 = self.decoder.fc4.weight primals_16 = self.decoder.fc4.bias primals_13 = input_0 primals_15 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0], output[1], output[2], output[3]
ekrell/learn-planning-space
CVAE
false
3,473
[ "MIT" ]
0
730e448bffa4996b2b1ef3a5b00500dc172962ec
https://github.com/ekrell/learn-planning-space/tree/730e448bffa4996b2b1ef3a5b00500dc172962ec
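A minimal sketch of a forward pass through the CVAE above, using the sizes from its get_init_inputs(). The ELBO-style objective at the end is a common convention for conditional VAEs and an assumption, not part of this entry:

import torch

model = CVAE(sample_size=4, condition_size=4, hidden_encoder_size=4,
             hidden_decoder_1_size=4, hidden_decoder_2_size=4)
x, c = torch.rand(4, 4), torch.rand(4, 4)
q_z, logvar, mu, z = model(x, c)
print(q_z.shape, z.shape)  # torch.Size([4, 4]) torch.Size([4, 2])

recon = torch.nn.functional.mse_loss(q_z, x, reduction='sum')  # assumed reconstruction loss
kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())  # KL(q(z|x,c) || N(0, I))
loss = recon + kld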
TFSamepaddingLayer
import torch
import torch.utils.data
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F


class TFSamepaddingLayer(nn.Module):
    """To align with tf `same` padding.

    Putting this before any conv layer that needs padding
    Assuming kernel has Height == Width for simplicity
    """

    def __init__(self, ksize, stride):
        super(TFSamepaddingLayer, self).__init__()
        self.ksize = ksize
        self.stride = stride

    def forward(self, x):
        if x.shape[2] % self.stride == 0:
            pad = max(self.ksize - self.stride, 0)
        else:
            pad = max(self.ksize - x.shape[2] % self.stride, 0)
        if pad % 2 == 0:
            pad_val = pad // 2
            padding = pad_val, pad_val, pad_val, pad_val
        else:
            pad_val_start = pad // 2
            pad_val_end = pad - pad_val_start
            padding = pad_val_start, pad_val_end, pad_val_start, pad_val_end
        x = F.pad(x, padding, 'constant', 0)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'ksize': 4, 'stride': 1}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.multiprocessing
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 7 % 7
    x0 = xindex % 7
    x2 = xindex // 49
    x4 = xindex
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = -1 + x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
        other=0.0)
    tl.store(out_ptr0 + x4, tmp11, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(784)](arg0_1, buf0, 784,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class TFSamepaddingLayerNew(nn.Module):
    """To align with tf `same` padding.

    Putting this before any conv layer that needs padding
    Assuming kernel has Height == Width for simplicity
    """

    def __init__(self, ksize, stride):
        super(TFSamepaddingLayerNew, self).__init__()
        self.ksize = ksize
        self.stride = stride

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
essential2189/Cell-Based-Model
TFSamepaddingLayer
false
3,474
[ "MIT" ]
0
f01c3fcb45e69baa4dc8216b8b5a092f56cfa38e
https://github.com/essential2189/Cell-Based-Model/tree/f01c3fcb45e69baa4dc8216b8b5a092f56cfa38e
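A worked check of the padding arithmetic in the entry above (illustrative only): with ksize=4, stride=1 and a 4x4 input, pad = max(4 - 1, 0) = 3, which is odd, so the padding splits asymmetrically as (1, 2, 1, 2) and the output is 7x7 — matching the (4, 4, 7, 7) buffer allocated by the fused kernel.

import torch

layer = TFSamepaddingLayer(ksize=4, stride=1)
x = torch.rand(4, 4, 4, 4)
print(layer(x).shape)  # torch.Size([4, 4, 7, 7])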
SmoothL1Loss
import torch
import torch.nn.functional as F
import torch.nn as nn


def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
        diff - 0.5 * beta)
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.sum() / pred.numel()
    elif reduction_enum == 2:
        return loss.sum()


def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06
    loss = smooth_l1_loss(pred, target, beta, reduction='none')
    return torch.sum(loss * weight)[None] / avg_factor


class SmoothL1Loss(nn.Module):

    def __init__(self, beta=1.0, loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight, *args, **kwargs):
        loss_bbox = self.loss_weight * weighted_smoothl1(pred, target,
            weight, *args, beta=self.beta, **kwargs)
        return loss_bbox


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_abs_div_lt_mul_sub_sum_where_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp12 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = 0.5 tmp7 = tmp3 * tmp6 tmp8 = tmp7 * tmp3 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp6 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp13 = tmp11 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 0.015624999755859379 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = reinterpret_tensor(buf0, (1,), (1,), 0) del buf0 get_raw_stream(0) triton_per_fused_abs_div_lt_mul_sub_sum_where_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'): assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta ) reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.sum() / pred.numel() elif reduction_enum == 2: return loss.sum() def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None): if avg_factor is None: avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-06 loss = smooth_l1_loss(pred, target, beta, reduction='none') return torch.sum(loss * weight)[None] / avg_factor class SmoothL1LossNew(nn.Module): def __init__(self, beta=1.0, loss_weight=1.0): super(SmoothL1LossNew, self).__init__() self.beta = beta self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
es6rc/icevision
SmoothL1Loss
false
3,475
[ "Apache-2.0" ]
0
bb78dd2e1721c2edb82fb9c1a826fe301541d2a1
https://github.com/es6rc/icevision/tree/bb78dd2e1721c2edb82fb9c1a826fe301541d2a1
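A worked check of the piecewise loss in the entry above (illustrative only). Note also that the hard-coded 0.015624999755859379 in the fused kernel is 1 / (256 / 4 + 1e-06): all 256 random weights are positive, so avg_factor = 64.000001.

import torch

pred = torch.tensor([0.5, 3.0])
target = torch.zeros(2)
beta = 1.0
diff = (pred - target).abs()
loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
print(loss)  # tensor([0.1250, 2.5000]): quadratic below beta, linear above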
CrossEntropyLoss
import torch
import torch.nn.functional as F
import torch.nn as nn


def mask_cross_entropy(pred, target, label):
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target,
        reduction='mean')[None]


def _expand_binary_labels(labels, label_weights, label_channels):
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    bin_label_weights = label_weights.view(-1, 1).expand(
        label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights


def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    return F.binary_cross_entropy_with_logits(pred, label.float(),
        weight.float(), reduction='sum')[None] / avg_factor


def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.0)
    raw = F.cross_entropy(pred, label, reduction='none')
    if reduce:
        return torch.sum(raw * weight)[None] / avg_factor
    else:
        return raw * weight / avg_factor


class CrossEntropyLoss(nn.Module):

    def __init__(self, use_sigmoid=False, use_mask=False, loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        assert use_sigmoid is False or use_mask is False
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.loss_weight = loss_weight
        if self.use_sigmoid:
            self.cls_criterion = weighted_binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = weighted_cross_entropy

    def forward(self, cls_score, label, label_weight, *args, **kwargs):
        loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
            label_weight, *args, **kwargs)
        return loss_cls


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tl.store(out_ptr0 + x2, tmp27, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + r2, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 0.00390625 tmp7 = tmp5 * tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), 
(64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf0, arg0_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = reinterpret_tensor(buf2, (1,), (1,), 0) del buf2 triton_per_fused__log_softmax_div_mul_neg_sum_2[grid(1)](buf3, buf1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf1 return buf3, def mask_cross_entropy(pred, target, label): num_rois = pred.size()[0] inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) pred_slice = pred[inds, label].squeeze(1) return F.binary_cross_entropy_with_logits(pred_slice, target, reduction ='mean')[None] def _expand_binary_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size (0), label_channels) return bin_labels, bin_label_weights def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None): if pred.dim() != label.dim(): label, weight = _expand_binary_labels(label, weight, pred.size(-1)) if avg_factor is None: avg_factor = max(torch.sum(weight > 0).float().item(), 1.0) return F.binary_cross_entropy_with_logits(pred, label.float(), weight. float(), reduction='sum')[None] / avg_factor def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True): if avg_factor is None: avg_factor = max(torch.sum(weight > 0).float().item(), 1.0) raw = F.cross_entropy(pred, label, reduction='none') if reduce: return torch.sum(raw * weight)[None] / avg_factor else: return raw * weight / avg_factor class CrossEntropyLossNew(nn.Module): def __init__(self, use_sigmoid=False, use_mask=False, loss_weight=1.0): super(CrossEntropyLossNew, self).__init__() assert use_sigmoid is False or use_mask is False self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.loss_weight = loss_weight if self.use_sigmoid: self.cls_criterion = weighted_binary_cross_entropy elif self.use_mask: self.cls_criterion = mask_cross_entropy else: self.cls_criterion = weighted_cross_entropy def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
es6rc/icevision
CrossEntropyLoss
false
3,476
[ "Apache-2.0" ]
0
bb78dd2e1721c2edb82fb9c1a826fe301541d2a1
https://github.com/es6rc/icevision/tree/bb78dd2e1721c2edb82fb9c1a826fe301541d2a1
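An illustrative note on the entry above: the 0.00390625 in the fused reduction kernel is 1/256, i.e. 1/avg_factor when every element of the random weight tensor is positive. A minimal sketch of the eager path (not part of the dataset record; soft probability targets in F.cross_entropy require PyTorch >= 1.10, and shapes follow get_inputs()):

import torch

crit = CrossEntropyLoss()
pred = torch.rand(4, 4, 4, 4)
label = torch.rand(4, 4, 4, 4)   # interpreted as class probabilities over dim 1
weight = torch.rand(4, 4, 4, 4)
loss = crit(pred, label, weight)
print(loss.shape)  # torch.Size([1])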
Squash
import torch
import torch.nn as nn
import torch.jit


class Squash(nn.Module):

    def forward(self, x):
        y = x ** 3
        return torch.clamp(y, min=0) / (1 + y.abs())


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.jit

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_abs_add_clamp_div_pow_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tmp2 = tmp1 * tmp0
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tl_math.abs(tmp2)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 / tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_add_clamp_div_pow_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SquashNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
ethanabrooks/teacher-RL
Squash
false
3,477
[ "MIT" ]
0
41b44fa4de1e8ce7e0c3eac726919c28ede63538
https://github.com/ethanabrooks/teacher-RL/tree/41b44fa4de1e8ce7e0c3eac726919c28ede63538
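A worked check of the Squash activation above (illustrative only): it computes max(x**3, 0) / (1 + |x**3|), so negative inputs map to 0 and positive inputs saturate below 1.

import torch

s = Squash()
x = torch.tensor([-2.0, 0.0, 1.0, 2.0])
print(s(x))  # tensor([0.0000, 0.0000, 0.5000, 0.8889])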
GeM
import torch
import torch.nn.functional as F
import torch.nn as nn


class GeM(nn.Module):

    def __init__(self, p=3, eps=1e-06):
        super(GeM, self).__init__()
        self.p = p
        self.eps = eps

    def forward(self, x):
        return self.gem(x, p=self.p, eps=self.eps)

    def gem(self, x, p=3, eps=1e-06):
        return F.avg_pool2d(x.clamp(min=eps).pow(p),
            (x.size(-2), x.size(-1))).pow(1.0 / p)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_clamp_pow_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp35 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp40 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp45 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp55 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp60 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp65 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp70 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp75 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp2 * tmp2 tmp4 = tmp3 * tmp2 tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = tmp6 * tmp6 tmp8 = tmp7 * tmp6 tmp9 = tmp8 + tmp4 tmp11 = triton_helpers.maximum(tmp10, tmp1) tmp12 = tmp11 * tmp11 tmp13 = tmp12 * tmp11 tmp14 = tmp13 + tmp9 tmp16 = triton_helpers.maximum(tmp15, tmp1) tmp17 = tmp16 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 + tmp14 tmp21 = triton_helpers.maximum(tmp20, tmp1) tmp22 = tmp21 * tmp21 tmp23 = tmp22 * tmp21 tmp24 = tmp23 + tmp19 tmp26 = triton_helpers.maximum(tmp25, tmp1) tmp27 = tmp26 * tmp26 tmp28 = tmp27 * tmp26 tmp29 = tmp28 + tmp24 tmp31 = triton_helpers.maximum(tmp30, tmp1) tmp32 = tmp31 * tmp31 tmp33 = tmp32 * tmp31 tmp34 = tmp33 + tmp29 tmp36 = triton_helpers.maximum(tmp35, tmp1) tmp37 = tmp36 * tmp36 tmp38 = tmp37 * tmp36 tmp39 = tmp38 + tmp34 tmp41 = triton_helpers.maximum(tmp40, tmp1) tmp42 = tmp41 * tmp41 tmp43 = tmp42 * tmp41 tmp44 = tmp43 + tmp39 tmp46 = triton_helpers.maximum(tmp45, tmp1) tmp47 = tmp46 * tmp46 tmp48 = tmp47 * tmp46 tmp49 = tmp48 + tmp44 tmp51 = triton_helpers.maximum(tmp50, tmp1) tmp52 = tmp51 * tmp51 tmp53 = tmp52 * tmp51 tmp54 = tmp53 + tmp49 tmp56 = triton_helpers.maximum(tmp55, tmp1) tmp57 = tmp56 * tmp56 tmp58 = tmp57 * tmp56 tmp59 = tmp58 + tmp54 tmp61 = triton_helpers.maximum(tmp60, tmp1) tmp62 = tmp61 * tmp61 tmp63 = tmp62 * tmp61 tmp64 = tmp63 + tmp59 tmp66 = triton_helpers.maximum(tmp65, tmp1) tmp67 = tmp66 * tmp66 tmp68 = tmp67 * tmp66 tmp69 = tmp68 + tmp64 tmp71 
= triton_helpers.maximum(tmp70, tmp1) tmp72 = tmp71 * tmp71 tmp73 = tmp72 * tmp71 tmp74 = tmp73 + tmp69 tmp76 = triton_helpers.maximum(tmp75, tmp1) tmp77 = tmp76 * tmp76 tmp78 = tmp77 * tmp76 tmp79 = tmp78 + tmp74 tmp80 = 0.0625 tmp81 = tmp79 * tmp80 tmp82 = 0.3333333333333333 tmp83 = libdevice.pow(tmp81, tmp82) tl.store(in_out_ptr0 + x0, tmp83, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_avg_pool2d_clamp_pow_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf1, class GeMNew(nn.Module): def __init__(self, p=3, eps=1e-06): super(GeMNew, self).__init__() self.p = p self.eps = eps def gem(self, x, p=3, eps=1e-06): return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1)) ).pow(1.0 / p) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
esha-singh/DL_project
GeM
false
3,478
[ "MIT" ]
0
11ac2874845bc3982435cc37f4e0b8896b95660e
https://github.com/esha-singh/DL_project/tree/11ac2874845bc3982435cc37f4e0b8896b95660e
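An illustrative equivalence check for the GeM entry above (not part of the dataset record): generalized-mean pooling with p=3 over a 4x4 map is (mean(clamp(x, 1e-06) ** 3)) ** (1/3); the fused kernel's 0.0625 and 0.3333333333333333 constants are 1/16 (the spatial mean) and 1/p.

import torch

pool = GeM(p=3)
x = torch.rand(4, 4, 4, 4)
out = pool(x)  # torch.Size([4, 4, 1, 1])
manual = x.clamp(min=1e-06).pow(3).mean(dim=(-2, -1), keepdim=True).pow(1 / 3)
assert torch.allclose(out, manual, atol=1e-06)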
Log
import torch
import torch.nn as nn
import torch.jit


class Log(nn.Module):

    def forward(self, x):
        return torch.log(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.jit

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_log_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.log(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_log_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class LogNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
ethanabrooks/teacher-RL
Log
false
3,479
[ "MIT" ]
0
41b44fa4de1e8ce7e0c3eac726919c28ede63538
https://github.com/ethanabrooks/teacher-RL/tree/41b44fa4de1e8ce7e0c3eac726919c28ede63538
QREmbeddingBag
import torch
import numpy as np
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F


class QREmbeddingBag(nn.Module):
    """Computes sums or means over two 'bags' of embeddings, one using the
    quotient of the indices and the other using the remainder of the indices,
    without instantiating the intermediate embeddings, then performs an
    operation to combine these.

    For bags of constant length and no :attr:`per_sample_weights`, this class

        * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``sum(dim=1)``,
        * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``,
        * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``.

    However, :class:`~torch.nn.EmbeddingBag` is much more time and memory
    efficient than using a chain of these operations.

    QREmbeddingBag also supports per-sample weights as an argument to the
    forward pass. This scales the output of the Embedding before performing a
    weighted reduction as specified by ``mode``. If :attr:`per_sample_weights`
    is passed, the only supported ``mode`` is ``"sum"``, which computes a
    weighted sum according to :attr:`per_sample_weights`.

    Known Issues:
    Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.

    Args:
        num_categories (int): total number of unique categories. The input
            indices must be in 0, 1, ..., num_categories - 1.
        embedding_dim (list): list of sizes for each embedding vector in each
            table. If ``"add"`` or ``"mult"`` operations are used, these
            embedding dimensions must be the same. If a single embedding_dim
            is used, then it will use this embedding_dim for both embedding
            tables.
        num_collisions (int): number of collisions to enforce.
        operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``.
            Specifies the operation to compose embeddings. ``"concat"``
            concatenates the embeddings, ``"add"`` sums the embeddings, and
            ``"mult"`` multiplies (component-wise) the embeddings.
            Default: ``"mult"``
        max_norm (float, optional): If given, each embedding vector with norm
            larger than :attr:`max_norm` is renormalized to have norm
            :attr:`max_norm`.
        norm_type (float, optional): The p of the p-norm to compute for the
            :attr:`max_norm` option. Default ``2``.
        scale_grad_by_freq (boolean, optional): if given, this will scale
            gradients by the inverse of frequency of the words in the
            mini-batch. Default ``False``.
            .. note:: This option is not supported when ``mode="max"``.
        mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies
            the way to reduce the bag.

            * ``"sum"`` computes the weighted sum, taking `per_sample_weights` into consideration.
            * ``"mean"`` computes the average of the values in the bag,
            * ``"max"`` computes the max value over each bag. Default: ``"mean"``

        sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight`
            matrix will be a sparse tensor. See Notes for more details
            regarding sparse gradients.
            .. note:: This option is not supported when ``mode="max"``.

    Attributes:
        weight (Tensor): the learnable weights of each embedding table is the
            module of shape `(num_embeddings, embedding_dim)` initialized
            using a uniform distribution with sqrt(1 / num_categories).

    Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional),
        and :attr:`per_sample_weights` (Tensor, optional)

        If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B``
        bags (sequences) each of fixed length ``N``, and this will return
        ``B`` values aggregated in a way depending on the :attr:`mode`.
        :attr:`offsets` is ignored and required to be ``None`` in this case.

        If :attr:`input` is 1D of shape `(N)`, it will be treated as a
        concatenation of multiple bags (sequences). :attr:`offsets` is
        required to be a 1D tensor containing the starting index positions of
        each bag in :attr:`input`. Therefore, for :attr:`offsets` of shape
        `(B)`, :attr:`input` will be viewed as having ``B`` bags. Empty bags
        (i.e., having 0-length) will have returned vectors filled by zeros.

        per_sample_weights (Tensor, optional): a tensor of float / double
            weights, or None to indicate all weights should be taken to be
            ``1``. If specified, :attr:`per_sample_weights` must have exactly
            the same shape as input and is treated as having the same
            :attr:`offsets`, if those are not ``None``. Only supported for
            ``mode='sum'``.

    Returns:
        The output tensor of shape `(B, embedding_dim)`
    """
    __constants__ = ['num_embeddings', 'embedding_dim', 'num_collisions',
        'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'mode',
        'sparse']

    def __init__(self, num_embeddings, embedding_dim, num_collisions,
                 operation='mult', max_norm=None, norm_type=2.0,
                 scale_grad_by_freq=False, mode='mean', sparse=False,
                 _weight=None):
        super(QREmbeddingBag, self).__init__()
        assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
        self.num_categories = num_embeddings
        if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
            self.embedding_dim = [embedding_dim, embedding_dim]
        else:
            self.embedding_dim = embedding_dim
        self.num_collisions = num_collisions
        self.operation = operation
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if self.operation == 'add' or self.operation == 'mult':
            assert self.embedding_dim[0] == self.embedding_dim[1
                ], 'Embedding dimensions do not match!'
        self.num_embeddings = [int(np.ceil(num_embeddings / num_collisions)),
            num_collisions]
        if _weight is None:
            self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0],
                self.embedding_dim[0]))
            self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1],
                self.embedding_dim[1]))
            self.reset_parameters()
        else:
            assert list(_weight[0].shape) == [self.num_embeddings[0],
                self.embedding_dim[0]
                ], 'Shape of weight for quotient table does not ' + 'match num_embeddings and embedding_dim'
            assert list(_weight[1].shape) == [self.num_embeddings[1],
                self.embedding_dim[1]
                ], 'Shape of weight for remainder table does not ' + 'match num_embeddings and embedding_dim'
            self.weight_q = Parameter(_weight[0])
            self.weight_r = Parameter(_weight[1])
        self.mode = mode
        self.sparse = sparse

    def reset_parameters(self):
        nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
        nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))

    def forward(self, input, offsets=None, per_sample_weights=None):
        input_q = (input / self.num_collisions).long()
        input_r = torch.remainder(input, self.num_collisions).long()
        embed_q = F.embedding_bag(input_q, self.weight_q, offsets,
            self.max_norm, self.norm_type, self.scale_grad_by_freq,
            self.mode, self.sparse, per_sample_weights)
        embed_r = F.embedding_bag(input_r, self.weight_r, offsets,
            self.max_norm, self.norm_type, self.scale_grad_by_freq,
            self.mode, self.sparse, per_sample_weights)
        if self.operation == 'concat':
            embed = torch.cat((embed_q, embed_r), dim=1)
        elif self.operation == 'add':
            embed = embed_q + embed_r
        elif self.operation == 'mult':
            embed = embed_q * embed_r
        return embed

    def extra_repr(self):
        s = '{num_embeddings}, {embedding_dim}'
        if self.max_norm is not None:
            s += ', max_norm={max_norm}'
        if self.norm_type != 2:
            s += ', norm_type={norm_type}'
        if self.scale_grad_by_freq is not False:
            s += ', scale_grad_by_freq={scale_grad_by_freq}'
        s += ', mode={mode}'
        return s.format(**self.__dict__)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_embeddings': 4, 'embedding_dim': 4,
        'num_collisions': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 4 * x0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused__to_copy_div_remainder_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tmp3 = tmp2.to(tl.int64)
    tmp4 = 4.0
    tmp5 = tmp0 % tmp4
    tmp6 = tl.full([1], 0, tl.int32)
    tmp7 = tmp5 != tmp6
    tmp8 = libdevice.signbit(tmp5) if tmp5.dtype is tl.float32 else tmp5 < 0
    tmp9 = libdevice.signbit(tmp4) if tmp4.dtype is tl.float32 else tmp4 < 0
    tmp10 = tmp8 != tmp9
    tmp11 = tmp7 & tmp10
    tmp12 = tmp5 + tmp4
    tmp13 = tl.where(tmp11, tmp12, tmp5)
    tmp14 = tmp13.to(tl.int64)
    tl.store(out_ptr0 + x0, tmp3, xmask)
    tl.store(out_ptr1 + x0, tmp14, xmask)


@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (1, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.int64)
        get_raw_stream(0)
        triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
        triton_poi_fused__to_copy_div_remainder_1[grid(16)](primals_1,
            buf1, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf2 = torch.ops.aten._embedding_bag.default(primals_2,
            reinterpret_tensor(buf1, (16,), (1,), 0), buf0, False, 1)
        del primals_2
        buf3 = buf2[0]
        buf4 = buf2[1]
        buf5 = buf2[2]
        buf6 = buf2[3]
        del buf2
        buf8 = torch.ops.aten._embedding_bag.default(primals_3,
            reinterpret_tensor(buf7, (16,), (1,), 0), buf0, False, 1)
        del primals_3
        buf9 = buf8[0]
        buf10 = buf8[1]
        buf11 = buf8[2]
        buf12 = buf8[3]
        del buf8
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_mul_2[grid(16)](buf3, buf9, buf13, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
    return (buf13, buf0, reinterpret_tensor(buf1, (16,), (1,), 0), buf3,
        buf4, buf5, buf6, reinterpret_tensor(buf7, (16,), (1,), 0), buf9,
        buf10, buf11, buf12)


class QREmbeddingBagNew(nn.Module):
    """Computes sums or means over two 'bags' of embeddings, one using the quotient
    of the indices and the other using the remainder of the indices, without
    instantiating the intermediate embeddings, then performs an operation to
    combine these.

    For bags of constant length and no :attr:`per_sample_weights`, this class

        * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``sum(dim=1)``,
        * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``,
        * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``.

    However, :class:`~torch.nn.EmbeddingBag` is much more time and memory
    efficient than using a chain of these operations.

    QREmbeddingBag also supports per-sample weights as an argument to the forward
    pass. This scales the output of the Embedding before performing a weighted
    reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed,
    the only supported ``mode`` is ``"sum"``, which computes a weighted sum
    according to :attr:`per_sample_weights`.

    Known Issues:
    Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.

    Args:
        num_categories (int): total number of unique categories. The input indices must be in
            0, 1, ..., num_categories - 1.
        embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"``
            or ``"mult"`` operations are used, these embedding dimensions must be
            the same. If a single embedding_dim is used, then it will use this
            embedding_dim for both embedding tables.
        num_collisions (int): number of collisions to enforce.
        operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``. Specifies the operation
            to compose embeddings. ``"concat"`` concatenates the embeddings, ``"add"`` sums the
            embeddings, and ``"mult"`` multiplies (component-wise) the embeddings.
            Default: ``"mult"``
        max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
            is renormalized to have norm :attr:`max_norm`.
        norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option.
            Default ``2``.
        scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse
            of frequency of the words in the mini-batch. Default ``False``.
            .. note:: This option is not supported when ``mode="max"``.
        mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.

            * ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights` into consideration.
            * ``"mean"`` computes the average of the values in the bag,
            * ``"max"`` computes the max value over each bag.

            Default: ``"mean"``
        sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse
            tensor. See Notes for more details regarding sparse gradients.
            .. note:: This option is not supported when ``mode="max"``.

    Attributes:
        weight (Tensor): the learnable weights of each embedding table of the module, of shape
            `(num_embeddings, embedding_dim)`, initialized using a uniform distribution
            with sqrt(1 / num_categories).

    Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
        :attr:`per_sample_weights` (Tensor, optional)

        If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
        each of fixed length ``N``, and this will return ``B`` values aggregated in a way
        depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None``
        in this case.

        If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
        multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing
        the starting index positions of each bag in :attr:`input`. Therefore, for
        :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as having ``B`` bags.
        Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.

        per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
            to indicate all weights should be taken to be ``1``. If specified,
            :attr:`per_sample_weights` must have exactly the same shape as input and is
            treated as having the same :attr:`offsets`, if those are not ``None``.
            Only supported for ``mode='sum'``.

    Returns:
        The output tensor of shape `(B, embedding_dim)`
    """
    __constants__ = ['num_embeddings', 'embedding_dim', 'num_collisions',
        'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'mode',
        'sparse']

    def __init__(self, num_embeddings, embedding_dim, num_collisions,
        operation='mult', max_norm=None, norm_type=2.0, scale_grad_by_freq=
        False, mode='mean', sparse=False, _weight=None):
        super(QREmbeddingBagNew, self).__init__()
        assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
        self.num_categories = num_embeddings
        if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
            self.embedding_dim = [embedding_dim, embedding_dim]
        else:
            self.embedding_dim = embedding_dim
        self.num_collisions = num_collisions
        self.operation = operation
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if self.operation == 'add' or self.operation == 'mult':
            assert self.embedding_dim[0] == self.embedding_dim[1
                ], 'Embedding dimensions do not match!'
        self.num_embeddings = [int(np.ceil(num_embeddings / num_collisions)),
            num_collisions]
        if _weight is None:
            self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0],
                self.embedding_dim[0]))
            self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1],
                self.embedding_dim[1]))
            self.reset_parameters()
        else:
            assert list(_weight[0].shape) == [self.num_embeddings[0],
                self.embedding_dim[0]
                ], 'Shape of weight for quotient table does not ' + 'match num_embeddings and embedding_dim'
            assert list(_weight[1].shape) == [self.num_embeddings[1],
                self.embedding_dim[1]
                ], 'Shape of weight for remainder table does not ' + 'match num_embeddings and embedding_dim'
            self.weight_q = Parameter(_weight[0])
            self.weight_r = Parameter(_weight[1])
        self.mode = mode
        self.sparse = sparse

    def reset_parameters(self):
        nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
        nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))

    def extra_repr(self):
        s = '{num_embeddings}, {embedding_dim}'
        if self.max_norm is not None:
            s += ', max_norm={max_norm}'
        if self.norm_type != 2:
            s += ', norm_type={norm_type}'
        if self.scale_grad_by_freq is not False:
            s += ', scale_grad_by_freq={scale_grad_by_freq}'
        s += ', mode={mode}'
        return s.format(**self.__dict__)

    def forward(self, input_0):
        primals_2 = self.weight_q
        primals_1 = self.weight_r
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
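# A minimal shape-level usage sketch (an assumption-laden sketch: init args
# mirror this entry's traced sizes, and the capture harness feeds float
# tensors even where real indices would be LongTensors). Conceptually, with
# num_collisions = K an index i is looked up as q = i // K in the quotient
# table and r = i % K in the remainder table, and the two vectors are
# combined per `operation` ('mult' multiplies component-wise); K = 4 is
# baked into the kernels above as the 0.25 multiply-and-truncate and the
# sign-corrected `% 4`.
#
#   emb = QREmbeddingBagNew(4, 4, 4).cuda()      # weight_q: (1, 4), weight_r: (4, 4)
#   out = emb(torch.rand(4, 4, device='cuda'))   # four mean-reduced bags of length 4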
divyanshugit/EnvisEdge
QREmbeddingBag
false
3,480
[ "Apache-2.0" ]
0
26b21fd0eb665fa23a8b8a825c9bf460994d6714
https://github.com/divyanshugit/EnvisEdge/tree/26b21fd0eb665fa23a8b8a825c9bf460994d6714
GatedActivation
import torch
from torch import nn


class GatedActivation(nn.Module):
    """Activation function which computes activation_fn(f) * sigmoid(g).

    The f and g correspond to the top 1/2 and bottom 1/2 of the input channels.
    """

    def __init__(self, activation_fn=torch.tanh):
        """Initializes a new GatedActivation instance.

        Args:
            activation_fn: Activation to use for the top 1/2 input channels.
        """
        super().__init__()
        self._activation_fn = activation_fn

    def forward(self, x):
        _, c, _, _ = x.shape
        assert c % 2 == 0, 'x must have an even number of channels.'
        x, gate = x[:, :c // 2, :, :], x[:, c // 2:, :, :]
        return self._activation_fn(x) * torch.sigmoid(gate)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp2 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp3 = tl.sigmoid(tmp2)
    tmp4 = tmp1 * tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_tanh_0[grid(128)](arg0_1, buf0, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class GatedActivationNew(nn.Module):
    """Activation function which computes activation_fn(f) * sigmoid(g).

    The f and g correspond to the top 1/2 and bottom 1/2 of the input channels.
    """

    def __init__(self, activation_fn=torch.tanh):
        """Initializes a new GatedActivation instance.

        Args:
            activation_fn: Activation to use for the top 1/2 input channels.
        """
        super().__init__()
        self._activation_fn = activation_fn

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
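# A minimal equivalence check (a sketch, assuming a CUDA device is available):
# the fused kernel reads channel c and channel c + C/2 of the input in a
# single pass, so the compiled module should match the eager
# split-then-gate version for the default activation_fn=torch.tanh:
#
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = GatedActivationNew()(x)                          # (4, 2, 4, 4)
#   # equals torch.tanh(x[:, :2]) * torch.sigmoid(x[:, 2:])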
eyalbetzalel/pytorch-generative-1
GatedActivation
false
3,481
[ "MIT" ]
0
7c3adfdb57345220e14fdf3e827c041fa4db121c
https://github.com/eyalbetzalel/pytorch-generative-1/tree/7c3adfdb57345220e14fdf3e827c041fa4db121c
Hsigmoid
import torch import torch.nn as nn import torch.nn.functional as F class Hsigmoid(nn.Module): def __init__(self, inplace=True): super(Hsigmoid, self).__init__() self.inplace = inplace def forward(self, x): return F.relu6(1.2 * x + 3.0, inplace=self.inplace) / 6.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.2 tmp2 = tmp0 * tmp1 tmp3 = 3.0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 6.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tmp9 = 0.16666666666666666 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HsigmoidNew(nn.Module): def __init__(self, inplace=True): super(HsigmoidNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
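# Numeric sanity check of the fused formula (a sketch; values hand-computed,
# not part of the captured code): relu6(1.2 * x + 3) / 6 maps x = 0 to 0.5
# and saturates to 0 for x <= -2.5 and to 1 for x >= 2.5; the kernel's
# 0.16666666666666666 constant is the precomputed 1/6 that replaces the
# division in the eager version.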
eminem171333491/PaddleOCR2Pytorch
Hsigmoid
false
3,482
[ "Apache-2.0" ]
0
ec466bb3a689eccb9290e9f80812a45301d3b030
https://github.com/eminem171333491/PaddleOCR2Pytorch/tree/ec466bb3a689eccb9290e9f80812a45301d3b030
Conv
import torch import torch.utils.data from torch import nn class Conv(nn.Module): def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True): super(Conv, self).__init__() self.inp_dim = inp_dim self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=(kernel_size - 1) // 2, bias=True) self.relu = None self.bn = None if relu: self.relu = nn.ReLU() if bn: self.bn = nn.BatchNorm2d(out_dim) def forward(self, x): assert x.size()[1] == self.inp_dim, '{} {}'.format(x.size()[1], self.inp_dim) x = self.conv(x) if self.relu is not None: x = self.relu(x) if self.bn is not None: x = self.bn(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inp_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, primals_1, primals_2, buf2 class ConvNew(nn.Module): def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True): super(ConvNew, self).__init__() self.inp_dim = inp_dim self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=(kernel_size - 1) // 2, bias=True) self.relu = None self.bn = None if relu: self.relu = nn.ReLU() if bn: self.bn = nn.BatchNorm2d(out_dim) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dmetehan/associative-embedding
Conv
false
3,483
[ "BSD-3-Clause" ]
0
a2c2e86e622cd97feec621fcfd34c3f97934e388
https://github.com/dmetehan/associative-embedding/tree/a2c2e86e622cd97feec621fcfd34c3f97934e388
MLP
import torch import torch.nn as nn import torch.nn.functional as F class MLP(nn.Module): def __init__(self, state_dim, action_dim, hidden_dim=400): super(MLP, self).__init__() self.fc1 = nn.Linear(state_dim, hidden_dim) self.fc2 = nn.Linear(hidden_dim, hidden_dim) self.fc3 = nn.Linear(hidden_dim, action_dim) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (400, 400), (400, 1)) assert_size_stride(primals_5, (400,), (1,)) assert_size_stride(primals_6, (4, 400), (400, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf6, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 400), (1, 400), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf3, primals_5, buf5, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 4), (1, 400), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), reinterpret_tensor(buf3, (64, 400), (400, 1), 0 ), primals_6, buf5, primals_4, buf6 class MLPNew(nn.Module): def __init__(self, state_dim, action_dim, hidden_dim=400): super(MLPNew, self).__init__() self.fc1 = nn.Linear(state_dim, hidden_dim) self.fc2 = nn.Linear(hidden_dim, hidden_dim) self.fc3 = nn.Linear(hidden_dim, action_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = 
self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
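# Note on the lowering (a sketch of what the graph above does): each Linear
# becomes an extern mm/addmm plus a Triton kernel that fuses the bias add
# with ReLU in place and stores the <= 0 mask (the "threshold_backward"
# suffix) for the backward pass; fc3 keeps its bias inside addmm because no
# activation follows it.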
f2010126/DL_Labs
MLP
false
3,484
[ "BSD-3-Clause" ]
0
ee81d8aa6027846fc32c98feb9079211c59aa0e9
https://github.com/f2010126/DL_Labs/tree/ee81d8aa6027846fc32c98feb9079211c59aa0e9
BertPooler
from _paritybench_helpers import _mock_config import torch from torch import nn class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_add_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2 class BertPoolerNew(nn.Module): def __init__(self, config): super(BertPoolerNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
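# Note on the fused path (a sketch of what the kernels above do): the clone
# kernel materializes hidden_states[:, 0] by copying the first 16 floats out
# of each 64-float batch slice, so the dense matmul sees a contiguous
# (16, 4) view; the second kernel then applies the bias add and tanh in
# place on the matmul output.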
Adoni/pytorch-pretrained-BERT
BertPooler
false
3,485
[ "Apache-2.0" ]
0
845c33f00e933626dcfc96e0923ecf034295ef75
https://github.com/Adoni/pytorch-pretrained-BERT/tree/845c33f00e933626dcfc96e0923ecf034295ef75
Lookahead
import torch import torch.utils.data.distributed import torch.nn as nn import torch.nn.functional as F class Lookahead(nn.Module): def __init__(self, n_features, context): super(Lookahead, self).__init__() assert context > 0 self.context = context self.n_features = n_features self.pad = 0, self.context - 1 self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size =self.context, stride=1, groups=self.n_features, padding=0, bias=None) def forward(self, x): x = x.transpose(0, 1).transpose(1, 2) x = F.pad(x, pad=self.pad, value=0) x = self.conv(x) x = x.transpose(1, 2).transpose(0, 1).contiguous() return x def __repr__(self): return self.__class__.__name__ + '(' + 'n_features=' + str(self. n_features) + ', context=' + str(self.context) + ')' def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_features': 4, 'context': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.distributed import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 7 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = x1 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (y0 + 16 * x1), tmp2 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x1 + 7 * y0), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(16, 7)](primals_1, buf0, 16, 7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(4, 16)](buf1, buf2, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) del buf1 return buf2, primals_2, buf0 class LookaheadNew(nn.Module): def __init__(self, n_features, context): super(LookaheadNew, self).__init__() assert context > 0 self.context = context self.n_features = n_features self.pad = 0, self.context - 1 self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size =self.context, stride=1, groups=self.n_features, padding=0, bias=None) def __repr__(self): return self.__class__.__name__ + '(' + 'n_features=' + str(self. n_features) + ', context=' + str(self.context) + ')' def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
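# Note on the padding (a sketch of what the pad kernel above does): the
# (T, N, C) input is written into an (N, C, T + context - 1) buffer with
# zeros only on the right (buffer positions t >= 4 here), reproducing
# F.pad(x, (0, context - 1)); each output step t therefore sees frames
# t .. t + context - 1, a lookahead window rather than a causal one.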
faboyds/deepspeech.pytorch
Lookahead
false
3,486
[ "MIT" ]
0
d20f3510a3c556a07f5d662a91a63acffc26633b
https://github.com/faboyds/deepspeech.pytorch/tree/d20f3510a3c556a07f5d662a91a63acffc26633b
make_binary
import torch from torch import Tensor class make_binary(torch.nn.Module): def __init__(self, inplace=False): super().__init__() self.inplace = inplace def forward(self, tensor: 'Tensor') ->Tensor: return tensor % 2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_remainder_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 % tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 != tmp3 tmp5 = libdevice.signbit(tmp2) if tmp2.dtype is tl.float32 else tmp2 < 0 tmp6 = libdevice.signbit(tmp1) if tmp1.dtype is tl.float32 else tmp1 < 0 tmp7 = tmp5 != tmp6 tmp8 = tmp4 & tmp7 tmp9 = tmp2 + tmp1 tmp10 = tl.where(tmp8, tmp9, tmp2) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_remainder_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class make_binaryNew(torch.nn.Module): def __init__(self, inplace=False): super().__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
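# Note on the sign fixup (a sketch of why the extra compares exist): the
# hardware float `%` truncates toward zero like C fmod, while Python/PyTorch
# `%` floors, so the kernel adds the divisor back whenever the raw remainder
# is nonzero and its sign differs from the divisor's. Hand-computed example:
# fmod(-0.5, 2.0) = -0.5, but PyTorch requires -0.5 % 2.0 == 1.5.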
fcaretti/mitosis_MNIST
make_binary
false
3,487
[ "MIT" ]
0
3dce002ff41a09ddd65eb220dc6e5f5c0013a0ea
https://github.com/fcaretti/mitosis_MNIST/tree/3dce002ff41a09ddd65eb220dc6e5f5c0013a0ea
Convolutional
import torch import torch.nn.functional as F import torch.nn as nn class Convolutional(nn.Module): def __init__(self, num_classes=10): super().__init__() self.conv1 = nn.Conv2d(1, 16, 5) self.conv2 = nn.Conv2d(16, 32, 5) self.fc1 = nn.Linear(512, 128) self.fc2 = nn.Linear(128, 64) self.fc3 = nn.Linear(64, num_classes) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = x.view(x.size(0), 1, 28, 28) x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = x.view(x.size(0), 512) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 1, 28, 28])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 576 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy ='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = 
tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1)) assert_size_stride(primals_2, (16, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (128, 512), (512, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (64, 128), (128, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (10, 64), (64, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 24, 24), (9216, 576, 24, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(36864)](buf1, primals_3, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 16, 12, 12), (2304, 144, 12, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 12, 12), (2304, 144, 12, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(9216)](buf1, buf2, buf3, 9216, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 8, 8), (2048, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(8192)](buf5, primals_5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.int8) buf7 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(2048)](buf5, buf6, buf7, 2048, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 128), (1, 512), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(512)](buf9, primals_7, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = 
empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (128, 64), (1, 128), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(256)](buf11, primals_9, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_2, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 512), (512, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class ConvolutionalNew(nn.Module): def __init__(self, num_classes=10): super().__init__() self.conv1 = nn.Conv2d(1, 16, 5) self.conv2 = nn.Conv2d(16, 32, 5) self.fc1 = nn.Linear(512, 128) self.fc2 = nn.Linear(128, 64) self.fc3 = nn.Linear(64, num_classes) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
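# Note on the pooling kernels (a sketch of what they compute): each
# max_pool2d_with_indices kernel emits both the pooled value and an int8
# code in {0, 1, 2, 3} recording which 2x2 window position won, which is
# what the pooling backward needs. Shape-wise, the 28x28 input shrinks
# 28 -> 24 (conv1, 5x5) -> 12 (pool) -> 8 (conv2, 5x5) -> 4 (pool),
# giving the 32 * 4 * 4 = 512 features fc1 expects.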
f4str/digit-recognizer
Convolutional
false
3,488
[ "MIT" ]
0
67c175c683b22a3bf9d8a28dce812a82e08039d5
https://github.com/f4str/digit-recognizer/tree/67c175c683b22a3bf9d8a28dce812a82e08039d5
FPNHead
import torch import torch.nn as nn class FPNHead(nn.Module): def __init__(self, num_in, num_mid, num_out): super().__init__() self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) def forward(self, x): x = nn.functional.relu(self.block0(x), inplace=True) x = nn.functional.relu(self.block1(x), inplace=True) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_in': 4, 'num_mid': 4, 'num_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, primals_3, buf1, buf4 class FPNHeadNew(nn.Module): def __init__(self, num_in, num_mid, num_out): super().__init__() self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) def forward(self, input_0): primals_1 = self.block0.weight primals_3 = self.block1.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
emirkonuk/defocus
FPNHead
false
3,489
[ "Apache-2.0" ]
0
da2977d2698eb20e9ab2a3bcd1fa4d05e1dd9b50
https://github.com/emirkonuk/defocus/tree/da2977d2698eb20e9ab2a3bcd1fa4d05e1dd9b50
FeedForward
import torch import torch.nn.functional as F import torch.nn as nn class FeedForward(nn.Module): def __init__(self, num_classes=10): super().__init__() self.linear1 = nn.Linear(784, 512) self.linear2 = nn.Linear(512, 128) self.linear3 = nn.Linear(128, num_classes) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = x.view(x.size(0), 784) x = F.relu(self.linear1(x)) x = F.relu(self.linear2(x)) x = self.linear3(x) return x def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (512, 784), (784, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (128, 512), (512, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (10, 128), (128, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 512), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 128), ( 1, 512), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(512)](buf3, primals_5, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_1, buf1, buf3, primals_6, primals_4 class FeedForwardNew(nn.Module): def __init__(self, num_classes=10): super().__init__() self.linear1 = nn.Linear(784, 512) self.linear2 = nn.Linear(512, 128) self.linear3 = nn.Linear(128, num_classes) def forward(self, input_0): primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
f4str/digit-recognizer
FeedForward
false
3,490
[ "MIT" ]
0
67c175c683b22a3bf9d8a28dce812a82e08039d5
https://github.com/f4str/digit-recognizer/tree/67c175c683b22a3bf9d8a28dce812a82e08039d5
PositionwiseFeedForward
import torch import torch.nn as nn import torch.nn.functional as F class PositionwiseFeedForward(nn.Module): """Implements FFN equation.""" def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.norm = nn.Sequential() self.w_2 = nn.Linear(d_ff, d_model) self.dropout = None def forward(self, x): return self.w_2(self.norm(F.relu(self.w_1(x)).transpose(2, 1). contiguous()).transpose(2, 1).contiguous()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clone_relu_threshold_backward_0[grid(256)](buf0, primals_2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = buf0 del buf0 extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3 class PositionwiseFeedForwardNew(nn.Module): """Implements FFN equation.""" def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.norm = nn.Sequential() self.w_2 = nn.Linear(d_ff, d_model) self.dropout = None def forward(self, input_0): primals_1 = self.w_1.weight primals_2 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
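# Note on the simplification (a sketch of why the graph is so small): since
# self.norm is an empty nn.Sequential, the transpose/contiguous pair around
# it is the identity up to memory layout, so the compiled graph reduces to
# mm -> fused bias + ReLU (plus the backward mask) -> addmm over a (64, 4)
# flattening of the (4, 4, 4, 4) input.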
fellenB/dcp
PositionwiseFeedForward
false
3,492
[ "MIT" ]
0
3ca7724799d38ff8a56acb4b8b9011bb41932cb0
https://github.com/fellenB/dcp/tree/3ca7724799d38ff8a56acb4b8b9011bb41932cb0
MnistClassifier
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class MnistClassifier(nn.Module): def __init__(self, config): super(MnistClassifier, self).__init__() self.config = config self.h = self.config['image_h'] self.w = self.config['image_w'] self.out_dim = self.config['class_num'] self.fc1 = nn.Linear(self.h * self.w, 16) self.fc2 = nn.Linear(16, self.out_dim) def forward(self, x): x = torch.flatten(x, start_dim=1) x = self.fc1(x) x_hidd = x x = x ** 2 x = self.fc2(x) return x, x_hidd def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(image_h=4, image_w=4, class_num=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 16), (16, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 16), (1, 16), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, buf0, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0 ), buf0, buf1, primals_4 class MnistClassifierNew(nn.Module): def __init__(self, config): super(MnistClassifierNew, self).__init__() self.config = config self.h = self.config['image_h'] self.w = self.config['image_w'] self.out_dim = self.config['class_num'] self.fc1 = nn.Linear(self.h * self.w, 16) self.fc2 = nn.Linear(16, self.out_dim) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
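# Note on the activation (a sketch of what the pow kernel implements): the
# model's only nonlinearity is the elementwise square x ** 2, a common
# substitute for ReLU in homomorphic-encryption settings because it is a
# low-degree polynomial; x_hidd (the second return value) is the fc1 output
# captured before the square.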
DanielKalicki/homomorphic_mnist
MnistClassifier
false
3,493
[ "BSD-3-Clause" ]
0
954e9df2123527bfd266757f3b96897e405e5356
https://github.com/DanielKalicki/homomorphic_mnist/tree/954e9df2123527bfd266757f3b96897e405e5356
MSBlock
import torch import torch.nn as nn class MSBlock(nn.Module): def __init__(self, c_in, rate=4): super(MSBlock, self).__init__() self.rate = rate self.conv = nn.Conv2d(c_in, 32, 3, stride=1, padding=1) self.relu = nn.ReLU(inplace=True) dilation = self.rate * 1 if self.rate >= 1 else 1 self.conv1 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation, padding=dilation) self.relu1 = nn.ReLU(inplace=True) dilation = self.rate * 2 if self.rate >= 1 else 1 self.conv2 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation, padding=dilation) self.relu2 = nn.ReLU(inplace=True) dilation = self.rate * 3 if self.rate >= 1 else 1 self.conv3 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation, padding=dilation) self.relu3 = nn.ReLU(inplace=True) self._initialize_weights() def forward(self, x): o = self.relu(self.conv(x)) o1 = self.relu1(self.conv1(o)) o2 = self.relu2(self.conv2(o)) o3 = self.relu3(self.conv3(o)) out = o + o1 + o2 + o3 return out def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0, 0.01) if m.bias is not None: m.bias.data.zero_() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'c_in': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x3, None) tmp8 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x3, None) tmp13 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp11 = tmp6 + tmp10 tmp14 = tmp12 + tmp13 tmp15 = triton_helpers.maximum(tmp4, tmp14) tmp16 = tmp11 + tmp15 tmp17 = 0.0 tmp18 = tmp15 <= tmp17 tmp19 = tmp10 <= tmp17 tmp20 = tmp5 <= tmp17 tl.store(out_ptr0 + x3, tmp16, None) tl.store(out_ptr1 + x3, tmp18, None) tl.store(out_ptr2 + x3, tmp19, None) tl.store(out_ptr3 + x3, tmp20, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(2048)](buf1, primals_2, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), 
groups=1, bias=None) assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1)) buf4 = extern_kernels.convolution(buf1, primals_8, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1)) buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32 ) buf6 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool) buf7 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool) buf8 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(2048)]( buf1, buf2, primals_5, buf3, primals_7, buf4, primals_9, buf5, buf6, buf7, buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf3 del buf4 del primals_5 del primals_7 del primals_9 return (buf5, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf6, buf7, buf8) class MSBlockNew(nn.Module): def __init__(self, c_in, rate=4): super(MSBlockNew, self).__init__() self.rate = rate self.conv = nn.Conv2d(c_in, 32, 3, stride=1, padding=1) self.relu = nn.ReLU(inplace=True) dilation = self.rate * 1 if self.rate >= 1 else 1 self.conv1 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation, padding=dilation) self.relu1 = nn.ReLU(inplace=True) dilation = self.rate * 2 if self.rate >= 1 else 1 self.conv2 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation, padding=dilation) self.relu2 = nn.ReLU(inplace=True) dilation = self.rate * 3 if self.rate >= 1 else 1 self.conv3 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation, padding=dilation) self.relu3 = nn.ReLU(inplace=True) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0, 0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
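# Note on the fused epilogue (a sketch of what the second kernel does): the
# three dilated 3x3 branches (dilation 4, 8, 12 for rate=4) run as extern
# convolutions, and a single Triton kernel then fuses all three bias adds,
# their ReLUs, the o + o1 + o2 + o3 sum, and the three <= 0 masks needed by
# the ReLU backward.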
farkoo/novel-seam-carving-method
MSBlock
false
3,494
[ "MIT" ]
0
aa3e9a4e3d5e13872eed412444e5be519542f7e5
https://github.com/farkoo/novel-seam-carving-method/tree/aa3e9a4e3d5e13872eed412444e5be519542f7e5
ChannelwiseAttention
import torch import torch.nn as nn import torch.nn.functional as F class ChannelwiseAttention(nn.Module): def __init__(self, in_channels): super(ChannelwiseAttention, self).__init__() self.in_channels = in_channels self.linear_1 = nn.Linear(self.in_channels, self.in_channels // 4) self.linear_2 = nn.Linear(self.in_channels // 4, self.in_channels) def forward(self, input_): n_b, n_c, _h, _w = input_.size() feats = F.adaptive_avg_pool2d(input_, (1, 1)).view((n_b, n_c)) feats = F.relu(self.linear_1(feats)) feats = torch.sigmoid(self.linear_2(feats)) ca_act_reg = torch.mean(feats) feats = feats.view((n_b, n_c, 1, 1)) feats = feats.expand_as(input_).clone() return feats, ca_act_reg def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_per_fused_mean_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x2, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2) del primals_2 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((), (), torch.float32) buf7 = buf5 del buf5 triton_per_fused_mean_sigmoid_2[grid(1)](buf7, buf4, 1, 16, XBLOCK= 1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(256)](buf4, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf6, buf7, reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), buf3, buf4, primals_4 class ChannelwiseAttentionNew(nn.Module): def __init__(self, in_channels): super(ChannelwiseAttentionNew, self).__init__() self.in_channels = in_channels self.linear_1 = nn.Linear(self.in_channels, self.in_channels // 4) self.linear_2 = nn.Linear(self.in_channels // 4, self.in_channels) def forward(self, input_0): primals_2 = self.linear_1.weight primals_3 = self.linear_1.bias primals_4 = self.linear_2.weight primals_5 = self.linear_2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
farkoo/novel-seam-carving-method
ChannelwiseAttention
false
3495
[ "MIT" ]
0
aa3e9a4e3d5e13872eed412444e5be519542f7e5
https://github.com/farkoo/novel-seam-carving-method/tree/aa3e9a4e3d5e13872eed412444e5be519542f7e5
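# Illustrative sketch for the ChannelwiseAttention record above (an
# assumption-based example, not from the source repo). The eager module pools
# each channel to a scalar, gates it through two linear layers, and broadcasts
# the sigmoid weights back over the input. Assumes the eager class is in scope.
import torch

ca = ChannelwiseAttention(in_channels=4)
x = torch.rand(4, 4, 4, 4)            # (batch, channels, H, W)
feats, ca_act_reg = ca(x)
assert feats.shape == x.shape         # per-channel weights expanded to input size
assert ca_act_reg.dim() == 0          # scalar mean-activation regulariser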
DumbFeat
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.optim import torch.nn.parallel class DumbFeat(nn.Module): def __init__(self, opt): super(DumbFeat, self).__init__() dropout = opt['dropout'] if 'dropout' in opt else 0.0 self.dropout = torch.nn.Dropout(p=dropout, inplace=False ) if dropout > 0.0 else None def forward(self, x): if x.dim() > 2: x = x.view(x.size(0), -1) assert x.dim() == 2 if self.dropout is not None: x = self.dropout(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(dropout=0.5)}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class DumbFeatNew(nn.Module): def __init__(self, opt): super(DumbFeatNew, self).__init__() dropout = opt['dropout'] if 'dropout' in opt else 0.0 self.dropout = torch.nn.Dropout(p=dropout, inplace=False ) if dropout > 0.0 else None def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Basasuya/FewShotWithoutForgetting
DumbFeat
false
3496
[ "MIT" ]
0
eecc70e416ed82999124ddfca1b145f6dbcd74a6
https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6
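# Minimal sketch for the DumbFeat record above (illustrative; assumption: a
# plain dict stands in for the _mock_config used by get_init_inputs, since the
# module only does opt['dropout'] lookups). DumbFeat flattens and, when
# p > 0, applies dropout. Assumes the eager class is in scope.
import torch

feat = DumbFeat({'dropout': 0.0})   # 0.0 means no Dropout layer is created
x = torch.rand(4, 4, 4, 4)
out = feat(x)
assert out.shape == (4, 64)         # flattened to (batch, -1)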
SubPixelConvolutionalBlock
import torch from torch import nn class SubPixelConvolutionalBlock(nn.Module): """ A subpixel convolutional block, comprising convolutional, pixel-shuffle, and PReLU activation layers. """ def __init__(self, kernel_size=3, n_channels=64, scaling_factor=2): """ :param kernel_size: kernel size of the convolution :param n_channels: number of input and output channels :param scaling_factor: factor to scale input images by (along both dimensions) """ super(SubPixelConvolutionalBlock, self).__init__() self.conv = nn.Conv2d(in_channels=n_channels, out_channels= n_channels * scaling_factor ** 2, kernel_size=kernel_size, padding=kernel_size // 2) self.pixel_shuffle = nn.PixelShuffle(upscale_factor=scaling_factor) self.prelu = nn.PReLU() def forward(self, input): """ Forward propagation. :param input: input images, a tensor of size (N, n_channels, w, h) :return: scaled output images, a tensor of size (N, n_channels, w * scaling factor, h * scaling factor) """ output = self.conv(input) output = self.pixel_shuffle(output) output = self.prelu(output) return output def get_inputs(): return [torch.rand([4, 64, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused__prelu_kernel_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 128 x2 = xindex // 16384 % 64 x3 = xindex // 1048576 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * (x1 % 2) + 4 * x2 + 256 * (x0 // 2) + 16384 * (x1 // 2) + 1048576 * x3 + x0 % 2), None) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x4, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (256, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1)) assert_size_stride(primals_4, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(16384, 9)](primals_1, buf0, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_1[grid(256, 4096)](primals_3, buf1, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 1, 16384, 
256)) buf3 = buf2 del buf2 triton_poi_fused_convolution_2[grid(4194304)](buf3, primals_2, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf4 = empty_strided_cuda((4, 64, 128, 128), (1048576, 16384, 128, 1), torch.float32) triton_poi_fused__prelu_kernel_3[grid(4194304)](buf3, primals_4, buf4, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) return buf4, buf0, buf1, primals_4, buf3 class SubPixelConvolutionalBlockNew(nn.Module): """ A subpixel convolutional block, comprising convolutional, pixel-shuffle, and PReLU activation layers. """ def __init__(self, kernel_size=3, n_channels=64, scaling_factor=2): """ :param kernel_size: kernel size of the convolution :param n_channels: number of input and output channels :param scaling_factor: factor to scale input images by (along both dimensions) """ super(SubPixelConvolutionalBlockNew, self).__init__() self.conv = nn.Conv2d(in_channels=n_channels, out_channels= n_channels * scaling_factor ** 2, kernel_size=kernel_size, padding=kernel_size // 2) self.pixel_shuffle = nn.PixelShuffle(upscale_factor=scaling_factor) self.prelu = nn.PReLU() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.prelu.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
f74066357/SR
SubPixelConvolutionalBlock
false
3497
[ "MIT" ]
0
374ac141dfbfb4f851379d1c3c7c7f6bf1a21c67
https://github.com/f74066357/SR/tree/374ac141dfbfb4f851379d1c3c7c7f6bf1a21c67
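# Usage sketch for the SubPixelConvolutionalBlock record above (illustrative,
# not from the repo): the conv expands channels by scaling_factor**2,
# PixelShuffle trades those channels for spatial resolution, and PReLU follows.
# Assumes the eager class is in scope.
import torch

blk = SubPixelConvolutionalBlock(kernel_size=3, n_channels=64, scaling_factor=2)
x = torch.rand(1, 64, 8, 8)
y = blk(x)
assert y.shape == (1, 64, 16, 16)   # channels preserved, H and W doubled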
Gaussianize
import torch import torch.nn as nn class Gaussianize(nn.Module): """ Gaussianization per RealNVP sec 3.6 / fig 4b -- at each step half the variables are directly modeled as Gaussians. Model as Gaussians: x2 = z2 * exp(logs) + mu, so x2 ~ N(mu, exp(logs)^2) where mu, logs = f(x1) then to recover the random numbers z driving the model: z2 = (x2 - mu) * exp(-logs) Here f(x1) is a conv layer initialized to identity. """ def __init__(self, n_channels): super().__init__() self.net = nn.Conv2d(n_channels, 2 * n_channels, kernel_size=3, padding=1) self.log_scale_factor = nn.Parameter(torch.zeros(2 * n_channels, 1, 1)) self.net.weight.data.zero_() self.net.bias.data.zero_() def forward(self, x1, x2): h = self.net(x1) * self.log_scale_factor.exp() m, logs = h[:, 0::2, :, :], h[:, 1::2, :, :] z2 = (x2 - m) * torch.exp(-logs) logdet = -logs.sum([1, 2, 3]) return z2, logdet def inverse(self, x1, z2): h = self.net(x1) * self.log_scale_factor.exp() m, logs = h[:, 0::2, :, :], h[:, 1::2, :, :] x2 = m + z2 * torch.exp(logs) logdet = logs.sum([1, 2, 3]) return x2, logdet def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused_exp_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r3 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 32 * r2 + 128 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + 2 * r2, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (16 + r1 + 32 * r2 + 128 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr2 + (1 + 2 * r2), None, eviction_policy='evict_last') tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 * tmp3 tmp5 = tmp0 - tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 * tmp8 tmp10 = -tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp5 * tmp11 tmp13 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = -tmp16 tl.store(out_ptr0 + (r3 + 64 * x0), tmp12, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 1, 1), (1, 1, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf4 = buf3 del buf3 triton_per_fused_exp_mul_neg_sub_sum_1[grid(4)](buf4, primals_5, buf1, primals_4, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) return buf2, buf4, primals_1, primals_3, primals_4, primals_5, buf1 class GaussianizeNew(nn.Module): """ Gaussianization per RealNVP sec 3.6 / fig 4b -- at each step half the variables are directly modeled as Gaussians. 
Model as Gaussians: x2 = z2 * exp(logs) + mu, so x2 ~ N(mu, exp(logs)^2) where mu, logs = f(x1) then to recover the random numbers z driving the model: z2 = (x2 - mu) * exp(-logs) Here f(x1) is a conv layer initialized to identity. """ def __init__(self, n_channels): super().__init__() self.net = nn.Conv2d(n_channels, 2 * n_channels, kernel_size=3, padding=1) self.log_scale_factor = nn.Parameter(torch.zeros(2 * n_channels, 1, 1)) self.net.weight.data.zero_() self.net.bias.data.zero_() def inverse(self, x1, z2): h = self.net(x1) * self.log_scale_factor.exp() m, logs = h[:, 0::2, :, :], h[:, 1::2, :, :] x2 = m + z2 * torch.exp(logs) logdet = logs.sum([1, 2, 3]) return x2, logdet def forward(self, input_0, input_1): primals_4 = self.log_scale_factor primals_1 = self.net.weight primals_2 = self.net.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
ffraaz/flow_based_priors
Gaussianize
false
3498
[ "MIT" ]
0
4f61ecc233a01375c9a069a8baf676152a3e20fa
https://github.com/ffraaz/flow_based_priors/tree/4f61ecc233a01375c9a069a8baf676152a3e20fa
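# Round-trip sketch for the Gaussianize record above (illustrative): because
# inverse() applies the exact algebraic inverse of forward() — x2 = m + z2*exp(logs)
# versus z2 = (x2 - m)*exp(-logs) — x2 is recovered and the log-determinants
# cancel. Assumes the eager class is in scope.
import torch

g = Gaussianize(n_channels=4)
x1, x2 = torch.rand(2, 4, 4, 4), torch.rand(2, 4, 4, 4)
z2, logdet = g(x1, x2)
x2_rec, logdet_inv = g.inverse(x1, z2)
assert torch.allclose(x2_rec, x2, atol=1e-5)
assert torch.allclose(logdet, -logdet_inv)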
SimpleAttention
import torch import torch.nn as nn import torch.nn.functional as F class SimpleAttention(nn.Module): def __init__(self, input_dim): super(SimpleAttention, self).__init__() self.input_dim = input_dim self.scalar = nn.Linear(self.input_dim, 1, bias=False) def forward(self, M, x=None): """ M -> (seq_len, batch, vector) x -> dummy argument for the compatibility with MatchingAttention """ scale = self.scalar(M) alpha = F.softmax(scale, dim=0).permute(1, 2, 0) attn_pool = torch.bmm(alpha, M.transpose(0, 1))[:, 0, :] return attn_pool, alpha def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0), out =buf3) return reinterpret_tensor(buf3, (4, 4), (4, 1), 0), reinterpret_tensor(buf2 , (4, 1, 4), (1, 1, 4), 0), primals_2, buf2 class SimpleAttentionNew(nn.Module): def __init__(self, input_dim): super(SimpleAttentionNew, self).__init__() self.input_dim = input_dim self.scalar = nn.Linear(self.input_dim, 1, bias=False) def forward(self, input_0): primals_1 = self.scalar.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
filkar/CASTLE
SimpleAttention
false
3499
[ "MIT" ]
0
128b316d24503875bcc298301c17b003e6d4599d
https://github.com/filkar/CASTLE/tree/128b316d24503875bcc298301c17b003e6d4599d
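# Shape sketch for the SimpleAttention record above (illustrative): M follows
# the docstring layout (seq_len, batch, vector), and the attention weights are
# normalised over the sequence dimension. Assumes the eager class is in scope.
import torch

att = SimpleAttention(input_dim=4)
M = torch.rand(4, 4, 4)                    # (seq_len, batch, vector)
attn_pool, alpha = att(M)
assert attn_pool.shape == (4, 4)           # (batch, vector)
assert alpha.shape == (4, 1, 4)            # (batch, 1, seq_len)
assert torch.allclose(alpha.sum(-1), torch.ones(4, 1))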
Net2
import torch import numpy as np from torch import as_tensor from torch import no_grad import torch.nn as nn import torch.nn.functional as F import torch.optim as optim class AsModelNet(nn.Module): @staticmethod def chunk_it(xx): d = [] for x in xx: d.append(x) if len(d) >= 3: yield as_tensor(d) d = [] if d: yield as_tensor(d) def fit(self, xx, yy): criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(self.parameters(), lr=0.001, momentum=0.9) chunk_x = (as_tensor(x.reshape(1, *self.shape).astype(np.float32)) for x in xx) chunk_y = (as_tensor(y.reshape(1)) for y in yy) for epoch in range(5): for x, y in zip(chunk_x, chunk_y): optimizer.zero_grad() outputs = self(x) loss = criterion(outputs, y) loss.backward() optimizer.step() def predict(self, xx): with no_grad(): tensor = self(as_tensor(xx.reshape(-1, *self.shape).astype(np. float32))) return tensor.numpy().argmax(axis=1) def eye_probability(self, vector): vector = vector.reshape(1, -1) with no_grad(): tensor = self(as_tensor(vector.reshape(-1, *self.shape).astype( np.float32))) return tensor[0, 1].item() class Net2(AsModelNet): def __init__(self, shape_size): super().__init__() mid_size = round(np.sqrt(shape_size)) self.shape = shape_size, self.fc1 = nn.Linear(shape_size, mid_size) self.sigmoid = nn.Sigmoid() self.fc2 = nn.Linear(mid_size, 3) self.softmax = nn.Softmax(dim=1) def forward(self, x): x = F.relu(self.fc1(x)) x = self.sigmoid(x) x = self.fc2(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'shape_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import as_tensor from torch import no_grad import torch.nn as nn import torch.optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_sigmoid_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.sigmoid(tmp4) tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = xindex // 48 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = xindex // 48 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (3, 2), (2, 1)) assert_size_stride(primals_5, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 
2, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_sigmoid_threshold_backward_0[grid(128)](buf0, primals_2, buf1, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), ( 2, 1), 0), reinterpret_tensor(primals_4, (2, 3), (1, 2), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) triton_poi_fused__softmax_1[grid(192)](buf2, buf3, 192, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 3), (48, 12, 3, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(192)](buf3, buf4, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf4, primals_4, buf5 class AsModelNet(nn.Module): @staticmethod def chunk_it(xx): d = [] for x in xx: d.append(x) if len(d) >= 3: yield as_tensor(d) d = [] if d: yield as_tensor(d) def fit(self, xx, yy): criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(self.parameters(), lr=0.001, momentum=0.9) chunk_x = (as_tensor(x.reshape(1, *self.shape).astype(np.float32)) for x in xx) chunk_y = (as_tensor(y.reshape(1)) for y in yy) for epoch in range(5): for x, y in zip(chunk_x, chunk_y): optimizer.zero_grad() outputs = self(x) loss = criterion(outputs, y) loss.backward() optimizer.step() def predict(self, xx): with no_grad(): tensor = self(as_tensor(xx.reshape(-1, *self.shape).astype(np. float32))) return tensor.numpy().argmax(axis=1) def eye_probability(self, vector): vector = vector.reshape(1, -1) with no_grad(): tensor = self(as_tensor(vector.reshape(-1, *self.shape).astype( np.float32))) return tensor[0, 1].item() class Net2New(AsModelNet): def __init__(self, shape_size): super().__init__() mid_size = round(np.sqrt(shape_size)) self.shape = shape_size, self.fc1 = nn.Linear(shape_size, mid_size) self.sigmoid = nn.Sigmoid() self.fc2 = nn.Linear(mid_size, 3) self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
firemark/eye-detector
Net2
false
3500
[ "MIT" ]
0
1efc4ccd0f0fc5d52e16b130d336eefd14324a02
https://github.com/firemark/eye-detector/tree/1efc4ccd0f0fc5d52e16b130d336eefd14324a02
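# Inference sketch for the Net2 record above (illustrative): shape_size inputs
# pass through a round(sqrt(shape_size))-unit hidden layer to 3-way softmax
# probabilities. (Caveat: fit() builds its chunk generators once, so only the
# first of its 5 epochs sees any data.) Assumes the eager class is in scope.
import torch

net = Net2(shape_size=4)            # mid_size = round(sqrt(4)) = 2
x = torch.rand(2, 4)
probs = net(x)
assert probs.shape == (2, 3)
assert torch.allclose(probs.sum(dim=1), torch.ones(2))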
Split
import torch import torch.nn as nn class Gaussianize(nn.Module): """ Gaussianization per RealNVP sec 3.6 / fig 4b -- at each step half the variables are directly modeled as Gaussians. Model as Gaussians: x2 = z2 * exp(logs) + mu, so x2 ~ N(mu, exp(logs)^2) where mu, logs = f(x1) then to recover the random numbers z driving the model: z2 = (x2 - mu) * exp(-logs) Here f(x1) is a conv layer initialized to identity. """ def __init__(self, n_channels): super().__init__() self.net = nn.Conv2d(n_channels, 2 * n_channels, kernel_size=3, padding=1) self.log_scale_factor = nn.Parameter(torch.zeros(2 * n_channels, 1, 1)) self.net.weight.data.zero_() self.net.bias.data.zero_() def forward(self, x1, x2): h = self.net(x1) * self.log_scale_factor.exp() m, logs = h[:, 0::2, :, :], h[:, 1::2, :, :] z2 = (x2 - m) * torch.exp(-logs) logdet = -logs.sum([1, 2, 3]) return z2, logdet def inverse(self, x1, z2): h = self.net(x1) * self.log_scale_factor.exp() m, logs = h[:, 0::2, :, :], h[:, 1::2, :, :] x2 = m + z2 * torch.exp(logs) logdet = logs.sum([1, 2, 3]) return x2, logdet class Split(nn.Module): """ Split layer; cf Glow figure 2 / RealNVP figure 4b Based on RealNVP multi-scale architecture: splits an input in half along the channel dim; half the vars are directly modeled as Gaussians while the other half undergo further transformations (cf RealNVP figure 4b). """ def __init__(self, n_channels): super().__init__() self.gaussianize = Gaussianize(n_channels // 2) def forward(self, x): x1, x2 = x.chunk(2, dim=1) z2, logdet = self.gaussianize(x1, x2) return x1, z2, logdet def inverse(self, x1, z2): x2, logdet = self.gaussianize.inverse(x1, z2) x = torch.cat([x1, x2], dim=1) return x, logdet def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused_exp_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + (32 + r3 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 32 * r2 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + 2 * r2, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (16 + r1 + 32 * r2 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr2 + (1 + 2 * r2), None, eviction_policy='evict_last') tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 * tmp3 tmp5 = tmp0 - tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 * tmp8 tmp10 = -tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp5 * tmp11 tmp13 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = -tmp16 tl.store(out_ptr0 + (r3 + 32 * x0), tmp5, xmask) tl.store(out_ptr1 + (r3 + 32 * x0), tmp12, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 2, 4, 4), (64, 16, 4, 1), 0), primals_2, stride=(1, 1), padding =(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4,), (1,), torch.float32) buf5 = buf4 del buf4 triton_per_fused_exp_mul_neg_sub_sum_1[grid(4)](buf5, primals_1, buf1, primals_4, buf2, buf3, 4, 32, XBLOCK=1, num_warps=2, num_stages=1) return reinterpret_tensor(primals_1, (4, 2, 4, 4), (64, 16, 4, 1), 0 ), buf3, buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 2, 4, 4), (64, 16, 4, 1), 0), buf1, buf2 class Gaussianize(nn.Module): """ Gaussianization 
per RealNVP sec 3.6 / fig 4b -- at each step half the variables are directly modeled as Gaussians. Model as Gaussians: x2 = z2 * exp(logs) + mu, so x2 ~ N(mu, exp(logs)^2) where mu, logs = f(x1) then to recover the random numbers z driving the model: z2 = (x2 - mu) * exp(-logs) Here f(x1) is a conv layer initialized to identity. """ def __init__(self, n_channels): super().__init__() self.net = nn.Conv2d(n_channels, 2 * n_channels, kernel_size=3, padding=1) self.log_scale_factor = nn.Parameter(torch.zeros(2 * n_channels, 1, 1)) self.net.weight.data.zero_() self.net.bias.data.zero_() def forward(self, x1, x2): h = self.net(x1) * self.log_scale_factor.exp() m, logs = h[:, 0::2, :, :], h[:, 1::2, :, :] z2 = (x2 - m) * torch.exp(-logs) logdet = -logs.sum([1, 2, 3]) return z2, logdet def inverse(self, x1, z2): h = self.net(x1) * self.log_scale_factor.exp() m, logs = h[:, 0::2, :, :], h[:, 1::2, :, :] x2 = m + z2 * torch.exp(logs) logdet = logs.sum([1, 2, 3]) return x2, logdet class SplitNew(nn.Module): """ Split layer; cf Glow figure 2 / RealNVP figure 4b Based on RealNVP multi-scale architecture: splits an input in half along the channel dim; half the vars are directly modeled as Gaussians while the other half undergo further transformations (cf RealNVP figure 4b). """ def __init__(self, n_channels): super().__init__() self.gaussianize = Gaussianize(n_channels // 2) def inverse(self, x1, z2): x2, logdet = self.gaussianize.inverse(x1, z2) x = torch.cat([x1, x2], dim=1) return x, logdet def forward(self, input_0): primals_4 = self.gaussianize.log_scale_factor primals_2 = self.gaussianize.net.weight primals_3 = self.gaussianize.net.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1], output[2]
ffraaz/flow_based_priors
Split
false
3501
[ "MIT" ]
0
4f61ecc233a01375c9a069a8baf676152a3e20fa
https://github.com/ffraaz/flow_based_priors/tree/4f61ecc233a01375c9a069a8baf676152a3e20fa
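# Round-trip sketch for the Split record above (illustrative): Split halves the
# channels, Gaussianizes the second half, and inverse() re-concatenates, so the
# input is reconstructed. Assumes the eager Split class is in scope.
import torch

s = Split(n_channels=4)
x = torch.rand(2, 4, 4, 4)
x1, z2, logdet = s(x)
x_rec, _ = s.inverse(x1, z2)
assert x1.shape == z2.shape == (2, 2, 4, 4)   # channel dim split in half
assert torch.allclose(x_rec, x, atol=1e-5)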
Attention
import math import torch import torch.nn as nn import torch.nn.functional as F class Attention(nn.Module): def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0): """ Attention Mechanism :param embed_dim: :param hidden_dim: :param out_dim: :param n_head: num of head (Multi-Head Attention) :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot) :return (?, q_len, out_dim,) """ super(Attention, self).__init__() if hidden_dim is None: hidden_dim = embed_dim // n_head if out_dim is None: out_dim = embed_dim self.embed_dim = embed_dim self.hidden_dim = hidden_dim self.n_head = n_head self.score_function = score_function self.w_k = nn.Linear(embed_dim, n_head * hidden_dim) self.w_q = nn.Linear(embed_dim, n_head * hidden_dim) self.proj = nn.Linear(n_head * hidden_dim, out_dim) self.dropout = nn.Dropout(dropout) if score_function == 'mlp': self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2)) elif self.score_function == 'bi_linear': self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.hidden_dim) if self.weight is not None: self.weight.data.uniform_(-stdv, stdv) def forward(self, k, q): if len(q.shape) == 2: q = torch.unsqueeze(q, dim=1) if len(k.shape) == 2: k = torch.unsqueeze(k, dim=1) mb_size = k.shape[0] k_len = k.shape[1] q_len = q.shape[1] kx = self.w_k(k).view(mb_size, k_len, self.n_head, self.hidden_dim) kx = kx.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self. hidden_dim) qx = self.w_q(q).view(mb_size, q_len, self.n_head, self.hidden_dim) qx = qx.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self. hidden_dim) if self.score_function == 'dot_product': kt = kx.permute(0, 2, 1) score = torch.bmm(qx, kt) elif self.score_function == 'scaled_dot_product': kt = kx.permute(0, 2, 1) qkt = torch.bmm(qx, kt) score = torch.div(qkt, math.sqrt(self.hidden_dim)) elif self.score_function == 'mlp': kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1) qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1) kq = torch.cat((kxx, qxx), dim=-1) score = torch.tanh(torch.matmul(kq, self.weight)) elif self.score_function == 'bi_linear': qw = torch.matmul(qx, self.weight) kt = kx.permute(0, 2, 1) score = torch.bmm(qw, kt) else: raise RuntimeError('invalid score_function') score = F.softmax(score, dim=0) output = torch.bmm(score, kx) output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1) output = self.proj(output) output = self.dropout(output) return output, score def get_inputs(): return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4])] def get_init_inputs(): return [[], {'embed_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, 
num_stages=1) buf5 = buf3 del buf3 extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0 ), buf4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0 ), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0) class AttentionNew(nn.Module): def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0): """ Attention Mechanism :param embed_dim: :param hidden_dim: :param out_dim: :param n_head: num of head (Multi-Head Attention) :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot) :return (?, q_len, out_dim,) """ super(AttentionNew, self).__init__() if hidden_dim is None: hidden_dim = embed_dim // n_head if out_dim is None: out_dim = embed_dim self.embed_dim = embed_dim self.hidden_dim = hidden_dim self.n_head = n_head self.score_function = score_function self.w_k = nn.Linear(embed_dim, n_head * hidden_dim) self.w_q = nn.Linear(embed_dim, n_head * hidden_dim) self.proj = nn.Linear(n_head * hidden_dim, out_dim) self.dropout = nn.Dropout(dropout) if score_function == 'mlp': self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2)) elif self.score_function == 'bi_linear': self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.hidden_dim) if self.weight is not None: self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0, input_1): primals_3 = self.w_k.weight primals_4 = self.w_k.bias primals_5 = self.w_q.weight primals_6 = self.w_q.bias primals_7 = self.proj.weight primals_8 = self.proj.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
filkar/CASTLE
Attention
false
3502
[ "MIT" ]
0
128b316d24503875bcc298301c17b003e6d4599d
https://github.com/filkar/CASTLE/tree/128b316d24503875bcc298301c17b003e6d4599d
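# Shape sketch for the Attention record above (illustrative) using the default
# dot_product scoring; note this implementation takes the softmax over dim 0 of
# the stacked scores, as written in the source. Assumes the eager class is in scope.
import torch

att = Attention(embed_dim=4)        # defaults: n_head=1, hidden_dim=out_dim=4
k = torch.rand(4, 4, 4)             # (batch, k_len, embed_dim)
q = torch.rand(4, 4, 4)             # (batch, q_len, embed_dim)
out, score = att(k, q)
assert out.shape == (4, 4, 4)       # (batch, q_len, out_dim)
assert score.shape == (4, 4, 4)     # (n_head * batch, q_len, k_len)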
BertSelfAttention
from _paritybench_helpers import _mock_config import math import torch from torch import nn class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x4, xmask) tmp3 = 
tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 
1), (16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf11 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super(BertSelfAttentionNew, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Adoni/pytorch-pretrained-BERT
BertSelfAttention
false
3,503
[ "Apache-2.0" ]
0
845c33f00e933626dcfc96e0923ecf034295ef75
https://github.com/Adoni/pytorch-pretrained-BERT/tree/845c33f00e933626dcfc96e0923ecf034295ef75
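A minimal smoke test for the record above; this is a sketch, not part of the dataset row. It assumes BertSelfAttentionNew from the snippet is importable and that a bare-bones stand-in object (hypothetical fields shown) replaces a real BertConfig; the (4, 4, 4) shapes mirror the assert_size_stride checks in call(), the mask is added to the raw attention scores (so zeros leave everything visible), and the generated kernels require CUDA.
import types
import torch

# Hypothetical minimal config; hidden_size must divide evenly by num_attention_heads.
config = types.SimpleNamespace(hidden_size=4, num_attention_heads=4,
                               attention_probs_dropout_prob=0.0)
layer = BertSelfAttentionNew(config).cuda()
hidden_states = torch.rand(4, 4, 4, device='cuda')    # (batch, seq_len, hidden)
attention_mask = torch.zeros(4, 4, 4, device='cuda')  # additive mask: 0 keeps, large negatives block
context = layer(hidden_states, attention_mask)
print(context.shape)  # torch.Size([4, 4, 4])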
ActionAttention
import torch import numpy as np import torch as th import torch.nn as nn class ActionAttention(nn.Module): def __init__(self, model_dim, n_actions): super(ActionAttention, self).__init__() self.model_dim = model_dim self.n_actions = n_actions self.fcq = nn.Linear(model_dim, model_dim * n_actions) self.fck = nn.Linear(model_dim, model_dim * n_actions) def forward(self, queries, keys): model_dim = self.model_dim n_actions = self.n_actions batch_size = queries.size(0) q = self.fcq(queries).view(batch_size * n_actions, 1, model_dim) k = self.fck(keys).view(batch_size, -1, n_actions, model_dim ).transpose(1, 2).reshape(batch_size * n_actions, -1, model_dim) v = th.bmm(q, k.transpose(1, 2)) / np.sqrt(model_dim) v = v.view(batch_size, n_actions, -1).transpose(1, 2) return v def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'model_dim': 4, 'n_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sqrt_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor( primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0), out=buf2) buf3 = buf2 del buf2 get_raw_stream(0) triton_poi_fused_div_sqrt_0[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return reinterpret_tensor(buf3, (4, 1, 4), (4, 1, 1), 0 ), primals_1, primals_6, reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 16, 1), 0) class ActionAttentionNew(nn.Module): def __init__(self, model_dim, n_actions): super(ActionAttentionNew, self).__init__() self.model_dim = model_dim self.n_actions = n_actions self.fcq = nn.Linear(model_dim, model_dim * n_actions) self.fck = nn.Linear(model_dim, model_dim * n_actions) def forward(self, input_0, input_1): primals_2 = self.fcq.weight primals_3 = self.fcq.bias primals_4 = self.fck.weight primals_5 = self.fck.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
footoredo/pymarl
ActionAttention
false
3,504
[ "Apache-2.0" ]
0
9c62dda7a7ed984e020f2cafab93601342305af2
https://github.com/footoredo/pymarl/tree/9c62dda7a7ed984e020f2cafab93601342305af2
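A quick parity check between the two modules in this record, assuming both classes are in scope; the shapes come straight from get_inputs()/get_init_inputs(), the parameter names (fcq, fck) match across the pair, and the Triton path needs a CUDA device. Treat it as a sketch.
import torch

eager = ActionAttention(model_dim=4, n_actions=4)
queries, keys = torch.rand(4, 4), torch.rand(4, 4)
ref = eager(queries, keys)                         # shape (4, 1, 4)
if torch.cuda.is_available():
    fused = ActionAttentionNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())      # same parameter layout in both classes
    out = fused(queries.cuda(), keys.cuda())
    print(torch.allclose(ref.cuda(), out, atol=1e-5))  # expected: True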
ActionAttentionV3
import torch import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F class ActionAttentionV3(nn.Module): def __init__(self, model_dim, n_actions): super(ActionAttentionV3, self).__init__() self.model_dim = model_dim self.n_actions = n_actions self.fcq = nn.Linear(model_dim, model_dim) self.fck = nn.Linear(model_dim, model_dim) self.fca = nn.Linear(model_dim, n_actions) def forward(self, queries, keys): model_dim = self.model_dim n_actions = self.n_actions batch_size = queries.size(0) a = self.fca(queries) q = self.fcq(queries).view(batch_size, 1, model_dim) k = self.fck(keys).view(batch_size, -1, model_dim) v = th.bmm(q, k.transpose(1, 2)) / np.sqrt(model_dim) v = F.tanh(v) v = th.bmm(v.transpose(1, 2), a.view(batch_size, 1, n_actions)) return v def get_inputs(): return [torch.rand([4, 1, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'model_dim': 4, 'n_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sqrt_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 / tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0), out=buf3) buf4 = buf3 del buf3 get_raw_stream(0) triton_poi_fused_div_sqrt_tanh_0[grid(64)](buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (4, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0), out=buf5) return buf5, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0 ), reinterpret_tensor(primals_8, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0) class ActionAttentionV3New(nn.Module): def __init__(self, model_dim, n_actions): super(ActionAttentionV3New, self).__init__() self.model_dim = model_dim self.n_actions = n_actions self.fcq = nn.Linear(model_dim, model_dim) self.fck = nn.Linear(model_dim, model_dim) self.fca = nn.Linear(model_dim, n_actions) def forward(self, input_0, input_1): primals_2 = self.fcq.weight primals_3 = self.fcq.bias primals_4 = self.fck.weight primals_5 = self.fck.bias primals_6 = self.fca.weight primals_7 = self.fca.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, 
primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
footoredo/pymarl
ActionAttentionV3
false
3,505
[ "Apache-2.0" ]
0
9c62dda7a7ed984e020f2cafab93601342305af2
https://github.com/footoredo/pymarl/tree/9c62dda7a7ed984e020f2cafab93601342305af2
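A shape walk-through for the eager module above (a sketch, assuming the class is in scope); the sizes are the ones get_inputs() feeds it. The 4x4x4x4 keys flatten to 16 key positions per batch element, and the tanh-squashed scores are combined with the 4 action logits.
import torch

att = ActionAttentionV3(model_dim=4, n_actions=4)
queries = torch.rand(4, 1, 4)   # (batch, 1, model_dim)
keys = torch.rand(4, 4, 4, 4)   # flattened to (batch, 16, model_dim) inside forward
v = att(queries, keys)
print(v.shape)                  # torch.Size([4, 16, 4]): one row per key position, one column per action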
Pointer
import torch import torch.nn as nn import torch.nn.functional as F def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class Initialized_Conv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, relu=False, bias=False): super().__init__() self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride =stride, padding=padding, groups=groups, bias=bias) if relu is True: self.relu = True nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu') else: self.relu = False nn.init.xavier_uniform_(self.out.weight) def forward(self, x): if self.relu is True: return F.relu(self.out(x)) else: return self.out(x) class Pointer(nn.Module): def __init__(self, d_model): super().__init__() self.w1 = Initialized_Conv1d(d_model * 2, 1) self.w2 = Initialized_Conv1d(d_model * 2, 1) def forward(self, M1, M2, M3, mask): X1 = torch.cat([M1, M2], dim=1) X2 = torch.cat([M1, M3], dim=1) Y1 = mask_logits(self.w1(X1).squeeze(), mask) Y2 = mask_logits(self.w2(X2).squeeze(), mask) return Y1, Y2 def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask, other=0.0) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x3, tmp10, xmask) tl.store(out_ptr1 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp8 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp1 tmp5 = -1e+30 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp8 * tmp1 tmp10 = tmp9 + tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, primals_3, buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4), (4, 4, 1)) buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 4), (4, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_1[grid(64)](buf2, primals_5, buf4, buf3, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del buf4 return buf3, buf5, primals_4, primals_5, primals_6, buf0, buf1 def mask_logits(target, mask): mask = mask.type(torch.float32) return target * mask + (1 - mask) * -1e+30 class 
Initialized_Conv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, relu=False, bias=False): super().__init__() self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride =stride, padding=padding, groups=groups, bias=bias) if relu is True: self.relu = True nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu') else: self.relu = False nn.init.xavier_uniform_(self.out.weight) def forward(self, x): if self.relu is True: return F.relu(self.out(x)) else: return self.out(x) class PointerNew(nn.Module): def __init__(self, d_model): super().__init__() self.w1 = Initialized_Conv1d(d_model * 2, 1) self.w2 = Initialized_Conv1d(d_model * 2, 1) def forward(self, input_0, input_1, input_2, input_3): primals_4 = self.w1.out.weight primals_6 = self.w2.out.weight primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 primals_5 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
dcy2018/QANA
Pointer
false
3,506
[ "MIT" ]
0
69d1e4ff408a56317479e22ecc854c91fc0f420f
https://github.com/dcy2018/QANA/tree/69d1e4ff408a56317479e22ecc854c91fc0f420f
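Usage sketch for the eager Pointer head above, assuming the classes are in scope; shapes follow get_inputs(). The 0/1 mask drives mask_logits, which pushes masked positions to -1e30 so a downstream softmax effectively ignores them.
import torch

ptr = Pointer(d_model=4)
M1, M2, M3 = (torch.rand(4, 4, 4) for _ in range(3))
mask = torch.randint(0, 2, (4, 4, 4)).float()  # 1 = keep, 0 = mask to -1e30
Y1, Y2 = ptr(M1, M2, M3, mask)
print(Y1.shape, Y2.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4]) after broadcasting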
AddSubNet
import torch from torch import nn import torch.utils.data class AddSubNet(nn.Module): """ Simple AddSub network in PyTorch. This network outputs the sum and difference of the inputs. """ def __init__(self): super(AddSubNet, self).__init__() def forward(self, input0, input1): """Return the elementwise sum and difference of the two inputs.""" return input0 + input1, input0 - input1 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_sub_0[grid(256)](arg0_1, arg1_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1 class AddSubNetNew(nn.Module): """ Simple AddSub network in PyTorch. This network outputs the sum and difference of the inputs. """ def __init__(self): super(AddSubNetNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
funny000/python_project
AddSubNet
false
3,507
[ "MIT" ]
0
190289765d0bdd908ce289c78969b3702a2c4292
https://github.com/funny000/python_project/tree/190289765d0bdd908ce289c78969b3702a2c4292
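Because the module is stateless, the pair above is easy to verify end to end; this sketch (assuming both classes are in scope) checks the eager outputs exactly and the fused CUDA outputs numerically.
import torch

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
s, d = AddSubNet()(a, b)
assert torch.equal(s, a + b) and torch.equal(d, a - b)
if torch.cuda.is_available():
    s2, d2 = AddSubNetNew()(a.cuda(), b.cuda())
    print(torch.allclose(s.cuda(), s2), torch.allclose(d.cuda(), d2))  # True True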
ORPooling
import torch import torch.nn as nn class ORPooling(nn.Module): def __init__(self, orientations): super(ORPooling, self).__init__() self.orientations = orientations def forward(self, x): B, C, H, W = x.shape assert C % self.orientations == 0 x = x.view(B, -1, self.orientations, H, W) return x.max(2)[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'orientations': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class ORPoolingNew(nn.Module): def __init__(self, orientations): super(ORPoolingNew, self).__init__() self.orientations = orientations def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
filick/torchcv
ORPooling
false
3,508
[ "MIT" ]
0
6e3f6780f00037e086c0ee48bf2b93a177a3b4bc
https://github.com/filick/torchcv/tree/6e3f6780f00037e086c0ee48bf2b93a177a3b4bc
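A sketch of the pooling semantics, assuming the class is in scope: with C equal to orientations (the harness case), the view/max in forward reduces the whole channel axis, so it must agree with a plain channel-wise max.
import torch

pool = ORPooling(orientations=4)
x = torch.rand(4, 4, 4, 4)                  # C == orientations, so a single output group
y = pool(x)
print(y.shape)                              # torch.Size([4, 1, 4, 4])
assert torch.equal(y, x.max(dim=1, keepdim=True)[0])  # holds only when C == orientations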
AdditiveAttention
import torch import torch.nn as nn import torch.nn.functional as F class AdditiveAttention(nn.Module): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(AdditiveAttention, self).__init__() if internal_dim is None: internal_dim = int((encoder_hidden_state_dim + decoder_hidden_state_dim) / 2) self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False) self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False) self.v = nn.Linear(internal_dim, 1, bias=False) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + self.w2( decoder_state))) def forward(self, encoder_states, decoder_state): score_vec = torch.cat([self.score(encoder_states[:, i], decoder_state) for i in range(encoder_states.shape[1])], dim=1) attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2) final_context_vec = torch.sum(attention_probs * encoder_states, dim=1) return final_context_vec, attention_probs def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'encoder_hidden_state_dim': 4, 'decoder_hidden_state_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mm_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp5 = tmp4 + tmp1 tmp6 = libdevice.tanh(tmp5) tmp8 = tmp7 + tmp1 tmp9 = libdevice.tanh(tmp8) tmp11 = tmp10 + tmp1 tmp12 = libdevice.tanh(tmp11) tl.store(out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) tl.store(out_ptr2 + x2, tmp9, xmask) tl.store(out_ptr3 + x2, tmp12, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 
= tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mm_0[grid(4)](primals_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf10 = buf0 del buf0 triton_poi_fused_mm_1[grid(4)](primals_1, buf10, 4, XBLOCK=4, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf11) buf4 = buf10 del buf10 triton_poi_fused_mm_2[grid(4)](primals_1, buf4, 4, XBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf5) buf7 = buf4 del buf4 triton_poi_fused_mm_3[grid(4)](primals_1, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf8) del buf7 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_tanh_4[grid(16)](buf1, buf2, buf5, buf8, buf11, buf3, buf6, buf9, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf11 del buf5 del buf8 buf17 = buf2 del buf2 buf13 = reinterpret_tensor(buf17, (4, 1), (4, 1), 0) extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf13) buf14 = reinterpret_tensor(buf17, (4, 1), (4, 1), 1) extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf14) buf15 = reinterpret_tensor(buf17, (4, 1), (4, 1), 2) extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf15) buf16 = reinterpret_tensor(buf17, (4, 1), (4, 1), 3) extern_kernels.mm(buf12, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf16) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_5[grid(16)](buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf13 del buf14 del buf15 del buf16 buf19 = buf17 del buf17 triton_poi_fused__softmax_6[grid(16)](buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = buf18 del buf18 triton_poi_fused_mul_sum_7[grid(16)](buf19, primals_1, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf20, reinterpret_tensor(buf19, (4, 4, 1), (4, 1, 1), 0 ), primals_1, primals_4, reinterpret_tensor(primals_1, (1, 4), (16, 4), 0), buf3, reinterpret_tensor(primals_1, (1, 4), (16, 4), 1 ), buf6, reinterpret_tensor(primals_1, (1, 4), (16, 4), 2 ), buf9, reinterpret_tensor(primals_1, (1, 4), (16, 4), 3 ), buf12, buf19, primals_5 class AdditiveAttentionNew(nn.Module): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(AdditiveAttentionNew, self).__init__() if internal_dim is None: internal_dim = int((encoder_hidden_state_dim + decoder_hidden_state_dim) / 2) self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False) self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False) self.v = nn.Linear(internal_dim, 1, bias=False) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + self.w2( decoder_state))) def forward(self, input_0, input_1): primals_1 = input_0 primals_2 = self.w1.weight primals_3 = self.w2.weight primals_4 = input_1 primals_5 = self.v.weight output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
fireofearth/Trajectron-plus-plus
AdditiveAttention
false
3,509
[ "MIT" ]
0
b39df025b62a8ce466266936198baee9bfa14e89
https://github.com/fireofearth/Trajectron-plus-plus/tree/b39df025b62a8ce466266936198baee9bfa14e89
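A smoke test for the record above (a sketch, assuming both classes are in scope). With the 4-dimensional harness states, each column of the (4, 4) encoder tensor is scored against the decoder batch, giving a (4, 4) context and (4, 4, 1) probabilities; the fused path in call() column-slices its first argument as the encoder states and additionally needs CUDA.
import torch

eager = AdditiveAttention(encoder_hidden_state_dim=4, decoder_hidden_state_dim=4)
encoder_states = torch.rand(4, 4)   # scored column by column via encoder_states[:, i]
decoder_state = torch.rand(4, 4)
context, probs = eager(encoder_states, decoder_state)
print(context.shape, probs.shape)   # torch.Size([4, 4]) torch.Size([4, 4, 1])
if torch.cuda.is_available():
    fused = AdditiveAttentionNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    c2, _ = fused(encoder_states.cuda(), decoder_state.cuda())
    print(torch.allclose(context.cuda(), c2, atol=1e-5))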
TemporallyBatchedAdditiveAttention
import torch import torch.nn as nn import torch.nn.functional as F class AdditiveAttention(nn.Module): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(AdditiveAttention, self).__init__() if internal_dim is None: internal_dim = int((encoder_hidden_state_dim + decoder_hidden_state_dim) / 2) self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False) self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False) self.v = nn.Linear(internal_dim, 1, bias=False) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + self.w2( decoder_state))) def forward(self, encoder_states, decoder_state): score_vec = torch.cat([self.score(encoder_states[:, i], decoder_state) for i in range(encoder_states.shape[1])], dim=1) attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2) final_context_vec = torch.sum(attention_probs * encoder_states, dim=1) return final_context_vec, attention_probs class TemporallyBatchedAdditiveAttention(AdditiveAttention): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(TemporallyBatchedAdditiveAttention, self).__init__( encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + torch.unsqueeze( self.w2(decoder_state), dim=1))) def forward(self, encoder_states, decoder_state): score_vec = self.score(encoder_states, decoder_state) attention_probs = F.softmax(score_vec, dim=1) final_context_vec = torch.sum(attention_probs * encoder_states, dim=1) return final_context_vec, torch.squeeze(torch.transpose( attention_probs, 1, 2), dim=3) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'encoder_hidden_state_dim': 4, 'decoder_hidden_state_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 256 x0 = xindex % 64 x2 = xindex // 256 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(out_ptr0 + x4, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = 
tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, buf1, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.mm(reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf0 triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_mul_sum_3[grid(256)](buf5, primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 4, 16, 1, 1), 0 ), primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), buf2, buf5, primals_5 class AdditiveAttention(nn.Module): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(AdditiveAttention, self).__init__() if internal_dim is None: internal_dim = int((encoder_hidden_state_dim + decoder_hidden_state_dim) / 2) self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False) self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False) self.v = nn.Linear(internal_dim, 1, bias=False) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + self.w2( decoder_state))) def forward(self, encoder_states, decoder_state): score_vec = torch.cat([self.score(encoder_states[:, i], decoder_state) for i in range(encoder_states.shape[1])], dim=1) attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2) final_context_vec = torch.sum(attention_probs * encoder_states, dim=1) return final_context_vec, attention_probs class TemporallyBatchedAdditiveAttentionNew(AdditiveAttention): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(TemporallyBatchedAdditiveAttentionNew, self).__init__( encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + torch.unsqueeze( self.w2(decoder_state), dim=1))) def forward(self, input_0, input_1): primals_1 = 
self.w1.weight primals_3 = self.w2.weight primals_5 = self.v.weight primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
fireofearth/Trajectron-plus-plus
TemporallyBatchedAdditiveAttention
false
3,510
[ "MIT" ]
0
b39df025b62a8ce466266936198baee9bfa14e89
https://github.com/fireofearth/Trajectron-plus-plus/tree/b39df025b62a8ce466266936198baee9bfa14e89
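Shape check for the batched variant above (a sketch, assuming the eager class is in scope); with the 4x4x4x4 inputs from get_inputs(), softmax runs over dim=1, and the trailing singleton survives the squeeze because dim=3 is not size 1 at these shapes.
import torch

att = TemporallyBatchedAdditiveAttention(encoder_hidden_state_dim=4,
                                         decoder_hidden_state_dim=4)
enc = torch.rand(4, 4, 4, 4)
dec = torch.rand(4, 4, 4, 4)
context, probs = att(enc, dec)
print(context.shape, probs.shape)  # torch.Size([4, 4, 4, 4]) torch.Size([4, 4, 4, 4, 1])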
CoxPHLossSorted
import torch from torch import Tensor def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07 ) ->Tensor: """Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards, R is the risk set, and d is the event indicator. We just compute a cumulative sum, and not the true risk sets. This is a limitation, but simple and fast. """ if events.dtype is torch.bool: events = events.float() events = events.view(-1) log_h = log_h.view(-1) gamma = log_h.max() log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma) return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum()) class CoxPHLossSorted(torch.nn.Module): """Loss for CoxPH. Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards, R is the risk set, and d is the event indicator. We just compute a cumulative sum, and not the true risk sets. This is a limitation, but simple and fast. """ def __init__(self): super().__init__() def forward(self, log_h: 'Tensor', events: 'Tensor') ->Tensor: return cox_ph_loss_sorted(log_h, events) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import Tensor assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp14 = tl.load(in_ptr1 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5.to(tl.float32) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp8, = tl.associative_scan((tmp7,), 0, _triton_helper_fn_add0) tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = tl.broadcast_to(tmp14, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = tmp18 / tmp21 tmp23 = -tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 get_raw_stream(0) triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0[grid(1)]( buf4, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07 ) ->Tensor: """Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards, R is the risk set, and d is the event indicator. We just compute a cumulative sum, and not the true risk sets. This is a limitation, but simple and fast. """ if events.dtype is torch.bool: events = events.float() events = events.view(-1) log_h = log_h.view(-1) gamma = log_h.max() log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma) return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum()) class CoxPHLossSortedNew(torch.nn.Module): """Loss for CoxPH. Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards, R is the risk set, and d is the event indicator. We just compute a cumulative sum, and not the true risk sets. This is a limitation, but simple and fast. """ def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
gabrielasuchopar/pycox
CoxPHLossSorted
false
3,511
[ "BSD-2-Clause" ]
0
e4ea5f0ee26c6d3e3a468f164de2b7c426376e99
https://github.com/gabrielasuchopar/pycox/tree/e4ea5f0ee26c6d3e3a468f164de2b7c426376e99
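A small worked example for the loss above (a sketch, assuming the eager class is in scope; it runs on CPU). Inputs must already be sorted by descending duration, and the event vector needs at least one 1, otherwise events.sum() is zero and the final division blows up.
import torch

torch.manual_seed(0)
log_h = torch.randn(8)                                    # risk scores, sorted by descending duration
events = torch.tensor([1., 0., 1., 1., 0., 1., 0., 1.])   # event indicators
loss = CoxPHLossSorted()(log_h, events)
print(loss)  # scalar negative partial log-likelihood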
ActionAttentionV2
import torch import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F class ActionAttentionV2(nn.Module): def __init__(self, model_dim, n_actions): super(ActionAttentionV2, self).__init__() self.model_dim = model_dim self.n_actions = n_actions self.fcq = nn.Linear(model_dim, model_dim) self.fck = nn.Linear(model_dim, model_dim) self.fca = nn.Linear(model_dim, n_actions) def forward(self, queries, keys): model_dim = self.model_dim n_actions = self.n_actions batch_size = queries.size(0) a = self.fca(queries) q = self.fcq(queries).view(batch_size, 1, model_dim) k = self.fck(keys).view(batch_size, -1, model_dim) v = th.bmm(q, k.transpose(1, 2)) / np.sqrt(model_dim) v = F.softmax(v, dim=-1) v = th.bmm(v.transpose(1, 2), a.view(batch_size, 1, n_actions)) return v def get_inputs(): return [torch.rand([4, 1, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'model_dim': 4, 'n_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_sqrt_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 2.0, tl.float64) tmp2 = tl.full([1, 1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, float('-inf')) tmp11 = triton_helpers.max2(tmp10, 1)[:, None] tmp12 = tmp7 - tmp11 tmp13 = tmp6.to(tl.float64) tmp14 = tmp13 * tmp1 tmp15 = tmp14.to(tl.float32) tmp16 = tmp12 / tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp17 / tmp21 tl.store(out_ptr2 + (r1 + 16 * x0), tmp22, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0), out=buf3) buf6 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_sqrt_0[grid(4)](buf3, buf6, 4, 16, XBLOCK =1, num_warps=2, num_stages=1) del buf3 buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0), out=buf7) return buf7, 
reinterpret_tensor(primals_1, (4, 4), (4, 1), 0 ), reinterpret_tensor(primals_8, (64, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0) class ActionAttentionV2New(nn.Module): def __init__(self, model_dim, n_actions): super(ActionAttentionV2New, self).__init__() self.model_dim = model_dim self.n_actions = n_actions self.fcq = nn.Linear(model_dim, model_dim) self.fck = nn.Linear(model_dim, model_dim) self.fca = nn.Linear(model_dim, n_actions) def forward(self, input_0, input_1): primals_2 = self.fcq.weight primals_3 = self.fcq.bias primals_4 = self.fck.weight primals_5 = self.fck.bias primals_6 = self.fca.weight primals_7 = self.fca.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
footoredo/pymarl
ActionAttentionV2
false
3,512
[ "Apache-2.0" ]
0
9c62dda7a7ed984e020f2cafab93601342305af2
https://github.com/footoredo/pymarl/tree/9c62dda7a7ed984e020f2cafab93601342305af2
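Same harness shapes as the V3 record, but with a softmax over key positions instead of tanh; this sketch (assuming the eager class is in scope) just confirms the output layout.
import torch

att = ActionAttentionV2(model_dim=4, n_actions=4)
v = att(torch.rand(4, 1, 4), torch.rand(4, 4, 4, 4))
print(v.shape)  # torch.Size([4, 16, 4]): attention weights over 16 keys combined with 4 action logits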
CoxPHLoss
import torch from torch import Tensor def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07 ) ->Tensor: """Requires the input to be sorted by descending duration time. See DatasetDurationSorted. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards, R is the risk set, and d is the event indicator. We just compute a cumulative sum, and not the true risk sets. This is a limitation, but simple and fast. """ if events.dtype is torch.bool: events = events.float() events = events.view(-1) log_h = log_h.view(-1) gamma = log_h.max() log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma) return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum()) def cox_ph_loss(log_h: 'Tensor', durations: 'Tensor', events: 'Tensor', eps: 'float'=1e-07) ->Tensor: """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards, R is the risk set, and d is the event indicator. We just compute a cumulative sum, and not the true risk sets. This is a limitation, but simple and fast. """ idx = durations.sort(descending=True)[1] events = events[idx] log_h = log_h[idx] return cox_ph_loss_sorted(log_h, events, eps) class CoxPHLoss(torch.nn.Module): """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`. We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$, where h = exp(log_h) are the hazards, R is the risk set, and d is the event indicator. We just compute a cumulative sum, and not the true risk sets. This is a limitation, but simple and fast. """ def forward(self, log_h: 'Tensor', durations: 'Tensor', events: 'Tensor' ) ->Tensor: return cox_ph_loss(log_h, durations, events) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import Tensor assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = r1 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) _tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True) tl.store(out_ptr0 + (r1 + 4 * x0), tmp6, xmask) @triton.jit def triton_red_fused_max_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 2 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp9 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (128 * x0 + r1 // 64), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~(rmask & xmask), 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (64 * tmp5 + r1 % 64), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = triton_helpers.maximum(_tmp9, tmp8) _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp9 = triton_helpers.max2(_tmp9, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_max_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_spl_fused_cumsum_exp_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ws_ptr, xnumel, rnumel, RBLOCK: tl.constexpr): XBLOCK: tl.constexpr = 1 rnumel = 16384 xoffset = tl.program_id(1) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) roffset = tl.program_id(0) * RBLOCK rindex = roffset + tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0 // 64, rmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + 0) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp12 = tl.num_programs(0) tmp13 = ws_ptr.to(tl.pointer_type(tl.uint64)) + xoffset * 1 * tmp12 tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~rmask, 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (64 * tmp5 + r0 % 64), rmask, other=0.0) tmp10 = tmp7 - tmp9 tmp11 = tl_math.exp(tmp10) tmp14 = tmp11.to(tl.float32) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp16 = tl.reduce(tmp15, 0, _triton_helper_fn_add0) tmp17 = triton_helpers.exclusive_scan_decoupled_lookback(tmp13, tmp16, tl.program_id(0), _triton_helper_fn_add0, DTYPE_VALUE_AS_UINT=tl. uint32, DTYPE_PACK=tl.uint64) tmp18 = tl.associative_scan(tmp15, 0, _triton_helper_fn_add0) tmp19 = _triton_helper_fn_add0(tmp17, tmp18) tmp20 = tl.where(roffset == 0, tmp18, tmp19) tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp20, rmask) @triton.jit def triton_red_fused_add_log_mul_sub_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. 
constexpr, RBLOCK: tl.constexpr): xnumel = 2 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp12 = tl.load(in_ptr3 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) _tmp19 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp22 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (128 * x0 + r1 // 64), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr2 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~(rmask & xmask), 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (64 * tmp5 + r1 % 64), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp14 = tmp11 + tmp13 tmp15 = tmp7 - tmp14 tmp16 = tl.load(in_ptr4 + (64 * tmp5 + r1 % 64), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = _tmp19 + tmp18 _tmp19 = tl.where(rmask & xmask, tmp20, _tmp19) tmp21 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp23 = _tmp22 + tmp21 _tmp22 = tl.where(rmask & xmask, tmp23, _tmp22) tmp19 = tl.sum(_tmp19, 1)[:, None] tl.store(out_ptr0 + x0, tmp19, xmask) tmp22 = tl.sum(_tmp22, 1)[:, None] tl.store(out_ptr1 + x0, tmp22, xmask) @triton.jit def triton_per_fused_add_div_log_mul_neg_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_ptr1 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tmp3 / tmp7 tmp9 = -tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) get_raw_stream(0) triton_per_fused_sort_0[grid(64)](arg1_1, buf1, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((2,), (1,), torch.float32) triton_red_fused_max_1[grid(2)](buf1, arg0_1, buf2, 2, 8192, XBLOCK =1, RBLOCK=2048, num_warps=16, num_stages=1) buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_max_2[grid(1)](buf2, buf3, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((16384,), (1,), torch.float32) workspace = empty_strided_cuda((512,), (1,), torch.uint8) workspace.zero_() triton_spl_fused_cumsum_exp_sub_3[split_scan_grid(1, 16384)](buf1, arg0_1, buf3, buf4, workspace, 1, 16384, RBLOCK=2048, num_warps =16, num_stages=1) del workspace buf5 = buf2 del buf2 buf7 = empty_strided_cuda((2,), (1,), torch.float32) 
        triton_red_fused_add_log_mul_sub_sum_4[grid(2)](buf1, arg0_1, buf4,
            buf3, arg2_1, buf5, buf7, 2, 8192, XBLOCK=1, RBLOCK=2048,
            num_warps=16, num_stages=1)
        del arg0_1
        del arg2_1
        del buf1
        del buf4
        buf6 = buf3
        del buf3
        buf9 = buf6
        del buf6
        triton_per_fused_add_div_log_mul_neg_sub_sum_5[grid(1)](buf9, buf5,
            buf7, 1, 2, XBLOCK=1, num_warps=2, num_stages=1)
        del buf5
        del buf7
    return buf9,


def cox_ph_loss_sorted(log_h: 'Tensor', events: 'Tensor', eps: 'float'=1e-07
    ) ->Tensor:
    """Requires the input to be sorted by descending duration time.
    See DatasetDurationSorted.

    We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$,
    where h = exp(log_h) are the hazards and R is the risk set, and d is event.

    We just compute a cumulative sum, and not the true Risk sets. This is a
    limitation, but simple and fast.
    """
    if events.dtype is torch.bool:
        events = events.float()
    events = events.view(-1)
    log_h = log_h.view(-1)
    gamma = log_h.max()
    log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma)
    return -log_h.sub(log_cumsum_h).mul(events).sum().div(events.sum())


def cox_ph_loss(log_h: 'Tensor', durations: 'Tensor', events: 'Tensor',
    eps: 'float'=1e-07) ->Tensor:
    """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`.

    We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$,
    where h = exp(log_h) are the hazards and R is the risk set, and d is event.

    We just compute a cumulative sum, and not the true Risk sets. This is a
    limitation, but simple and fast.
    """
    idx = durations.sort(descending=True)[1]
    events = events[idx]
    log_h = log_h[idx]
    return cox_ph_loss_sorted(log_h, events, eps)


class CoxPHLossNew(torch.nn.Module):
    """Loss for CoxPH model. If data is sorted by descending duration, see `cox_ph_loss_sorted`.

    We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$,
    where h = exp(log_h) are the hazards and R is the risk set, and d is event.

    We just compute a cumulative sum, and not the true Risk sets. This is a
    limitation, but simple and fast.
    """

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
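# --- Usage sketch (not part of the original module) -------------------------
# A minimal sanity check, assuming a CUDA device is available and that the
# definitions above (cox_ph_loss, CoxPHLossNew) are in scope. The (4, 4, 4, 4)
# shapes are the ones guarded by assert_size_stride in call(). Random floats
# avoid ties in `durations`, where the unstable sort could pick a different
# permutation than the eager path.
if __name__ == '__main__' and torch.cuda.is_available():
    log_h = torch.rand(4, 4, 4, 4, device='cuda')
    durations = torch.rand(4, 4, 4, 4, device='cuda')
    events = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
    ref = cox_ph_loss(log_h, durations, events)  # eager reference
    fused = CoxPHLossNew()(log_h, durations, events)  # Triton kernels
    print(torch.allclose(ref, fused, atol=1e-05))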
gabrielasuchopar/pycox
CoxPHLoss
false
3513
[ "BSD-2-Clause" ]
0
e4ea5f0ee26c6d3e3a468f164de2b7c426376e99
https://github.com/gabrielasuchopar/pycox/tree/e4ea5f0ee26c6d3e3a468f164de2b7c426376e99
ContrastiveLoss
import torch from typing import * import torch.nn as nn import torch.nn.functional as F class ContrastiveLoss(nn.Module): """ Contrastive loss function. Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2) if not torch.cuda.is_available(): euclidean_distance = euclidean_distance.cpu() loss_contrastive = torch.mean((1 - label) * torch.pow( euclidean_distance, 2) + label * torch.pow(torch.clamp(self. margin - euclidean_distance, min=0.0), 2)) return loss_contrastive def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
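# --- Worked example (not part of the original module) -----------------------
# A hand-checkable sketch of the objective above with hypothetical values:
# similar pairs (label=0) are penalized by d^2, while dissimilar pairs
# (label=1) are penalized only while they sit inside the margin, via
# max(margin - d, 0)^2.
if __name__ == '__main__':
    d = torch.tensor([0.5, 3.0])  # pairwise distances
    y = torch.tensor([0.0, 1.0])  # 0 = similar, 1 = dissimilar
    margin = 2.0
    loss = ((1 - y) * d ** 2 + y * torch.clamp(margin - d, min=0.0) ** 2
        ).mean()
    # similar pair: 0.5^2 = 0.25; dissimilar pair at d=3.0 is already past
    # the margin, so it contributes 0. Mean = 0.125.
    print(loss)  # tensor(0.1250)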
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from typing import * import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 2.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(nn.Module): """ Contrastive loss function. 
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
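# --- Usage sketch (not part of the original module) -------------------------
# A minimal equivalence check, assuming a CUDA device and that both the eager
# ContrastiveLoss above and this fused version are in scope. Note the fused
# kernel hardcodes the default margin of 2.0 (tmp6 in
# triton_per_fused_add_clamp_mean_mul_pow_rsub_1), so a non-default margin
# passed to __init__ would not take effect on this path.
if __name__ == '__main__' and torch.cuda.is_available():
    out1 = torch.rand(4, 4, 4, 4, device='cuda')
    out2 = torch.rand(4, 4, 4, 4, device='cuda')
    label = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
    ref = ContrastiveLoss()(out1, out2, label)       # eager reference
    fused = ContrastiveLossNew()(out1, out2, label)  # fused Triton version
    print(torch.allclose(ref, fused, atol=1e-05))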
gaungalif/siamese.pytorch
ContrastiveLoss
false
3514
[ "MIT" ]
0
2c06ef574147ea0b8b980943330eaeabe9892533
https://github.com/gaungalif/siamese.pytorch/tree/2c06ef574147ea0b8b980943330eaeabe9892533
Dave_norminit
import torch import torch.nn as nn import torch.utils.data class Dave_norminit(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 24, (5, 5), stride=(2, 2)) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(24, 36, (5, 5), stride=(2, 2)) self.relu2 = nn.ReLU() self.conv3 = nn.Conv2d(36, 48, (5, 5), stride=(2, 2)) self.relu3 = nn.ReLU() self.conv4 = nn.Conv2d(48, 64, (3, 3), stride=(1, 1)) self.relu4 = nn.ReLU() self.conv5 = nn.Conv2d(64, 64, (3, 3), stride=(1, 1)) self.relu5 = nn.ReLU() self.fc1 = nn.Linear(1600, 1164) nn.init.normal_(self.fc1.weight, mean=0.0, std=0.1) self.relu6 = nn.ReLU() self.fc2 = nn.Linear(1164, 100) nn.init.normal_(self.fc2.weight, mean=0.0, std=0.1) self.relu7 = nn.ReLU() self.fc3 = nn.Linear(100, 50) nn.init.normal_(self.fc3.weight, mean=0.0, std=0.1) self.relu8 = nn.ReLU() self.fc4 = nn.Linear(50, 10) nn.init.normal_(self.fc4.weight, mean=0.0, std=0.1) self.relu9 = nn.ReLU() self.before_prediction = nn.Linear(10, 1) def forward(self, x): x = self.conv1(x) x = self.relu1(x) x = self.conv2(x) x = self.relu2(x) x = self.conv3(x) x = self.relu3(x) x = self.conv4(x) x = self.relu4(x) x = self.conv5(x) x = self.relu5(x) x = x.view(-1, 1600) x = self.fc1(x) x = self.relu6(x) x = self.fc2(x) x = self.relu7(x) x = self.fc3(x) x = self.relu8(x) x = self.fc4(x) x = self.relu9(x) x = self.before_prediction(x) x = torch.atan(x) * 2 return x def get_inputs(): return [torch.rand([4, 3, 96, 96])] def get_init_inputs(): return [[], {}]
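# --- Shape trace (not part of the original module) --------------------------
# A quick sketch showing why fc1 takes 1600 input features: with a 96x96
# input, the five convolutions shrink the feature map to 64 x 5 x 5 (the
# ReLUs in between do not change shapes). The `trace` helper is hypothetical.
if __name__ == '__main__':
    def trace(x, *layers):
        for layer in layers:
            x = layer(x)
            print(type(layer).__name__, tuple(x.shape))
        return x
    m = Dave_norminit()
    x = torch.rand(1, 3, 96, 96)
    trace(x, m.conv1, m.conv2, m.conv3, m.conv4, m.conv5)
    # Conv2d (1, 24, 46, 46)
    # Conv2d (1, 36, 21, 21)
    # Conv2d (1, 48, 9, 9)
    # Conv2d (1, 64, 7, 7)
    # Conv2d (1, 64, 5, 5)  -> 64 * 5 * 5 = 1600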
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 203136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 2116 % 24 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 63504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 441 % 36 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 15552 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 48 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 49 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 25 % 64 x2 = xindex // 1600 x3 = xindex % 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x3 + 1664 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4656 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 1164 x1 = xindex // 1164 tmp0 = tl.load(in_out_ptr0 + (x0 + 1184 * x1), xmask) tmp1 = 
tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x0 + 1184 * x1), tmp4, xmask) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_atan_mul_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.atan(tmp0) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (24, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (24,), (1,)) assert_size_stride(primals_3, (4, 3, 96, 96), (27648, 9216, 96, 1)) assert_size_stride(primals_4, (36, 24, 5, 5), (600, 25, 5, 1)) assert_size_stride(primals_5, (36,), (1,)) assert_size_stride(primals_6, (48, 36, 5, 5), (900, 25, 5, 1)) assert_size_stride(primals_7, (48,), (1,)) assert_size_stride(primals_8, (64, 48, 3, 3), (432, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, (1164, 1600), (1600, 1)) assert_size_stride(primals_13, (1164,), (1,)) assert_size_stride(primals_14, (100, 1164), (1164, 1)) assert_size_stride(primals_15, (100,), (1,)) assert_size_stride(primals_16, (50, 100), (100, 1)) assert_size_stride(primals_17, (50,), (1,)) assert_size_stride(primals_18, (10, 50), (50, 1)) assert_size_stride(primals_19, (10,), (1,)) assert_size_stride(primals_20, (1, 10), (10, 1)) assert_size_stride(primals_21, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 
0), groups=1, bias=None) assert_size_stride(buf0, (4, 24, 46, 46), (50784, 2116, 46, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(203136)](buf1, primals_2, 203136, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 36, 21, 21), (15876, 441, 21, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(63504)](buf3, primals_5, 63504, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 48, 9, 9), (3888, 81, 9, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(15552)](buf5, primals_7, 15552, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 7, 7), (3136, 49, 7, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_3[grid(12544)](buf7, primals_9, 12544, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 5, 5), (1600, 25, 5, 1)) buf9 = buf8 del buf8 buf21 = empty_strided_cuda((4, 64, 5, 5), (1664, 25, 5, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_4[grid(6400)](buf9 , primals_11, buf21, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((4, 1164), (1184, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (4, 1600), (1600, 1), 0), reinterpret_tensor(primals_12, (1600, 1164), (1, 1600), 0), out =buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(4656)](buf11, primals_13, 4656, XBLOCK =256, num_warps=4, num_stages=1) del primals_13 buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf11, reinterpret_tensor(primals_14, (1164, 100), (1, 1164), 0), out=buf12) buf13 = buf12 del buf12 triton_poi_fused_relu_6[grid(400)](buf13, primals_15, 400, XBLOCK= 128, num_warps=4, num_stages=1) del primals_15 buf14 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.mm(buf13, reinterpret_tensor(primals_16, (100, 50), (1, 100), 0), out=buf14) buf15 = buf14 del buf14 triton_poi_fused_relu_7[grid(200)](buf15, primals_17, 200, XBLOCK= 128, num_warps=4, num_stages=1) del primals_17 buf16 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_18, (50, 10), ( 1, 50), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_8[grid(40)](buf17, primals_19, 40, XBLOCK=64, num_warps=1, num_stages=1) del primals_19 buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_21, buf17, reinterpret_tensor( primals_20, (10, 1), (1, 10), 0), alpha=1, beta=1, out=buf19) del primals_21 buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused_atan_mul_9[grid(4)](buf19, buf20, 4, XBLOCK=4, num_warps=1, num_stages=1) return (buf20, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, reinterpret_tensor(buf9, (4, 1600), (1600, 1), 0), buf11, 
buf13, buf15, buf17, buf19, primals_20, primals_18, primals_16, primals_14, primals_12, buf21) class Dave_norminitNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 24, (5, 5), stride=(2, 2)) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(24, 36, (5, 5), stride=(2, 2)) self.relu2 = nn.ReLU() self.conv3 = nn.Conv2d(36, 48, (5, 5), stride=(2, 2)) self.relu3 = nn.ReLU() self.conv4 = nn.Conv2d(48, 64, (3, 3), stride=(1, 1)) self.relu4 = nn.ReLU() self.conv5 = nn.Conv2d(64, 64, (3, 3), stride=(1, 1)) self.relu5 = nn.ReLU() self.fc1 = nn.Linear(1600, 1164) nn.init.normal_(self.fc1.weight, mean=0.0, std=0.1) self.relu6 = nn.ReLU() self.fc2 = nn.Linear(1164, 100) nn.init.normal_(self.fc2.weight, mean=0.0, std=0.1) self.relu7 = nn.ReLU() self.fc3 = nn.Linear(100, 50) nn.init.normal_(self.fc3.weight, mean=0.0, std=0.1) self.relu8 = nn.ReLU() self.fc4 = nn.Linear(50, 10) nn.init.normal_(self.fc4.weight, mean=0.0, std=0.1) self.relu9 = nn.ReLU() self.before_prediction = nn.Linear(10, 1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.conv5.weight primals_11 = self.conv5.bias primals_12 = self.fc1.weight primals_13 = self.fc1.bias primals_14 = self.fc2.weight primals_15 = self.fc2.bias primals_16 = self.fc3.weight primals_17 = self.fc3.bias primals_18 = self.fc4.weight primals_19 = self.fc4.bias primals_20 = self.before_prediction.weight primals_21 = self.before_prediction.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
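# --- Usage sketch (not part of the original module) -------------------------
# A minimal equivalence check, assuming a CUDA device and that the eager
# Dave_norminit above is in scope. Copying the state_dict makes both modules
# share weights, so their outputs should agree up to floating-point tolerance.
if __name__ == '__main__' and torch.cuda.is_available():
    eager = Dave_norminit().cuda()
    fused = Dave_norminitNew().cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 3, 96, 96, device='cuda')
    print(torch.allclose(eager(x), fused(x), atol=1e-04))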
fabriceyhc/diversity_attacks
Dave_norminit
false
3515
[ "MIT" ]
0
69e948a5cdf6c6f9e895be5e2096a887bad99151
https://github.com/fabriceyhc/diversity_attacks/tree/69e948a5cdf6c6f9e895be5e2096a887bad99151