| entry_point (string, 1-65 chars) | original_triton_code (string, 4.5k-619k chars) | python_code (string, 208-60.9k chars) | triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 items) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) | pytorch_code (string, 200-4.05k chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
NormalAttention_dot | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class NormalAttention_dot(nn.Module):
def __init__(self, input_channel_num, k=4):
super(NormalAttention_dot, self).__init__()
self.c_in = input_channel_num
self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=
self.c_in // k, kerne... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | Schwartz-Zha/My-invertible-resnet | NormalAttention_dot | false | 1,037 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_channel_num, k=4):
super().__init__()
self.c_in = input_channel_num
self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=
self.c_in // k, kernel_size=1)
self.key_conv = nn.Co... |
ActNorm2D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
from torch.nn import Parameter
class ActNorm2D(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm2D, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride =... | Schwartz-Zha/My-invertible-resnet | ActNorm2D | false | 1,038 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
from torch.nn import Parameter
class Model(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = P... |
NormalAttention_embedded_gaussian | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class NormalAttention_embedded_gaussian(nn.Module):
def __init__(self, input_channel_num, k=4):
super(NormalAttention_embedded_gaussian, self).__init__()
self.c_in = input_channel_num
self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.... | Schwartz-Zha/My-invertible-resnet | NormalAttention_embedded_gaussian | false | 1,039 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_channel_num, k=4):
super().__init__()
self.c_in = input_channel_num
self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=
self.c_in // k, kernel_size=1)
self.key_conv = nn.Co... |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.functional as F
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0.0, std=0.1)
nn.init.constant_(layer.bias, 0.0)
class Net(nn.Module):
def __init__(self, s_dim, a_dim):
super(Net, self).__init__()
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | SeungyounShin/pytorch-A3C | Net | false | 1,040 | [
"MIT"
] | 0 | acb9c05a5e1a697c48a7d4c1a48b1c86326faf91 | https://github.com/SeungyounShin/pytorch-A3C/tree/acb9c05a5e1a697c48a7d4c1a48b1c86326faf91 | import torch
import torch.nn as nn
import torch.nn.functional as F
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0.0, std=0.1)
nn.init.constant_(layer.bias, 0.0)
class Model(nn.Module):
def __init__(self, s_dim, a_dim):
super().__init__()
self... |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import functools
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
impor... | SeHwanJoo/mmsegmentation_body | DiceLoss | false | 1,041 | [
"Apache-2.0"
] | 0 | 31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac | https://github.com/SeHwanJoo/mmsegmentation_body/tree/31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac | import functools
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "... |
VAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.autograd
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Li... | import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from... | ScorpioDoctor/antares02 | VAE | false | 1,042 | [
"BSD-3-Clause"
] | 0 | 631b817d2e98f351d1173b620d15c4a5efed11da | https://github.com/ScorpioDoctor/antares02/tree/631b817d2e98f351d1173b620d15c4a5efed11da | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.autograd
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(78... |
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SeungoneKim/Transformer_implementation | MultiHeadAttention | false | 1,043 | [
"Apache-2.0"
] | 0 | a52bf552eb645fc9bfb812cc26842fc147d6c008 | https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super().__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, ke... |
Encoding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
class Encoding(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Ar... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
... | SeHwanJoo/mmsegmentation_body | Encoding | false | 1,044 | [
"Apache-2.0"
] | 0 | 31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac | https://github.com/SeHwanJoo/mmsegmentation_body/tree/31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
class Model(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:... |
CnnNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.functional as F
class CnnNet(nn.Module):
def __init__(self):
super(CnnNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, 2)
self.pool2 = nn.MaxPool2d(2, 2)... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_... | RoyHirsch/DeepLearningCourse | CnnNet | false | 1,045 | [
"MIT"
] | 0 | 9036c0fdbb08b610524d7be991f8e4b490a82c6c | https://github.com/RoyHirsch/DeepLearningCourse/tree/9036c0fdbb08b610524d7be991f8e4b490a82c6c | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, 2)
self.pool2 = nn.MaxPool2d(2, 2)
self... |
BILM | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class BILM(nn.Module):
def __init__(self):
super(BILM, self).__init__()
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
def forward(self, feat):
pos_sig = torc... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
emp... | SeunghwanByun/Real-Time-Road-Detection-Network | BILM | false | 1,046 | [
"MIT"
] | 0 | bc46615adef0e2b1a9a03dd4951559ca5849e6e1 | https://github.com/SeunghwanByun/Real-Time-Road-Detection-Network/tree/bc46615adef0e2b1a9a03dd4951559ca5849e6e1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
def forward(self, feat):
pos_sig = torch.sigmoid... |
KLDLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.utils.data
class KLDLoss(nn.Module):
def forward(self, mu, logvar):
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
... | SebyakinAndrei/MichiGAN | KLDLoss | false | 1,047 | [
"MIT"
] | 0 | 6584c9a106b33096f38e8f5b11d0320f7065fd26 | https://github.com/SebyakinAndrei/MichiGAN/tree/6584c9a106b33096f38e8f5b11d0320f7065fd26 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def forward(self, mu, logvar):
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
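The `pytorch_code` cell in this row already shows the full forward pass. A minimal runnable sketch of the same VAE KL-divergence term (the example tensors below mirror `get_inputs()`; everything else follows the row verbatim):

```python
import torch
import torch.nn as nn

class KLDLoss(nn.Module):
    """KL divergence between N(mu, exp(logvar)) and the standard normal N(0, I)."""

    def forward(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
        # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), summed over all elements
        return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

# Same shapes as get_inputs() in the row above
mu, logvar = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = KLDLoss()(mu, logvar)  # scalar tensor
```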
AddCoords | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _,... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_st... | SeunghwanByun/Real-Time-Road-Detection-Network | AddCoords | false | 1,048 | [
"MIT"
] | 0 | bc46615adef0e2b1a9a03dd4951559ca5849e6e1 | https://github.com/SeunghwanByun/Real-Time-Road-Detection-Network/tree/bc46615adef0e2b1a9a03dd4951559ca5849e6e1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_d... |
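The `AddCoords` row truncates before the coordinate channels are built. A hedged sketch of the standard CoordConv-style coordinate augmentation (the normalization to [-1, 1] and the channel order are assumptions, not taken from the repository):

```python
import torch
import torch.nn as nn

class AddCoords(nn.Module):
    """Append normalized x/y coordinate channels (and optionally a radius channel)."""

    def __init__(self, with_r: bool = False):
        super().__init__()
        self.with_r = with_r

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        batch, _, x_dim, y_dim = input_tensor.shape
        # Coordinate grids broadcast to (batch, 1, x_dim, y_dim)
        xx = torch.linspace(-1, 1, x_dim, device=input_tensor.device)
        yy = torch.linspace(-1, 1, y_dim, device=input_tensor.device)
        xx = xx.view(1, 1, x_dim, 1).expand(batch, 1, x_dim, y_dim)
        yy = yy.view(1, 1, 1, y_dim).expand(batch, 1, x_dim, y_dim)
        out = torch.cat([input_tensor, xx, yy], dim=1)
        if self.with_r:
            rr = torch.sqrt(xx ** 2 + yy ** 2)  # radial distance channel
            out = torch.cat([out, rr], dim=1)
        return out
```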
BCEDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import functools
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_ze... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
impor... | SeHwanJoo/mmsegmentation_body | BCEDiceLoss | false | 1,049 | [
"Apache-2.0"
] | 0 | 31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac | https://github.com/SeHwanJoo/mmsegmentation_body/tree/31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac | import functools
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_ze... |
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.utils.data
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution ... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SebyakinAndrei/MichiGAN | BasicBlock | false | 1,050 | [
"MIT"
] | 0 | 6584c9a106b33096f38e8f5b11d0320f7065fd26 | https://github.com/SebyakinAndrei/MichiGAN/tree/6584c9a106b33096f38e8f5b11d0320f7065fd26 | import torch
import torch.nn as nn
import torch.utils.data
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution ... |
BahdanauAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.parallel
import torch.utils.data
import torch.onnx
import torch.optim
import torch.utils.data.distributed
class BahdanauAttention(nn.Module):
"""
It should be very similar to ... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SerailHydra/examples | BahdanauAttention | false | 1,051 | [
"BSD-3-Clause"
] | 0 | 547226ff28032d4dab1dbf26e0b5f8b8276d79ae | https://github.com/SerailHydra/examples/tree/547226ff28032d4dab1dbf26e0b5f8b8276d79ae | import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.parallel
import torch.utils.data
import torch.onnx
import torch.optim
import torch.utils.data.distributed
class Model(nn.Module):
"""
It should be very similar to tf.contrib.s... |
StateActionEmbedding | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import math
import torch
import numpy as np
from abc import ABC
from abc import abstractmethod
from abc import abstractproperty
from torch import nn
from enum import Enum
def tensor_to_numpy(tensor):
return tensor.detach().cpu().numpy()
class MLPParamHandler(ABC):
def __init__(self) ->None:
"""Inte... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
from abc import ABC
from abc import abstractmethod
from abc import abstractproperty
from torch import nn
from... | Sebastian-Griesbach/Improving-Policy-Conditioned-Value-Functions | StateActionEmbedding | false | 1,052 | [
"MIT"
] | 0 | ec4125c5e056753e507df0406fcd60b6b6c3dc25 | https://github.com/Sebastian-Griesbach/Improving-Policy-Conditioned-Value-Functions/tree/ec4125c5e056753e507df0406fcd60b6b6c3dc25 | import math
import torch
import numpy as np
from abc import ABC
from abc import abstractmethod
from abc import abstractproperty
from torch import nn
from enum import Enum
def tensor_to_numpy(tensor):
return tensor.detach().cpu().numpy()
class MLPParamHandler(ABC):
def __init__(self) ->None:
"""Inte... |
GatedConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class GatedConv2d(torch.nn.Module):
"""
Gated Convlution layer with activation (default activation:LeakyReLU)
Params: same as conv2d
Input: The feature from last layer "I"
Output:\\phi(f(I))*\\sigmoid(g(I))
"""
def __init__(self, in_channels, out_channel... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | ShiraLightricks/3d-photo-inpainting | GatedConv2d | false | 1,053 | [
"MIT"
] | 0 | c42ac41576690b765e50f5281ddbfb58439ff36d | https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d | import torch
import torch.nn as nn
class Model(torch.nn.Module):
"""
Gated Convlution layer with activation (default activation:LeakyReLU)
Params: same as conv2d
Input: The feature from last layer "I"
Output:\\phi(f(I))*\\sigmoid(g(I))
"""
def __init__(self, in_channels, out_channels, ker... |
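The docstring in this row gives the gated-convolution formula \phi(f(I)) * sigmoid(g(I)) with LeakyReLU as the default activation. A minimal sketch of that formula (the two parallel convolutions `conv_f`/`conv_g` are illustrative names, not the repository's exact layout):

```python
import torch
import torch.nn as nn

class GatedConv2dSketch(nn.Module):
    """Gated convolution: activation(conv_f(x)) * sigmoid(conv_g(x))."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super().__init__()
        self.conv_f = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        self.conv_g = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        self.activation = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Feature branch gated elementwise by a sigmoid mask branch
        return self.activation(self.conv_f(x)) * torch.sigmoid(self.conv_g(x))
```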
CoordConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _,... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_s... | SeunghwanByun/Real-Time-Road-Detection-Network | CoordConv | false | 1,054 | [
"MIT"
] | 0 | bc46615adef0e2b1a9a03dd4951559ca5849e6e1 | https://github.com/SeunghwanByun/Real-Time-Road-Detection-Network/tree/bc46615adef0e2b1a9a03dd4951559ca5849e6e1 | import torch
import torch.nn as nn
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _,... |
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SeungoneKim/Transformer_implementation | ScaledDotProductAttention | false | 1,055 | [
"Apache-2.0"
] | 0 | a52bf552eb645fc9bfb812cc26842fc147d6c008 | https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, key_sequence_length, s... |
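The `ScaledDotProductAttention` row truncates before the attention computation itself. A hedged sketch of standard scaled dot-product attention (the masking convention and returning the attention weights are assumptions):

```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class ScaledDotProductAttentionSketch(nn.Module):
    """softmax(Q K^T / sqrt(d_k)) V with an optional mask."""

    def forward(self, query, key, value, mask=None):
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            # Positions where mask == 0 are excluded from the softmax
            scores = scores.masked_fill(mask == 0, float('-inf'))
        attn = F.softmax(scores, dim=-1)
        return torch.matmul(attn, value), attn
```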
Swish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class Swish(nn.Module):
def __init__(self, inplace=True):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.mul_(x.sigmoid()) if self.inplace else x.mul(x.sigmoid())
def get_inputs():
return [torch.rand([4, 4,... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_pt... | ShowLo/Networks | Swish | false | 1,056 | [
"MIT"
] | 0 | 48f8545783966c383b6c3b600fbe37a15ea8ae3c | https://github.com/ShowLo/Networks/tree/48f8545783966c383b6c3b600fbe37a15ea8ae3c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, inplace=True):
super().__init__()
self.inplace = inplace
def forward(self, x):
return x.mul_(x.sigmoid()) if self.inplace else x.mul(x.sigmoid())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
... |
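The `Swish` row shows its forward pass in full. A self-contained sketch of the same activation, with a small usage example:

```python
import torch
import torch.nn as nn

class Swish(nn.Module):
    """Swish/SiLU activation: x * sigmoid(x), optionally computed in place."""

    def __init__(self, inplace: bool = True):
        super().__init__()
        self.inplace = inplace

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x.mul_(x.sigmoid()) if self.inplace else x.mul(x.sigmoid())

y = Swish(inplace=False)(torch.rand(4, 4, 4, 4))
```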
Bicubic | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | from torch.nn import Module
import torch
import torch.nn.functional as F
class Bicubic(Module):
def __init__(self, scale_factor):
super().__init__()
self.scale_factor = scale_factor
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale_factor, mode='bicubic')
def ge... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
... | ShivanshuPurohit/Diffusion | Bicubic | false | 1,057 | [
"MIT"
] | 0 | 9a190d9aa4ed9767cf223e4ef57d0c31690f92cc | https://github.com/ShivanshuPurohit/Diffusion/tree/9a190d9aa4ed9767cf223e4ef57d0c31690f92cc | from torch.nn import Module
import torch
import torch.nn.functional as F
class Model(Module):
def __init__(self, scale_factor):
super().__init__()
self.scale_factor = scale_factor
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale_factor, mode='bicubic')
def get_... |
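The `Bicubic` row shows its forward pass in full. A self-contained sketch with a shape-check example:

```python
import torch
from torch.nn import Module
import torch.nn.functional as F

class Bicubic(Module):
    """Bicubic up/down-sampling by a fixed scale factor."""

    def __init__(self, scale_factor):
        super().__init__()
        self.scale_factor = scale_factor

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return F.interpolate(x, scale_factor=self.scale_factor, mode='bicubic')

x = torch.rand(1, 3, 16, 16)
print(Bicubic(scale_factor=2)(x).shape)  # torch.Size([1, 3, 32, 32])
```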
adder2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
def adder2d_function(X, W, stride=1, padding=0, groups=1):
n_filters, _d_filter, h_filter, w_filter = W.size()
n_x, _d_x, h_x, w_x = X.size()
h_out = (h_x - h_filter + 2 * padding) / stride + 1
w_out = (w_x - w_filter + 2 * padding) / stride + 1
h_out, w_out = in... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_st... | ShangyinGao/pytorch-cifar | adder2d | false | 1,058 | [
"MIT"
] | 0 | 480e19825bb155e3d0fafae3545faa3a4165bd77 | https://github.com/ShangyinGao/pytorch-cifar/tree/480e19825bb155e3d0fafae3545faa3a4165bd77 | import torch
import torch.nn as nn
def adder2d_function(X, W, stride=1, padding=0, groups=1):
n_filters, _d_filter, h_filter, w_filter = W.size()
n_x, _d_x, h_x, w_x = X.size()
h_out = (h_x - h_filter + 2 * padding) / stride + 1
w_out = (w_x - w_filter + 2 * padding) / stride + 1
h_out, w_out = in... |
FFN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
class FFN(nn.Module):
def __init__(self, d_model, d_ffn, dropout=0):
super().__init__()
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = F.rel... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SelvamArul/MOTR | FFN | false | 1,059 | [
"MIT"
] | 0 | 2a0b70288feaca665d460096159100d5077e9312 | https://github.com/SelvamArul/MOTR/tree/2a0b70288feaca665d460096159100d5077e9312 | import torch
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, d_model, d_ffn, dropout=0):
super().__init__()
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = F.r... |
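The `FFN` row truncates after `linear1` and the ReLU activation. A hedged sketch of a typical position-wise feed-forward block (the second projection `linear2` and the dropout placement are assumptions):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class FFNSketch(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model: int, d_ffn: int, dropout: float = 0.0):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.linear2 = nn.Linear(d_ffn, d_model)  # assumed second projection
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear2(self.dropout(F.relu(self.linear1(x))))
```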
BinaryReg | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.utils.data
class BinaryReg(nn.Module):
"""Regularization for encouraging the outputs to be binary.
"""
def __init__(self, alpha=0.1):
super().__init__()
self.alpha = alpha
def forward(self, pred):
diff = pred - 0.5
diff ... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
... | Shray64/pytorch_connectomics | BinaryReg | false | 1,060 | [
"MIT"
] | 0 | d6c814f11ac2f8418ede5ae220a93016f50214fc | https://github.com/Shray64/pytorch_connectomics/tree/d6c814f11ac2f8418ede5ae220a93016f50214fc | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""Regularization for encouraging the outputs to be binary.
"""
def __init__(self, alpha=0.1):
super().__init__()
self.alpha = alpha
def forward(self, pred):
diff = pred - 0.5
diff = to... |
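The `BinaryReg` row truncates after `diff = pred - 0.5`. One plausible completion of a regularizer that pushes predictions away from 0.5 (the clamp threshold and the reciprocal penalty are assumptions, not taken from the repository):

```python
import torch
import torch.nn as nn

class BinaryRegSketch(nn.Module):
    """Penalize predictions that sit near 0.5, encouraging near-binary outputs."""

    def __init__(self, alpha: float = 0.1):
        super().__init__()
        self.alpha = alpha

    def forward(self, pred: torch.Tensor) -> torch.Tensor:
        diff = pred - 0.5
        # Clamp the magnitude away from zero, then penalize its reciprocal
        # (threshold value and reciprocal form are assumed; the row is truncated).
        diff = torch.clamp(torch.abs(diff), min=0.01)
        return self.alpha * (1.0 / diff).mean()
```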
MessageNormalizer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class MessageNormalizer(nn.Module):
def __init__(self, in_features, init_mean=1.0, init_stddev=0.01):
super(MessageNormalizer, self).__init__()
self.in_features = in_features
self.out_features = in_features
self.weight = torch.nn.Parameter(torch.... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_st... | ShinyaFUKUMOTO/LeMPA | MessageNormalizer | false | 1,061 | [
"BSD-2-Clause"
] | 0 | 23b8c9f60fc13cf28d4485757d2ae0b3465b3e92 | https://github.com/ShinyaFUKUMOTO/LeMPA/tree/23b8c9f60fc13cf28d4485757d2ae0b3465b3e92 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_features, init_mean=1.0, init_stddev=0.01):
super().__init__()
self.in_features = in_features
self.out_features = in_features
self.weight = torch.nn.Parameter(torch.Tensor(in_features))
self.i... |
MaxPoolStride1 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
import torch._utils
class MaxPoolStride1(nn.Module):
def __init__(self, kernel_size):
super(MaxPoolStride1, self).__init__()
self.kernel_size = kernel_size
self.p... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import ... | Sarathismg/Pose-Estimator-Old-Version | MaxPoolStride1 | false | 1,062 | [
"Apache-2.0"
] | 0 | ecaa03769323b94a4d7222e2d3606d1ce92a2fae | https://github.com/Sarathismg/Pose-Estimator-Old-Version/tree/ecaa03769323b94a4d7222e2d3606d1ce92a2fae | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
import torch._utils
class Model(nn.Module):
def __init__(self, kernel_size):
super().__init__()
self.kernel_size = kernel_size
self.pad = kernel_size - 1
def... |
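The `MaxPoolStride1` row shows only `pad = kernel_size - 1` before it is cut off. A hedged sketch of a size-preserving stride-1 max pool (the replicate padding mode and right/bottom-only padding are assumptions):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class MaxPoolStride1Sketch(nn.Module):
    """Stride-1 max pooling that keeps the spatial size by padding kernel_size - 1."""

    def __init__(self, kernel_size: int):
        super().__init__()
        self.kernel_size = kernel_size
        self.pad = kernel_size - 1

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Output size: (H + pad) - kernel_size + 1 = H, so spatial dims are preserved
        padded = F.pad(x, (0, self.pad, 0, self.pad), mode='replicate')
        return F.max_pool2d(padded, self.kernel_size, stride=1)
```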
GroupNorm32 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.functional as F
class GroupNorm32(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-05):
super().__init__(num_groups=num_groups, num_channels=num_channels,
eps=eps)
self.swish = swish
def forward(self, x):... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_... | ShivanshuPurohit/Diffusion | GroupNorm32 | false | 1,063 | [
"MIT"
] | 0 | 9a190d9aa4ed9767cf223e4ef57d0c31690f92cc | https://github.com/ShivanshuPurohit/Diffusion/tree/9a190d9aa4ed9767cf223e4ef57d0c31690f92cc | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-05):
super().__init__(num_groups=num_groups, num_channels=num_channels,
eps=eps)
self.swish = swish
def forward(self, x):
... |
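The `GroupNorm32` row subclasses `nn.GroupNorm` and stores a `swish` flag, but its forward pass is truncated. A hedged sketch of the usual pattern behind the name (normalizing in float32 and applying a plain swish are assumptions):

```python
import torch
import torch.nn as nn

class GroupNorm32Sketch(nn.GroupNorm):
    """GroupNorm computed in float32, optionally followed by a swish activation."""

    def __init__(self, num_groups, num_channels, swish, eps=1e-05):
        super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps)
        self.swish = swish

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The "32" suffix usually means the normalization runs in float32 and casts back
        y = super().forward(x.float()).to(x.dtype)
        if self.swish:
            y = y * torch.sigmoid(y)  # assumed plain swish; some variants scale by self.swish
        return y
```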
HardSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn.functional as F
class HardSigmoid(torch.nn.Module):
"""
Pytorch implementation of the hard sigmoid activation function
"""
def __init__(self):
super(HardSigmoid, self).__init__()
def forward(self, input):
x = 0.2 * input + 0.5
x = torch.clamp(... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torc... | ShiraLightricks/3d-photo-inpainting | HardSigmoid | false | 1,064 | [
"MIT"
] | 0 | c42ac41576690b765e50f5281ddbfb58439ff36d | https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
"""
Pytorch implementation of the hard sigmoid activation function
"""
def __init__(self):
super().__init__()
def forward(self, input):
x = 0.2 * input + 0.5
x = torch.clamp(x, 0, 1)
x = F.... |
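The `HardSigmoid` row truncates on its last line. A hedged sketch assuming the clamped value is returned:

```python
import torch
import torch.nn as nn

class HardSigmoidSketch(nn.Module):
    """Piecewise-linear approximation of the sigmoid: clamp(0.2 * x + 0.5, 0, 1)."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The row is truncated after the clamp; returning the clamped value is assumed here.
        return torch.clamp(0.2 * x + 0.5, 0.0, 1.0)
```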
Classifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
from abc import *
class Classifier(nn.Module):
def __init__(self, in_channels, num_classes):
super(Classifier, self).__init__()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(in_channels, num_classes)
def forward(self, x):
o... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from abc import *
assert_size_stride = torch._C._dynamo.gu... | Slime0519/simple-faster-rcnn-pytorch | Classifier | false | 1,065 | [
"MIT"
] | 0 | 0503e9b4d07a24ae0bc1789a61ed937709f5304c | https://github.com/Slime0519/simple-faster-rcnn-pytorch/tree/0503e9b4d07a24ae0bc1789a61ed937709f5304c | import torch
import torch.nn as nn
from abc import *
class Model(nn.Module):
def __init__(self, in_channels, num_classes):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(in_channels, num_classes)
def forward(self, x):
out = self.avgpool(x)
... |
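The `Classifier` row truncates inside `forward`. A hedged sketch completing the usual pool-flatten-linear head (the flatten step is an assumption):

```python
import torch
import torch.nn as nn

class ClassifierSketch(nn.Module):
    """Global average pooling followed by a linear classification head."""

    def __init__(self, in_channels: int, num_classes: int):
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_channels, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.avgpool(x)        # (N, C, 1, 1)
        out = torch.flatten(out, 1)  # (N, C) -- flattening step assumed
        return self.fc(out)          # (N, num_classes)
```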
TemporalAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.functional as F
class TemporalAttention(nn.Module):
def __init__(self, hidden_size, feat_size, bottleneck_size):
super(TemporalAttention, self).__init__()
self.hidden_size = hidden_size
self.feat_size = feat_size
self.bottleneck_s... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | Shashwat07gupta/MSVD | TemporalAttention | false | 1,066 | [
"MIT"
] | 0 | 8026557ef7681a504b5140560ec4aaad9944de2d | https://github.com/Shashwat07gupta/MSVD/tree/8026557ef7681a504b5140560ec4aaad9944de2d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hidden_size, feat_size, bottleneck_size):
super().__init__()
self.hidden_size = hidden_size
self.feat_size = feat_size
self.bottleneck_size = bottleneck_size
self.... |
FastRNNCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | ShishirPatil/EdgeML-1 | FastRNNCell | false | 1,067 | [
"MIT"
] | 0 | cbba9f8b989e545788427c004eb8450e7e4c1a21 | https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21 | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... |
Downsample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import numpy as np
class BaseModule(torch.nn.Module):
def __init__(self):
super(BaseModule, self).__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.name... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_stri... | Sobsz/uberduck-ml-dev | Downsample | false | 1,068 | [
"Apache-2.0"
] | 0 | f099238f6f2e3f600d72d89dea3c883c59d91387 | https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387 | import torch
import numpy as np
class BaseModule(torch.nn.Module):
def __init__(self):
super().__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.named_parameters():
... |
AddFunction | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class AddFunction(nn.Module):
def __init__(self):
super(AddFunction, self).__init__()
def forward(self, x, y):
return x + y
def get_inputs():
retur... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_st... | ShounoLab/res-net-interpretation-open | AddFunction | false | 1,069 | [
"MIT"
] | 0 | 282dc0ae261467ee1866996416149959db216c02 | https://github.com/ShounoLab/res-net-interpretation-open/tree/282dc0ae261467ee1866996416149959db216c02 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y
def get_inputs():
return [torch.rand([4, 4, 4,... |
WeightedCE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
class WeightedCE(nn.Module):
"""Mask weighted multi-class cross-entropy (CE) loss.
"""
def __init__(self):
super().__init__()
def forward(self, pred, target, weight_mask=None):
loss = F.cross_e... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
... | Shray64/pytorch_connectomics | WeightedCE | false | 1,070 | [
"MIT"
] | 0 | d6c814f11ac2f8418ede5ae220a93016f50214fc | https://github.com/Shray64/pytorch_connectomics/tree/d6c814f11ac2f8418ede5ae220a93016f50214fc | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
class Model(nn.Module):
"""Mask weighted multi-class cross-entropy (CE) loss.
"""
def __init__(self):
super().__init__()
def forward(self, pred, target, weight_mask=None):
loss = F.cross_entrop... |
PartialConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import math
import torch
import torch.nn as nn
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.a... | ShiraLightricks/3d-photo-inpainting | PartialConv | false | 1,071 | [
"MIT"
] | 0 | c42ac41576690b765e50f5281ddbfb58439ff36d | https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d | import math
import torch
import torch.nn as nn
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
... |
ProtoNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import numpy as np
import torch.nn as nn
import torch.onnx
from itertools import product as product
class ProtoNN(nn.Module):
def __init__(self, inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma, W=None, B=None, Z=None):
"""
Forward computation graph ... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy ... | ShishirPatil/EdgeML-1 | ProtoNN | false | 1,072 | [
"MIT"
] | 0 | cbba9f8b989e545788427c004eb8450e7e4c1a21 | https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21 | import torch
import numpy as np
import torch.nn as nn
import torch.onnx
from itertools import product as product
class Model(nn.Module):
def __init__(self, inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma, W=None, B=None, Z=None):
"""
Forward computation graph fo... |
GRULRCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | ShishirPatil/EdgeML-1 | GRULRCell | false | 1,073 | [
"MIT"
] | 0 | cbba9f8b989e545788427c004eb8450e7e4c1a21 | https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21 | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... |
Connect2Model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Connect2Model(nn.Module):
def __init__(self, board_size, action_size, device):
super(Connect2Model, self).__init__()
self.device = device
self.size = board_size
self.action_size = action_si... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | ShokuninSan/AlphaZeroSimple | Connect2Model | false | 1,074 | [
"MIT"
] | 0 | e32e6a28f872a046705a3f68882139688d5a43c3 | https://github.com/ShokuninSan/AlphaZeroSimple/tree/e32e6a28f872a046705a3f68882139688d5a43c3 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, board_size, action_size, device):
super().__init__()
self.device = device
self.size = board_size
self.action_size = action_size
self.fc1 = nn.Li... |
CausalConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
import torch.utils.data
class WNConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import n... | Shivanshu-Gupta/KaoKore-VQ-VAE2 | CausalConv2d | false | 1,075 | [
"MIT"
] | 0 | 38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c | https://github.com/Shivanshu-Gupta/KaoKore-VQ-VAE2/tree/38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c | import torch
from torch import nn
import torch.utils.data
class WNConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
... |
FastGRNNCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | ShishirPatil/EdgeML-1 | FastGRNNCell | false | 1,076 | [
"MIT"
] | 0 | cbba9f8b989e545788427c004eb8450e7e4c1a21 | https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21 | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... |
ResidualConvUnit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class ResidualConvUnit(nn.Module):
"""Residual convolution module.
"""
def __init__(self, features):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.conv1 = nn.Conv2d(features, features, ... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_... | ShiraLightricks/3d-photo-inpainting | ResidualConvUnit | false | 1,077 | [
"MIT"
] | 0 | c42ac41576690b765e50f5281ddbfb58439ff36d | https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d | import torch
import torch.nn as nn
class Model(nn.Module):
"""Residual convolution module.
"""
def __init__(self, features):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.conv1 = nn.Conv2d(features, features, kernel_size... |
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import numpy as np
class BaseModule(torch.nn.Module):
def __init__(self):
super(BaseModule, self).__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.name... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_str... | Sobsz/uberduck-ml-dev | LayerNorm | false | 1,078 | [
"Apache-2.0"
] | 0 | f099238f6f2e3f600d72d89dea3c883c59d91387 | https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387 | import torch
import numpy as np
class BaseModule(torch.nn.Module):
def __init__(self):
super().__init__()
@property
def nparams(self):
"""
Returns number of trainable parameters of the module.
"""
num_params = 0
for name, param in self.named_parameters():
... |
Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch as t
import torch.nn as nn
def indicator(K):
"""
@K: number of users
"""
return t.eye(5 * K)
class Loss(nn.Module):
def __init__(self, K, Nt, Vartheta):
super(Loss, self).__init__()
self.K = K
self.Nt = Nt
self.Delta = indica... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SoulVen/USRMNet-HWGCN | Loss | false | 1,079 | [
"Apache-2.0"
] | 0 | 2f99f53150335be26270bd408ce59dc51c8435cc | https://github.com/SoulVen/USRMNet-HWGCN/tree/2f99f53150335be26270bd408ce59dc51c8435cc | import torch
import torch as t
import torch.nn as nn
def indicator(K):
"""
@K: number of users
"""
return t.eye(5 * K)
class Model(nn.Module):
def __init__(self, K, Nt, Vartheta):
super().__init__()
self.K = K
self.Nt = Nt
self.Delta = indicator(self.... |
AttNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.functional as F
class AttNet(nn.Module):
def __init__(self, num_input_ch):
super(AttNet, self).__init__()
self.num_input_ch = num_input_ch
self.conv1 = nn.Conv2d(self.num_input_ch, 64, 3, padding=1, bias=True)
self.conv2 = nn.Conv... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SionHu/LP-MOT | AttNet | false | 1,080 | [
"MIT"
] | 0 | 90e6a1d51ebe1a948ac5c018a5ee560654e824f1 | https://github.com/SionHu/LP-MOT/tree/90e6a1d51ebe1a948ac5c018a5ee560654e824f1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_input_ch):
super().__init__()
self.num_input_ch = num_input_ch
self.conv1 = nn.Conv2d(self.num_input_ch, 64, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(64, 16, 1,... |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
class Net(nn.Module):
def __init__(se... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_s... | Sreehari-S/Tiramisu_DigestPath | Net | false | 1,081 | [
"Apache-2.0"
] | 0 | a884ee911bc60ce997996e0ec2e6036600ffcffa | https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa | import torch
import torch.nn as nn
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super().__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
class Model(nn.Module):
def __init__(self, nFeat... |
DecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SeungoneKim/Transformer_implementation | DecoderLayer | false | 1,082 | [
"Apache-2.0"
] | 0 | a52bf552eb645fc9bfb812cc26842fc147d6c008 | https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super().__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, ke... |
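The ScaledDotProductAttention helper quoted in the DecoderLayer row above (and again in the EncoderLayer row further down) is truncated right after the shape unpacking. A minimal sketch of the standard softmax(QK^T / sqrt(d_k))V formulation it presumably implements; an illustration, not the repository's exact code:

import math
import torch
import torch.nn.functional as F

def scaled_dot_product_attention_sketch(query, key, value, mask=None):
    # query/key/value: (batch, num_head, seq_len, head_dim)
    d_k = key.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, float('-inf'))
    weights = F.softmax(scores, dim=-1)
    return torch.matmul(weights, value)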
TransitionUp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width]
class TransitionUp(nn.Module):
def __init__(self, in_channels, out_cha... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_s... | Sreehari-S/Tiramisu_DigestPath | TransitionUp | false | 1,083 | [
"Apache-2.0"
] | 0 | a884ee911bc60ce997996e0ec2e6036600ffcffa | https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa | import torch
import torch.nn as nn
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width]
class Model(nn.Module):
def __init__(self, in_channels, out_channels):... |
ConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.utils.data
class WSConv2d(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
    Note that the input is multiplied rather than changing the weights;
    this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gan... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | SongsLearning/Machine-Learning-Collection | ConvBlock | false | 1,084 | [
"MIT"
] | 0 | a8dff83969f67d37f70a89db06b851057d2da539 | https://github.com/SongsLearning/Machine-Learning-Collection/tree/a8dff83969f67d37f70a89db06b851057d2da539 | import torch
import torch.nn as nn
import torch.utils.data
class WSConv2d(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
    Note that the input is multiplied rather than changing the weights;
    this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gan... |
FcCat | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
def get_inputs():
return [torch.rand([... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_s... | Sreehari-S/Tiramisu_DigestPath | FcCat | false | 1,086 | [
"Apache-2.0"
] | 0 | a884ee911bc60ce997996e0ec2e6036600ffcffa | https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, nIn, nOut):
super().__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]... |
WSConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.utils.data
class WSConv2d(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
    Note that the input is multiplied rather than changing the weights;
    this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gan... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dyn... | SongsLearning/Machine-Learning-Collection | WSConv2d | false | 1,087 | [
"MIT"
] | 0 | a8dff83969f67d37f70a89db06b851057d2da539 | https://github.com/SongsLearning/Machine-Learning-Collection/tree/a8dff83969f67d37f70a89db06b851057d2da539 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
Weight scaled Conv2d (Equalized Learning Rate)
    Note that the input is multiplied rather than changing the weights;
    this will have the same result.
Inspired by:
https://github.com/nvnbny/progressive_growing_of_gans/b... |
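The WSConv2d docstring above describes the equalized-learning-rate trick: scale the input at runtime instead of rescaling the weights. A minimal sketch of that idea, assuming the usual He constant sqrt(2 / fan_in); the row's own scale computation and bias handling are truncated, so treat the details as assumptions:

import torch.nn as nn

class WSConv2dSketch(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        # Multiplying the input by this He constant is equivalent to scaling the weights.
        self.scale = (2 / (in_channels * kernel_size ** 2)) ** 0.5
    def forward(self, x):
        # Real implementations often keep the bias out of the scaled path; omitted here.
        return self.conv(x * self.scale)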
Standardscaler | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
class Standardscaler(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_batch):
std, mean = torch.std_mean(input_batch.type(torch.float32),
unbiased=False)
total = (input_batch - mean) / std
return total
def get_inputs(... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._... | Stuksus/StandardScaler_for_pytorch | Standardscaler | false | 1,088 | [
"MIT"
] | 0 | 27da9afd111007f20a615bee9a5a7ac272adb241 | https://github.com/Stuksus/StandardScaler_for_pytorch/tree/27da9afd111007f20a615bee9a5a7ac272adb241 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_batch):
std, mean = torch.std_mean(input_batch.type(torch.float32),
unbiased=False)
total = (input_batch - mean) / std
return total
def get_inputs():
re... |
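The Standardscaler forward above is shown in full: it standardizes the whole batch with torch.std_mean (biased std). A short usage check, illustrative only:

import torch

x = torch.rand(4, 4, 4, 4)
std, mean = torch.std_mean(x.float(), unbiased=False)   # scalar std/mean over all elements
scaled = (x - mean) / std
new_std, new_mean = torch.std_mean(scaled, unbiased=False)
print(new_mean.abs().item() < 1e-5, abs(new_std.item() - 1.0) < 1e-5)   # True True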
FeatureResizer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.utils.data
import torch
from torch import nn
class FeatureResizer(nn.Module):
"""
This class takes as input a set of embeddings of dimension C1 and outputs a set of
embedding of dimension C2, after a linear transformation, dropout and normalization (LN).
"""
def __init__... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.... | Sudhir11292rt/DefVisTR | FeatureResizer | false | 1,089 | [
"Apache-2.0"
] | 0 | d52b2d88c10c6239de1c1ff851a743c58b708b75 | https://github.com/Sudhir11292rt/DefVisTR/tree/d52b2d88c10c6239de1c1ff851a743c58b708b75 | import torch
import torch.utils.data
import torch
from torch import nn
class Model(nn.Module):
"""
This class takes as input a set of embeddings of dimension C1 and outputs a set of
embedding of dimension C2, after a linear transformation, dropout and normalization (LN).
"""
def __init__(self, in... |
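The FeatureResizer docstring above describes a linear map from C1 to C2 dimensions followed by dropout and layer normalization; the __init__/forward bodies are truncated. A minimal sketch consistent with that description (layer names and ordering are assumptions):

import torch.nn as nn

class FeatureResizerSketch(nn.Module):
    def __init__(self, input_feat_size, output_feat_size, dropout=0.1):
        super().__init__()
        self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
        self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
        self.dropout = nn.Dropout(dropout)
    def forward(self, encoder_features):
        x = self.fc(encoder_features)
        x = self.layer_norm(x)
        return self.dropout(x)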
UGRNNLRCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | ShishirPatil/EdgeML-1 | UGRNNLRCell | false | 1,090 | [
"MIT"
] | 0 | cbba9f8b989e545788427c004eb8450e7e4c1a21 | https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21 | import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm... |
FCLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class FCLayer(nn.Module):
def __init__(self, input_dim, output_dim, dropout_rate=0.0,
use_activation=True):
super(FCLayer, self).__init__()
self.use_activation = use_activation
self.dropout = nn.Dropout(dropout_rate)
self.linear = nn.Line... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | StevenChaoo/R-BERT-DDI | FCLayer | false | 1,091 | [
"MIT"
] | 0 | 6d9666e0bc61397ca942ffad53653690c1e8a899 | https://github.com/StevenChaoo/R-BERT-DDI/tree/6d9666e0bc61397ca942ffad53653690c1e8a899 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim, output_dim, dropout_rate=0.0,
use_activation=True):
super().__init__()
self.use_activation = use_activation
self.dropout = nn.Dropout(dropout_rate)
self.linear = nn.Linear(input_dim, o... |
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import math
import torch
import numpy as np
def convert_pad_shape(pad_shape):
"""Reverse, then flatten a list of lists."""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
class BaseModule(torch.nn.Module):
def __init__(self):
super(BaseModule... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | Sobsz/uberduck-ml-dev | MultiHeadAttention | false | 1,092 | [
"Apache-2.0"
] | 0 | f099238f6f2e3f600d72d89dea3c883c59d91387 | https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387 | import math
import torch
import numpy as np
def convert_pad_shape(pad_shape):
"""Reverse, then flatten a list of lists."""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
class BaseModule(torch.nn.Module):
def __init__(self):
super().__init__... |
WNConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
import torch.utils.data
class WNConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import n... | Shivanshu-Gupta/KaoKore-VQ-VAE2 | WNConv2d | false | 1,093 | [
"MIT"
] | 0 | 38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c | https://github.com/Shivanshu-Gupta/KaoKore-VQ-VAE2/tree/38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c | import torch
from torch import nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True, activation=None):
super().__init__()
self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel,
k... |
DisparityRegression | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.utils.data
class DisparityRegression(nn.Module):
def __init__(self, maxdisp, win_size):
super(DisparityRegression, self).__init__()
self.max_disp = maxdisp
self.win_size = win_size
def forward(self, x):
disp = torch.arange(0, se... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C.... | SpadeLiu/Graft-PSMNet | DisparityRegression | false | 1,094 | [
"MIT"
] | 0 | 1f2950d5afd85237f8d3604caab20dd47a8c9889 | https://github.com/SpadeLiu/Graft-PSMNet/tree/1f2950d5afd85237f8d3604caab20dd47a8c9889 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, maxdisp, win_size):
super().__init__()
self.max_disp = maxdisp
self.win_size = win_size
def forward(self, x):
disp = torch.arange(0, self.max_disp).view(1, -1, 1, 1).float()
... |
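The DisparityRegression forward above builds the disparity index tensor arange(0, max_disp).view(1, -1, 1, 1) and is then cut off; modules of this name typically take a soft-argmin over a cost volume. A hedged sketch of that step (the win_size windowing from the row is omitted):

import torch
import torch.nn.functional as F

def disparity_regression_sketch(cost_volume, max_disp):
    # cost_volume: (batch, max_disp, H, W)
    disp = torch.arange(0, max_disp, device=cost_volume.device).float().view(1, -1, 1, 1)
    prob = F.softmax(cost_volume, dim=1)
    return torch.sum(prob * disp, dim=1)   # (batch, H, W) expected disparity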
Message_Passing_Unit_v1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Message_Passing_Unit_v1(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v1, self).__init__()
self.w = nn.Linear(... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from ... | SpartaG117/scene_graph_benchmark | Message_Passing_Unit_v1 | false | 1,095 | [
"MIT"
] | 0 | e2e49940dd2f752b1faf9ae26707435ba3441bcb | https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb | import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, fea_size, filter_size=128):
super().__init__()
self.w = nn.Linear(fea_size * 2, filter_size, bias=True)
s... |
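Message_Passing_Unit_v1 above concatenates two feature vectors and feeds them through a Linear(fea_size * 2, filter_size) before the row is truncated; such units usually turn that projection into a sigmoid gate on the incoming message. A speculative sketch of the pattern, not the repository's exact code:

import torch
import torch.nn as nn

class MessagePassingUnitSketch(nn.Module):
    def __init__(self, fea_size, filter_size=128):
        super().__init__()
        self.w = nn.Linear(fea_size * 2, filter_size, bias=True)
    def forward(self, unary_term, pair_term):
        gate = torch.sigmoid(self.w(torch.cat([unary_term, pair_term], dim=1)))
        gate = gate.mean(dim=1, keepdim=True)           # collapse filter responses to one gate (assumed)
        return pair_term * gate.expand_as(pair_term)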
ExpModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class ExpModule(nn.Module):
def __init__(self):
super(ExpModule, self).__init__()
def forward(self, x):
return torch.exp(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert... | SimonTreu/sdvae | ExpModule | false | 1,096 | [
"MIT"
] | 0 | e0270b9b2acf2d66eec93870f1c5633c8f04d9ab | https://github.com/SimonTreu/sdvae/tree/e0270b9b2acf2d66eec93870f1c5633c8f04d9ab | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.exp(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | SeungoneKim/Transformer_implementation | EncoderLayer | false | 1,097 | [
"Apache-2.0"
] | 0 | a52bf552eb645fc9bfb812cc26842fc147d6c008 | https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super().__init__()
def forward(self, query, key, value, mask=None):
_1, _2, query_sequence_length, _3 = query.size()
batch_size, num_head, ke... |
Residual_Covolution | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class Residual_Covolution(nn.Module):
def __init__(self, icol, ocol, num_classes):
super(Residual_Covolution, self).__init__()
self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding
=12, dilation=12, bias=True)
self.conv2 = nn.Co... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_... | SultanAbuGhazal/CGNet | Residual_Covolution | false | 1,098 | [
"MIT"
] | 0 | f10b976b984ba09be26b902ed4da97cd1311cf17 | https://github.com/SultanAbuGhazal/CGNet/tree/f10b976b984ba09be26b902ed4da97cd1311cf17 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, icol, ocol, num_classes):
super().__init__()
self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding
=12, dilation=12, bias=True)
self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, ... |
ResidualBlockNoBN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
class ResidualBlockNoBN(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResidualBlockNoBN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=stride, paddi... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_s... | Suvapna/ArtificialLaughter | ResidualBlockNoBN | false | 1,100 | [
"MIT"
] | 0 | a7114134b698f829e05e74cac30052e18b260f85 | https://github.com/Suvapna/ArtificialLaughter/tree/a7114134b698f829e05e74cac30052e18b260f85 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), stride=stride, padding=1,
bias=True)
... |
SpatialAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7, bias=True):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, ... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_... | SuzaKrish/mmdetection | SpatialAttention | false | 1,101 | [
"Apache-2.0"
] | 0 | 31c16891d7493252262e738bcbf05326dba866b2 | https://github.com/SuzaKrish/mmdetection/tree/31c16891d7493252262e738bcbf05326dba866b2 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, kernel_size=7, bias=True):
super().__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, ... |
Message_Passing_Unit_v2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Message_Passing_Unit_v2(nn.Module):
def __init__(self, fea_size, filter_size=128):
super(Message_Passing_Unit_v2, self).__init__()
self.w = nn.Linear(... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from ... | SpartaG117/scene_graph_benchmark | Message_Passing_Unit_v2 | false | 1,102 | [
"MIT"
] | 0 | e2e49940dd2f752b1faf9ae26707435ba3441bcb | https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb | import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, fea_size, filter_size=128):
super().__init__()
self.w = nn.Linear(fea_size, filter_size, bias=True)
self.... |
PositionalEmbedding | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import math
import torch
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError('The rank of input must be 3.')
length = inputs.shape[1]
channels... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_str... | THUNLP-MT/PLM4MT | PositionalEmbedding | false | 1,103 | [
"BSD-3-Clause"
] | 0 | 85bd2ee9d96b07ac827e14d4b3e5b0d0924c3401 | https://github.com/THUNLP-MT/PLM4MT/tree/85bd2ee9d96b07ac827e14d4b3e5b0d0924c3401 | import math
import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError('The rank of input must be 3.')
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = c... |
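The PositionalEmbedding forward above computes length, channels and half_dim before being cut off, which matches the standard sinusoidal encoding. A hedged sketch of that encoding (assumes an even channel count of at least 4):

import math
import torch

def sinusoidal_positions_sketch(length, channels, dtype=torch.float32):
    half_dim = channels // 2
    freq = torch.exp(torch.arange(half_dim, dtype=dtype) * -(math.log(10000.0) / (half_dim - 1)))
    args = torch.arange(length, dtype=dtype).unsqueeze(1) * freq.unsqueeze(0)
    return torch.cat([torch.sin(args), torch.cos(args)], dim=1)   # (length, channels)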
MaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class MaxPool(nn.Module):
def __init__(self, dim=1):
super(MaxPool, self).__init__()
self.dim = dim
def forward(self, input):
return torch.max(input, self.dim)[0]
def __repr__(self):
return self.__class__.__name__ + ' (' + 'dim=' + str(... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
emp... | SwaggyZhang/Geometry-aware | MaxPool | false | 1,104 | [
"Apache-2.0"
] | 0 | a750c00aa2f0bda5160dfdeee2eef5230fd9d993 | https://github.com/SwaggyZhang/Geometry-aware/tree/a750c00aa2f0bda5160dfdeee2eef5230fd9d993 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, input):
return torch.max(input, self.dim)[0]
def __repr__(self):
return self.__class__.__name__ + ' (' + 'dim=' + str(self.dim) + ')'... |
Transpose | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class Transpose(nn.Module):
def __init__(self, dim1=0, dim2=1):
super(Transpose, self).__init__()
self.dim1 = dim1
self.dim2 = dim2
def forward(self, input):
return input.transpose(self.dim1, self.dim2).contiguous()
def __repr__(self):
... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_st... | SwaggyZhang/Geometry-aware | Transpose | false | 1,105 | [
"Apache-2.0"
] | 0 | a750c00aa2f0bda5160dfdeee2eef5230fd9d993 | https://github.com/SwaggyZhang/Geometry-aware/tree/a750c00aa2f0bda5160dfdeee2eef5230fd9d993 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dim1=0, dim2=1):
super().__init__()
self.dim1 = dim1
self.dim2 = dim2
def forward(self, input):
return input.transpose(self.dim1, self.dim2).contiguous()
def __repr__(self):
return self... |
GraphConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
import torch.nn
import torch.autograd
def sparse_bmm(sparse_matrix, dense_matrix_batch):
"""
Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix.
Args:
sparse_matrix (torch.sparse.FloatTensor): Shape = (m, n)
dense_matrix_batch (tor... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
import torch.autograd
assert_size_stride = ... | T0mt0mp/kaolin | GraphConv | false | 1,106 | [
"ECL-2.0",
"Apache-2.0"
] | 0 | 57d1e1478eec8df49dc7cc492f25637cec40399f | https://github.com/T0mt0mp/kaolin/tree/57d1e1478eec8df49dc7cc492f25637cec40399f | import torch
from torch import nn
import torch.nn
import torch.autograd
def sparse_bmm(sparse_matrix, dense_matrix_batch):
"""
Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix.
Args:
sparse_matrix (torch.sparse.FloatTensor): Shape = (m, n)
dense_matrix_batch (tor... |
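sparse_bmm in the GraphConv row multiplies one unbatched sparse (m, n) matrix with a batched dense (b, n, p) tensor; its body is truncated. The usual trick is to fold the batch into the column dimension, sketched here under that assumption:

import torch

def sparse_bmm_sketch(sparse_matrix, dense_matrix_batch):
    # sparse_matrix: sparse (m, n); dense_matrix_batch: dense (b, n, p)
    m = sparse_matrix.shape[0]
    b, n, p = dense_matrix_batch.shape
    dense = dense_matrix_batch.transpose(0, 1).reshape(n, b * p)   # (n, b*p)
    out = torch.sparse.mm(sparse_matrix, dense)                    # (m, b*p)
    return out.reshape(m, b, p).transpose(0, 1)                    # (b, m, p)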
Align | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn.functional as F
class Align(torch.nn.Module):
def __init__(self, p):
super(Align, self).__init__()
self.p = p
def forward(self, e1, e2):
pred = -torch.norm(e1 - e2, p=self.p, dim=1)
return pred
def only_pos_loss(self, e1, r, e2):
retu... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.as... | TMUITLab/EAFR | Align | false | 1,108 | [
"MIT"
] | 0 | dadb6485d48711ccb8aa2f03760aeb437645f1ff | https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, p):
super().__init__()
self.p = p
def forward(self, e1, e2):
pred = -torch.norm(e1 - e2, p=self.p, dim=1)
return pred
def only_pos_loss(self, e1, r, e2):
return -F.logsi... |
MNISTGenerator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
class MNISTGenerator(nn.Module):
def __init__(self, latent_dim):
super(MNISTGenerator, self).__init__()
self.image_shape = 1, 28, 28
self.latent_dim = latent_dim
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
fr... | RobinMaas95/GTSRB_Visualization | MNISTGenerator | false | 1,109 | [
"MIT"
] | 0 | fa837ff94e089a936ef4f4418970d262b35f70b6 | https://github.com/RobinMaas95/GTSRB_Visualization/tree/fa837ff94e089a936ef4f4418970d262b35f70b6 | import torch
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
class Model(nn.Module):
def __init__(self, latent_dim):
super().__init__()
self.image_shape = 1, 28, 28
self.latent_dim = latent_dim
self.dense1 = nn.Linear(se... |
Conv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
class Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2d, self).__init__(in_channels, out_channels, kernel_siz... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | T1anZhenYu/pytorch-classification | Conv2d | false | 1,110 | [
"MIT"
] | 0 | ad68e09f20a98541bcb437a7df8e7d14e8c21636 | https://github.com/T1anZhenYu/pytorch-classification/tree/ad68e09f20a98541bcb437a7df8e7d14e8c21636 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
class Model(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size,
... |
lovasz_hinge | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from torch.autograd import Variable
def flatten_binary_scores(scores, labels, ignore=255):
"""
Flattens predictions in the batch (binary case)
Remove labels equa... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from tor... | PhillipHuang2017/ext_portrait_segmentation | lovasz_hinge | false | 1,111 | [
"MIT"
] | 0 | 6d0cec0a953dacbc94a01ea8b719feb687b7c029 | https://github.com/PhillipHuang2017/ext_portrait_segmentation/tree/6d0cec0a953dacbc94a01ea8b719feb687b7c029 | import torch
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from torch.autograd import Variable
def flatten_binary_scores(scores, labels, ignore=255):
"""
Flattens predictions in the batch (binary case)
Remove labels equa... |
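flatten_binary_scores in the lovasz_hinge row flattens predictions and labels and drops entries equal to the ignore label (the docstring is cut at "Remove labels equa..."). A minimal sketch of that helper:

def flatten_binary_scores_sketch(scores, labels, ignore=255):
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    valid = labels != ignore
    return scores[valid], labels[valid]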
AlignEA | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn.functional as F
class AlignEA(torch.nn.Module):
def __init__(self, p, feat_drop, params):
super(AlignEA, self).__init__()
self.params = params
def forward(self, e1, r, e2):
return torch.sum(torch.pow(e1 + r - e2, 2), 1)
def only_pos_loss(self, e1, r,... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards... | TMUITLab/EAFR | AlignEA | false | 1,112 | [
"MIT"
] | 0 | dadb6485d48711ccb8aa2f03760aeb437645f1ff | https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, p, feat_drop, params):
super().__init__()
self.params = params
def forward(self, e1, r, e2):
return torch.sum(torch.pow(e1 + r - e2, 2), 1)
def only_pos_loss(self, e1, r, e2):
r... |
fpn_module | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn.functional as F
import torch.nn as nn
class fpn_module(nn.Module):
def __init__(self, numClass):
super(fpn_module, self).__init__()
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0
)
self.smooth1_1 = nn.Conv2d(256, 256, kerne... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as... | LOUEY233/CPS3320_python | fpn_module | false | 1,113 | [
"MIT"
] | 0 | 3cc1733d91c3a8f680eeb984348e2a52ae3285ec | https://github.com/LOUEY233/CPS3320_python/tree/3cc1733d91c3a8f680eeb984348e2a52ae3285ec | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, numClass):
super().__init__()
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0
)
self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, p... |
Bilinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class Bilinear(nn.Module):
def __init__(self, size):
super(Bilinear, self).__init__()
self.size = size
self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
self.reset_parameters()
def reset_parameters(self):
params = [p f... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_s... | TRUMANCFY/VL-DIORA | Bilinear | false | 1,114 | [
"Apache-2.0"
] | 0 | cef398e05842d4a30345260d8e27d1c362671834 | https://github.com/TRUMANCFY/VL-DIORA/tree/cef398e05842d4a30345260d8e27d1c362671834 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, size):
super().__init__()
self.size = size
self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
self.reset_parameters()
def reset_parameters(self):
params = [p for p in self.para... |
N_TransE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn.functional as F
class N_TransE(torch.nn.Module):
def __init__(self, p, params):
super(N_TransE, self).__init__()
self.p = p
self.params = params
def forward(self, e1, r, e2):
pred = -torch.norm(e1 + r - e2, p=self.p, dim=1)
return pred
... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.as... | TMUITLab/EAFR | N_TransE | false | 1,115 | [
"MIT"
] | 0 | dadb6485d48711ccb8aa2f03760aeb437645f1ff | https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, p, params):
super().__init__()
self.p = p
self.params = params
def forward(self, e1, r, e2):
pred = -torch.norm(e1 + r - e2, p=self.p, dim=1)
return pred
def loss(self, ... |
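N_TransE above scores a triple with the negative p-norm of e1 + r - e2 (the classic TransE score), while AlignEA in the nearby row uses the squared-distance variant. A tiny worked example on a triple that fits exactly:

import torch

e1 = torch.tensor([[0.1, 0.2]])
r  = torch.tensor([[0.3, 0.1]])
e2 = torch.tensor([[0.4, 0.3]])
transe_score  = -torch.norm(e1 + r - e2, p=2, dim=1)          # ~0 for a well-fit triple
alignea_score = torch.sum(torch.pow(e1 + r - e2, 2), dim=1)   # ~0 as well (squared distance)
print(transe_score, alignea_score)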
FM | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
from sklearn.metrics import *
class FM(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = tor... | Sunmyunghan/Final_Project | FM | false | 1,117 | [
"MIT"
] | 0 | 28cde293dc6d07521b2e1c5613b20444aea91d21 | https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21 | import torch
import torch.nn as nn
from sklearn.metrics import *
class Model(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
... |
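The FM docstring above describes pairwise (order-2) feature interactions over a (batch_size, field_size, embedding_size) tensor without the linear term. A hedged sketch of the standard 0.5 * ((sum v)^2 - sum v^2) identity such layers use:

import torch

def fm_pairwise_sketch(inputs):
    # inputs: (batch_size, field_size, embedding_size)
    square_of_sum = torch.pow(torch.sum(inputs, dim=1, keepdim=True), 2)
    sum_of_square = torch.sum(inputs * inputs, dim=1, keepdim=True)
    return 0.5 * torch.sum(square_of_sum - sum_of_square, dim=2)   # (batch_size, 1)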
VertexDirectEmbedder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.utils.data
from torch import nn
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vecto... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from... | TWJianNuo/detectron2 | VertexDirectEmbedder | false | 1,118 | [
"Apache-2.0"
] | 0 | 091bc43e85b8f7cefdccebf8d85afb7cfff2a3f0 | https://github.com/TWJianNuo/detectron2/tree/091bc43e85b8f7cefdccebf8d85afb7cfff2a3f0 | import torch
import torch.utils.data
from torch import nn
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vecto... |
HighWay | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
from torch.nn import Parameter
class HighWay(torch.nn.Module):
def __init__(self, f_in, f_out, bias=True):
super(HighWay, self).__init__()
self.w = Parameter(torch.Tensor(f_in, f_out))
nn.init.xavier_uniform_(self.w)
if bias:
self.bia... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch.... | TMUITLab/EAFR | HighWay | false | 1,119 | [
"MIT"
] | 0 | dadb6485d48711ccb8aa2f03760aeb437645f1ff | https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff | import torch
import torch.nn as nn
from torch.nn import Parameter
class Model(torch.nn.Module):
def __init__(self, f_in, f_out, bias=True):
super().__init__()
self.w = Parameter(torch.Tensor(f_in, f_out))
nn.init.xavier_uniform_(self.w)
if bias:
self.bias = Parameter(t... |
Network | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
from torch.nn.functional import relu
from torch.nn.functional import softmax
class Network(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.fc1 = nn.Linea... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | THE-RAF/Reinforcement-Learning | Network | false | 1,120 | [
"MIT"
] | 0 | 36b4c5330740b533fb8170263f995afb91a1d021 | https://github.com/THE-RAF/Reinforcement-Learning/tree/36b4c5330740b533fb8170263f995afb91a1d021 | import torch
import torch.nn as nn
from torch.nn.functional import relu
from torch.nn.functional import softmax
class Model(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.fc1 = nn.Linear(... |
SpatialCrossMapLRN | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class SpatialCrossMapLRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
ACROSS_CHANNELS=True):
super(SpatialCrossMapLRN, self).__init__()
self.ACROSS_CHANNELS = ... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.... | Tagussan/pretrained-models.pytorch | SpatialCrossMapLRN | false | 1,121 | [
"BSD-3-Clause"
] | 0 | 854e6c153c2534dd7cf76a5ec102307ea5171167 | https://github.com/Tagussan/pretrained-models.pytorch/tree/854e6c153c2534dd7cf76a5ec102307ea5171167 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Model(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
ACROSS_CHANNELS=True):
super().__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHA... |
MLPBase | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
de... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_s... | TachikakaMin/dreamer-torch | MLPBase | false | 1,122 | [
"MIT"
] | 0 | 3c99526f4507e28cf8b34ada0321001adcf8ae1f | https://github.com/TachikakaMin/dreamer-torch/tree/3c99526f4507e28cf8b34ada0321001adcf8ae1f | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_inputs, num_outputs):
super().__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self,... |
N_R_Align | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class N_R_Align(torch.nn.Module):
def __init__(self, params):
super(N_R_Align, self).__init__()
self.params = params
self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06)
def forward(self, e1, e2, n1, n2):
return self.params * torch.sigmoid(s... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert... | TMUITLab/EAFR | N_R_Align | false | 1,123 | [
"MIT"
] | 0 | dadb6485d48711ccb8aa2f03760aeb437645f1ff | https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff | import torch
import torch.nn as nn
class Model(torch.nn.Module):
def __init__(self, params):
super().__init__()
self.params = params
self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06)
def forward(self, e1, e2, n1, n2):
return self.params * torch.sigmoid(self.cos_sim(n1, n2)... |
FC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.nn.functional as F
class FC(nn.Module):
def __init__(self, in_channels, out_channels, use_bias=False,
activation='LR', gain=2 ** 0.5):
super(FC, self).__init__()
self.he_std = in_channels * -0.5 * gain
self.weight = torch.nn.Paramete... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_s... | TOMeoww/STGAN | FC | false | 1,124 | [
"MIT"
] | 0 | 090a4024999e68f017140312ecfdd0d4dc3dc425 | https://github.com/TOMeoww/STGAN/tree/090a4024999e68f017140312ecfdd0d4dc3dc425 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels, out_channels, use_bias=False,
activation='LR', gain=2 ** 0.5):
super().__init__()
self.he_std = in_channels * -0.5 * gain
self.weight = torch.nn.Parameter(tor... |
Mean | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
class Mean(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
_std, mean = torch.std_mean(x, self.dim)
return mean
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[]... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret... | Tahlor/glom-pytorch | Mean | false | 1,125 | [
"MIT"
] | 0 | 45b2fc52af5288cd53611e497a70d53ffa303410 | https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410 | import torch
class Model(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
_std, mean = torch.std_mean(x, self.dim)
return mean
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4... |
LinearModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
class LinearModel(torch.nn.Module):
def __init__(self, input_size: 'int', output_size: 'int', dropout: 'float'
):
super().__init__()
self.linear = torch.nn.Linear(input_size, output_size)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, data):
d... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cu... | TDteach/SEAM | LinearModel | false | 1,126 | [
"MIT"
] | 0 | 231447dad15403e7620adcf6629b6e7fccc4b809 | https://github.com/TDteach/SEAM/tree/231447dad15403e7620adcf6629b6e7fccc4b809 | import torch
class Model(torch.nn.Module):
def __init__(self, input_size: 'int', output_size: 'int', dropout: 'float'
):
super().__init__()
self.linear = torch.nn.Linear(input_size, output_size)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, data):
data = ... |
GeometricMean | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn.functional as F
class GeometricMean(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
log_x = torch.log(F.relu(x))
return torch.exp(torch.mean(log_x, dim=self.dim))
def get_inputs():
return [t... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = t... | Tahlor/glom-pytorch | GeometricMean | false | 1,127 | [
"MIT"
] | 0 | 45b2fc52af5288cd53611e497a70d53ffa303410 | https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410 | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
log_x = torch.log(F.relu(x))
return torch.exp(torch.mean(log_x, dim=self.dim))
def get_inputs():
return [torch.ran... |
MinibatchStd | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class MinibatchStd(nn.Module):
"""
calculate minibatch std to avoid mode collapse
"""
def __init__(self):
super(MinibatchStd, self).__init__()
def forward(self, x):
size = list(x.size())
size[1] = 1
std = torch.std(x, dim=0)
... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_... | Tak-jae-ho/RGBD-GAN-pytorch | MinibatchStd | false | 1,128 | [
"MIT"
] | 0 | 4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb | https://github.com/Tak-jae-ho/RGBD-GAN-pytorch/tree/4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb | import torch
import torch.nn as nn
class Model(nn.Module):
"""
calculate minibatch std to avoid mode collapse
"""
def __init__(self):
super().__init__()
def forward(self, x):
size = list(x.size())
size[1] = 1
std = torch.std(x, dim=0)
mean = torch.mean(std... |
PixelwiseNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
class PixelwiseNorm(nn.Module):
"""
layer pixelwise normalization
"""
def __init__(self, eps=1e-07):
super(PixelwiseNorm, self).__init__()
self.eps = eps
def forward(self, x):
return x / torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) ... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_... | Tak-jae-ho/RGBD-GAN-pytorch | PixelwiseNorm | false | 1,129 | [
"MIT"
] | 0 | 4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb | https://github.com/Tak-jae-ho/RGBD-GAN-pytorch/tree/4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb | import torch
import torch.nn as nn
class Model(nn.Module):
"""
layer pixelwise normalization
"""
def __init__(self, eps=1e-07):
super().__init__()
self.eps = eps
def forward(self, x):
return x / torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) + self.eps
)
... |
ConsensusAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn.functional as F
from torch import nn
from torch import einsum
class ConsensusAttention(nn.Module):
def __init__(self, num_patches_side, attend_self=True,
local_consensus_radius=0):
super().__init__()
self.attend_self = attend_self
self.local_consensus_... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | Tahlor/glom-pytorch | ConsensusAttention | false | 1,130 | [
"MIT"
] | 0 | 45b2fc52af5288cd53611e497a70d53ffa303410 | https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410 | import torch
import torch.nn.functional as F
from torch import nn
from torch import einsum
class Model(nn.Module):
def __init__(self, num_patches_side, attend_self=True,
local_consensus_radius=0):
super().__init__()
self.attend_self = attend_self
self.local_consensus_radius = loca... |
DenseCrossEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
class DenseCrossEntropy(nn.Module):
def forward(self, x, target):
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
loss = -logprobs * target
loss ... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
... | Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution | DenseCrossEntropy | false | 1,131 | [
"Apache-2.0"
] | 0 | 8e2d9056d5c88c6415827086809e73522b336fbb | https://github.com/Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution/tree/8e2d9056d5c88c6415827086809e73522b336fbb | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
class Model(nn.Module):
def forward(self, x, target):
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
loss = -logprobs * target
loss = loss.sum(-... |
HalfMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _al... | import torch
from torch.nn.modules.loss import MSELoss
class HalfMSELoss(MSELoss):
def __init__(self, reduction='mean'):
super().__init__(reduction=reduction)
def forward(self, input, target):
return super().forward(input, target) / 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]),... | import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import MSELoss
assert_size_stride = torch._C._dynamo.guards.as... | ThayaFluss/candle | HalfMSELoss | false | 1,132 | [
"MIT"
] | 0 | 4a12fde60ffbbf0cb688617fee81aded94c0b613 | https://github.com/ThayaFluss/candle/tree/4a12fde60ffbbf0cb688617fee81aded94c0b613 | import torch
from torch.nn.modules.loss import MSELoss
class Model(MSELoss):
def __init__(self, reduction='mean'):
super().__init__(reduction=reduction)
def forward(self, input, target):
return super().forward(input, target) / 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch... |
EqualLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
import torch.nn.functional as F
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(o... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_st... | TheSignPainter/AGGAN | EqualLinear | false | 1,133 | [
"Apache-2.0"
] | 0 | d75144f81df3f5a0a761d48c6285c38e74002be3 | https://github.com/TheSignPainter/AGGAN/tree/d75144f81df3f5a0a761d48c6285c38e74002be3 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim... |
SuperPointNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.optim
import torch.utils.data
class SuperPointNet(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super(SuperPointNet, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | Sunny-Qin-0314/pytorch-superpoint | SuperPointNet | false | 1,134 | [
"MIT"
] | 0 | 5c5325a1e5917afcc7469e137206990a8cd33725 | https://github.com/Sunny-Qin-0314/pytorch-superpoint/tree/5c5325a1e5917afcc7469e137206990a8cd33725 | import torch
import torch.optim
import torch.utils.data
class Model(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2... |
ArcMarginProduct_subcenter | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.nn.parallel
class ArcMarginProduct_subcenter(nn.Module):
def __init__(self, in_features, out_features, k=3):
super().__init__()
self.weight = nn.Parameter(torch.FloatTensor(out_feat... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution | ArcMarginProduct_subcenter | false | 1,135 | [
"Apache-2.0"
] | 0 | 8e2d9056d5c88c6415827086809e73522b336fbb | https://github.com/Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution/tree/8e2d9056d5c88c6415827086809e73522b336fbb | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.nn.parallel
class Model(nn.Module):
def __init__(self, in_features, out_features, k=3):
super().__init__()
self.weight = nn.Parameter(torch.FloatTensor(out_features * k,
... |
ChannelMixer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class ChannelMixer(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super(ChannelMixer, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as ... | TheRealMarVin/mlp-mixer | ChannelMixer | false | 1,136 | [
"MIT"
] | 0 | 2124cb5c5adfc7af473cab535095471d4943adab | https://github.com/TheRealMarVin/mlp-mixer/tree/2124cb5c5adfc7af473cab535095471d4943adab | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, hidden_size, dropout=None):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
self.dropout = None
if dropout is not None:
... |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
from torch.nn import functional as F
class Net(nn.Module):
def __init__(self, obs_dim, act_dim):
super(Net, self).__init__()
self.fc0 = nn.Linear(obs_dim, 128)
self.fc1 = nn.Linear(128, act_dim)
def forward(self, x):
x = x.type_as(self.fc0.bi... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_s... | TommeyChang/CS294-Homework | Net | false | 1,137 | [
"MIT"
] | 0 | 17b525bf4366034b45c4febd89f1053d44550237 | https://github.com/TommeyChang/CS294-Homework/tree/17b525bf4366034b45c4febd89f1053d44550237 | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, obs_dim, act_dim):
super().__init__()
self.fc0 = nn.Linear(obs_dim, 128)
self.fc1 = nn.Linear(128, act_dim)
def forward(self, x):
x = x.type_as(self.fc0.bias)
... |
ActorDownAction | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
from torch import nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
de... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.... | TachikakaMin/dreamer-torch | ActorDownAction | false | 1,138 | [
"MIT"
] | 0 | 3c99526f4507e28cf8b34ada0321001adcf8ae1f | https://github.com/TachikakaMin/dreamer-torch/tree/3c99526f4507e28cf8b34ada0321001adcf8ae1f | import torch
from torch import nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super().__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(sel... |
UpsampleConvLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
class UpsampleConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
scale_factor):
super(UpsampleConvLayer, self).__init__()
self._scale_factor = scale_factor
self._reflection_pad = nn.ReflectionPad2d(kernel_siz... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.... | ThomasRanvier/cnn_style_transfer | UpsampleConvLayer | false | 1,139 | [
"MIT"
] | 0 | 90b6c76c20263c22f4e45184d572284726ecbd7b | https://github.com/ThomasRanvier/cnn_style_transfer/tree/90b6c76c20263c22f4e45184d572284726ecbd7b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
scale_factor):
super().__init__()
self._scale_factor = scale_factor
self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
self._conv = nn.Con... |
StructuralProbe | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _alig... | import torch
import torch.nn as nn
import torch.utils.data.dataloader
class StructuralProbe(nn.Module):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
def __init__(self, model_dim, ra... | import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
assert_size_stride = to... | TimO96/NLP2 | StructuralProbe | false | 1,140 | [
"MIT"
] | 0 | 83f65a385457f68397c641f38b53df0110282578 | https://github.com/TimO96/NLP2/tree/83f65a385457f68397c641f38b53df0110282578 | import torch
import torch.nn as nn
import torch.utils.data.dataloader
class Model(nn.Module):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
def __init__(self, model_dim, rank, device... |