Dataset columns and value ranges shown in this preview:

| Column | Type | Range |
| --- | --- | --- |
| entry_point | string | length 1 to 65 |
| original_triton_code | string | length 4.5k to 619k |
| python_code | string | length 208 to 60.9k |
| triton_code | string | length 1.15k to 275k |
| repo_name | string | length 7 to 115 |
| module_name | string | length 1 to 65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0 to 18.5k |
| licenses | list | length 1 to 6 |
| stars | int64 | 0 to 19.8k |
| sha | string | length 40 (fixed) |
| repo_link | string | length 72 to 180 |
| pytorch_code | string | length 200 to 4.05k |

The example records that follow list their fields in this order; the four long code columns (original_triton_code, python_code, triton_code, pytorch_code) appear as truncated single-line previews.
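A minimal sketch of loading the dataset and reading one record with the Hugging Face `datasets` library; the dataset path and split name below are placeholders, since this preview does not name the repository.

```python
from datasets import load_dataset

# Placeholder dataset id and split; substitute the real repository.
ds = load_dataset("your-org/pytorch-to-triton-modules", split="train")

row = ds[0]
# Scalar metadata columns from the schema above.
print(row["entry_point"], row["module_name"], row["repo_name"])
print(row["synthetic"], row["uuid"], row["licenses"], row["stars"])
print(row["sha"], row["repo_link"])

# The four code columns are long strings; print only a prefix of each.
for col in ("pytorch_code", "python_code", "triton_code", "original_triton_code"):
    print(f"--- {col} ---")
    print(row[col][:200])
```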
SpeakNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn import torch.optim def xavier_init(module): """ Xavier initializer for module parameters. """ for parameter in module.parameters(): if len(parameter.data.shape) == 1: parameter.data.fill_(0) else: fan_in = parameter...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
aaronmueller/discourse-hw3
SpeakNet
false
1,350
[ "MIT" ]
0
93313a2ce83fde9480914384be52ec9160e967af
https://github.com/aaronmueller/discourse-hw3/tree/93313a2ce83fde9480914384be52ec9160e967af
import math import torch import torch.nn as nn import torch.optim def xavier_init(module): """ Xavier initializer for module parameters. """ for parameter in module.parameters(): if len(parameter.data.shape) == 1: parameter.data.fill_(0) else: fan_in = parameter...
StochasticPool2D
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair class StochasticPool2D(nn.Module): """ Args: kernel_size : size of the pooling kernel stride : pool stride Note: valid padding is implemented """ def __init__(sel...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.nn.modules.utils import _pair assert_size_stride = torch...
abhishek30-ml/Multiple-Deep-Network-Learning
StochasticPool2D
false
1,351
[ "MIT" ]
0
adf705c004cc8a1e517eeed99e7030acb476dfd9
https://github.com/abhishek30-ml/Multiple-Deep-Network-Learning/tree/adf705c004cc8a1e517eeed99e7030acb476dfd9
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair class Model(nn.Module): """ Args: kernel_size : size of the pooling kernel stride : pool stride Note: valid padding is implemented """ def __init__(self, kernel_s...
AvgPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
from torch.nn import Module import torch import torch as th class AvgPool2d(Module): """ This class is the beginning of an exact python port of the torch.nn.AvgPool2d module. Because PySyft cannot hook into layers which are implemented in C++, our special functionalities (such as encrypted computation...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._em...
abogaziah/PySyft
AvgPool2d
false
1,352
[ "Apache-2.0" ]
0
812dc6f350261793c67a928786fc081158f22a76
https://github.com/abogaziah/PySyft/tree/812dc6f350261793c67a928786fc081158f22a76
from torch.nn import Module import torch import torch as th class Model(Module): """ This class is the beginning of an exact python port of the torch.nn.AvgPool2d module. Because PySyft cannot hook into layers which are implemented in C++, our special functionalities (such as encrypted computation) do...
ResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, in_channels): super(ResidualBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(in_ch...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
abhay97ps/visual-control-ppo-procgen
ResidualBlock
false
1,353
[ "MIT" ]
0
765fe1ddb289d384abddc4df8eb865379c8da76a
https://github.com/abhay97ps/visual-control-ppo-procgen/tree/765fe1ddb289d384abddc4df8eb865379c8da76a
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels): super().__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(in_channels=in_channels, out_cha...
Attention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Attention(nn...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
abhinavrangarajan/genienlp
Attention
false
1,354
[ "BSD-3-Clause" ]
0
ba121274b3365739ce9e5a8facc9a2904149b9c7
https://github.com/abhinavrangarajan/genienlp/tree/ba121274b3365739ce9e5a8facc9a2904149b9c7
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Model(nn.Mod...
LinearAttentionBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class LinearAttentionBlock(nn.Module): def __init__(self, in_features): super(LinearAttentionBlock, self).__init__() self.op = nn.Conv2d(in_channels=in_features, out_channels=1, kernel_size=1, padding=0, bias=False) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
abhay97ps/visual-control-ppo-procgen
LinearAttentionBlock
false
1,355
[ "MIT" ]
0
765fe1ddb289d384abddc4df8eb865379c8da76a
https://github.com/abhay97ps/visual-control-ppo-procgen/tree/765fe1ddb289d384abddc4df8eb865379c8da76a
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_features): super().__init__() self.op = nn.Conv2d(in_channels=in_features, out_channels=1, kernel_size=1, padding=0, bias=False) def forward(self, l, g): N, C,...
SirenLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np from torch import nn class SirenLayer(nn.Module): def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False): super().__init__() self.in_f = in_f self.w0 = w0 self.linear = nn.Linear(in_f, out_f) self.is_first = is_first s...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy ...
abrilcf/pixel-nerf
SirenLayer
false
1,356
[ "BSD-2-Clause" ]
0
9a6a8ab6c39ec01d52df3bf4c03830f7162cc679
https://github.com/abrilcf/pixel-nerf/tree/9a6a8ab6c39ec01d52df3bf4c03830f7162cc679
import torch import numpy as np from torch import nn class Model(nn.Module): def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False): super().__init__() self.in_f = in_f self.w0 = w0 self.linear = nn.Linear(in_f, out_f) self.is_first = is_first self.i...
ContextGating
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class ContextGating(nn.Module): def __init__(self, input_num): super(ContextGating, self).__init__() self.sigmoid = nn.Sigmoid() self.linear = nn.Linear(input_num, input_num) def forward(self, x): lin = self.linear(x.permute(0, 2, 3, 1)) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_st...
aagnone3/dc19t2
ContextGating
false
1,357
[ "Apache-2.0" ]
0
cc7baf2a8fe73d28c224f0bf68b5355efd96c24f
https://github.com/aagnone3/dc19t2/tree/cc7baf2a8fe73d28c224f0bf68b5355efd96c24f
import torch from torch import nn class Model(nn.Module): def __init__(self, input_num): super().__init__() self.sigmoid = nn.Sigmoid() self.linear = nn.Linear(input_num, input_num) def forward(self, x): lin = self.linear(x.permute(0, 2, 3, 1)) lin = lin.permute(0, 3,...
MultiHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Attention(nn...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
abhinavrangarajan/genienlp
MultiHead
false
1,358
[ "BSD-3-Clause" ]
0
ba121274b3365739ce9e5a8facc9a2904149b9c7
https://github.com/abhinavrangarajan/genienlp/tree/ba121274b3365739ce9e5a8facc9a2904149b9c7
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class Attention(nn...
Net1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torch.utils.data.distributed class Net1(nn.Module): def __init__(self): super(Net1, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) def forward(...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...
abhinavs95/deep-learning-containers
Net1
false
1,359
[ "Apache-2.0" ]
0
bd1cb70a8cd1cbb5d39bc825fc7ab9f53ebf9f89
https://github.com/abhinavs95/deep-learning-containers/tree/bd1cb70a8cd1cbb5d39bc825fc7ab9f53ebf9f89
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torch.utils.data.distributed class Model(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) def forward(self, x):...
WeightedL2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class WeightedL2(nn.Module): def __init__(self): super(WeightedL2, self).__init__() def forward(self, x, target, w): return torch.sum(w * (x - target) ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
acrosson/dl
WeightedL2
false
1,360
[ "MIT" ]
0
94e68533b38f53f09e9bef460ba96fa389fc8eb4
https://github.com/acrosson/dl/tree/94e68533b38f53f09e9bef460ba96fa389fc8eb4
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, target, w): return torch.sum(w * (x - target) ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] ...
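Records such as the WeightedL2 example just above carry a pytorch_code string that defines both a Model class and a get_inputs() helper. A small sketch of one way such a record might be exercised; the inline string is a simplified stand-in for the real field, with shapes taken from the preview above.

```python
import torch

# Simplified stand-in for a record's pytorch_code field (the real field is
# longer and truncated in the preview); shapes follow the WeightedL2 example.
pytorch_code = """
import torch
import torch.nn as nn

class Model(nn.Module):
    def forward(self, x, target, w):
        return torch.sum(w * (x - target) ** 2)

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
"""

namespace = {}
exec(pytorch_code, namespace)      # defines Model and get_inputs in namespace
model = namespace["Model"]()
out = model(*namespace["get_inputs"]())
print(out.item())                  # scalar weighted-L2 value
```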
CustomGruCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn class CustomGruCell(nn.Module): """ A forward only GRU cell. Input should be: (sequence length x batch size x input_size). The output is the output of the final forward call. It's not clear if it would be possible to use the output from each ce...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np ...
abogaziah/PySyft
CustomGruCell
false
1,361
[ "Apache-2.0" ]
0
812dc6f350261793c67a928786fc081158f22a76
https://github.com/abogaziah/PySyft/tree/812dc6f350261793c67a928786fc081158f22a76
import torch import numpy as np import torch.nn as nn class Model(nn.Module): """ A forward only GRU cell. Input should be: (sequence length x batch size x input_size). The output is the output of the final forward call. It's not clear if it would be possible to use the output from each cell in a ...
ImpalaBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, in_channels): super(ResidualBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(in_ch...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
abhay97ps/visual-control-ppo-procgen
ImpalaBlock
false
1,362
[ "MIT" ]
0
765fe1ddb289d384abddc4df8eb865379c8da76a
https://github.com/abhay97ps/visual-control-ppo-procgen/tree/765fe1ddb289d384abddc4df8eb865379c8da76a
import torch import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, in_channels): super().__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= in_channels, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(in_channels=in_channels,...
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.utils.checkpoint from torch import nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def forward(self, query, key, value, mask=None): dk = query.size()[-1] scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk) if ma...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
abedi1/ECLARe
MultiHeadAttention
false
1,363
[ "Apache-2.0" ]
0
a446b8086404b058923a9b3ce47e75cc40436a58
https://github.com/abedi1/ECLARe/tree/a446b8086404b058923a9b3ce47e75cc40436a58
import math import torch import torch.utils.checkpoint from torch import nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def forward(self, query, key, value, mask=None): dk = query.size()[-1] scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk) if ma...
BertSelfOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.onnx class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import n...
Alwaysproblem/examples-1
BertSelfOutput
false
1,364
[ "MIT" ]
0
9754fa63ed1931489a21ac1f5b299f945e369a5c
https://github.com/Alwaysproblem/examples-1/tree/9754fa63ed1931489a21ac1f5b299f945e369a5c
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.onnx class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super().__init__() ...
LinearWithChannel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn class LinearWithChannel(nn.Module): def __init__(self, channel_size, input_size, output_size): super(LinearWithChannel, self).__init__() self.weight = torch.nn.Parameter(torch.zeros(channel_size, output_size, input_size)) self.bia...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.a...
adam-coogan/swyft
LinearWithChannel
false
1,365
[ "MIT" ]
0
c54bdd9f77ddf02fda857e26640df012cbe545fc
https://github.com/adam-coogan/swyft/tree/c54bdd9f77ddf02fda857e26640df012cbe545fc
import math import torch import torch.nn as nn class Model(nn.Module): def __init__(self, channel_size, input_size, output_size): super().__init__() self.weight = torch.nn.Parameter(torch.zeros(channel_size, output_size, input_size)) self.bias = torch.nn.Parameter(torch.zeros(...
WeightedL1
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class WeightedL1(nn.Module): def __init__(self): super(WeightedL1, self).__init__() def forward(self, x, target, w): return (w * torch.abs(x - target)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
acrosson/dl
WeightedL1
false
1,366
[ "MIT" ]
0
94e68533b38f53f09e9bef460ba96fa389fc8eb4
https://github.com/acrosson/dl/tree/94e68533b38f53f09e9bef460ba96fa389fc8eb4
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, target, w): return (w * torch.abs(x - target)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] ...
minibatch_std_concat_layer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import copy import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd class minibatch_std_concat_layer(nn.Module): def __init__(self, averaging='all'): ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch.utils import data as data import torch.nn as...
achrefjarray/ESRGANplus-master
minibatch_std_concat_layer
false
1,367
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
import copy import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd class Model(nn.Module): def __init__(self, averaging='all'): super().__init__() ...
ResidualBlockNoBN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torch.nn.modules.batchnorm import _BatchNorm from torchvision.models import vgg as vgg from torch import autograd as autograd @torch.no_grad() def default_init_weights(module_list, sc...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from ...
achrefjarray/ESRGANplus-master
ResidualBlockNoBN
false
1,368
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torch.nn.modules.batchnorm import _BatchNorm from torchvision.models import vgg as vgg from torch import autograd as autograd @torch.no_grad() def default_init_weights(module_list, sc...
MySoftmax
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F class MySoftmax(nn.Module): def forward(self, input_): batch_size = input_.size()[0] output_ = torch.stack([F.softmax(input_[i]) for i in range( batch_size)], 0) return output_ def get_inputs(): return [t...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
acrosson/dl
MySoftmax
false
1,369
[ "MIT" ]
0
94e68533b38f53f09e9bef460ba96fa389fc8eb4
https://github.com/acrosson/dl/tree/94e68533b38f53f09e9bef460ba96fa389fc8eb4
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def forward(self, input_): batch_size = input_.size()[0] output_ = torch.stack([F.softmax(input_[i]) for i in range( batch_size)], 0) return output_ def get_inputs(): return [torch...
CBR
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class CBR(nn.Module): def __init__(self, in_channels, out_channels, kernel_size): super(CBR, self).__init__() self.cnn = nn.Conv2d(in_channels, out_channels, kernel_size, stride =2, padding=2) self.relu = nn.ReLU() def forward(self, x): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
adamkrekorian/CI-UNet
CBR
false
1,370
[ "MIT" ]
0
fab0f8806540f5d79911bd81ba54dff135f9814f
https://github.com/adamkrekorian/CI-UNet/tree/fab0f8806540f5d79911bd81ba54dff135f9814f
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size): super().__init__() self.cnn = nn.Conv2d(in_channels, out_channels, kernel_size, stride =2, padding=2) self.relu = nn.ReLU() def forward(self, x): ...
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class LayerNorm(nn...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
abhinavrangarajan/genienlp
TransformerEncoderLayer
false
1,371
[ "BSD-3-Clause" ]
0
ba121274b3365739ce9e5a8facc9a2904149b9c7
https://github.com/abhinavrangarajan/genienlp/tree/ba121274b3365739ce9e5a8facc9a2904149b9c7
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data def matmul(x, y): if x.dim() == y.dim(): return x @ y if x.dim() == y.dim() - 1: return (x.unsqueeze(-2) @ y).squeeze(-2) return (x @ y.unsqueeze(-2)).squeeze(-2) class LayerNorm(nn...
DCT
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class DCT(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, out_pad=0): super(DCT, self).__init__() self.dcnn = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2, output_padding=out_pad) self.tanh = nn...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
adamkrekorian/CI-UNet
DCT
false
1,372
[ "MIT" ]
0
fab0f8806540f5d79911bd81ba54dff135f9814f
https://github.com/adamkrekorian/CI-UNet/tree/fab0f8806540f5d79911bd81ba54dff135f9814f
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, out_pad=0): super().__init__() self.dcnn = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2, output_padding=out_pad) self.tanh = nn.Tanh()...
CL
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class CL(nn.Module): def __init__(self, in_channels, out_channels, kernel_size): super(CL, self).__init__() self.cnn = nn.Conv2d(in_channels, out_channels, kernel_size, stride =2, padding=2) self.lr = nn.LeakyReLU(negative_slope=0.2) def...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
adamkrekorian/CI-UNet
CL
false
1,373
[ "MIT" ]
0
fab0f8806540f5d79911bd81ba54dff135f9814f
https://github.com/adamkrekorian/CI-UNet/tree/fab0f8806540f5d79911bd81ba54dff135f9814f
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size): super().__init__() self.cnn = nn.Conv2d(in_channels, out_channels, kernel_size, stride =2, padding=2) self.lr = nn.LeakyReLU(negative_slope=0.2) def forw...
ModulatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.autograd import Function import math import torch import torch.utils.data from torch.nn import functional as F from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd def pad(pad_type, padd...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd...
achrefjarray/ESRGANplus-master
ModulatedConv2d
false
1,374
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
from torch.autograd import Function import math import torch import torch.utils.data from torch.nn import functional as F from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd def pad(pad_type, padd...
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.autograd import Function import math import torch import torch.utils.data from torch.nn import functional as F from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd def pad(pad_type, padd...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math import torch.utils.data from tor...
achrefjarray/ESRGANplus-master
ToRGB
false
1,375
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
from torch.autograd import Function import math import torch import torch.utils.data from torch.nn import functional as F from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd def pad(pad_type, padd...
HighwayLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed def my_xavier_init(m, gain=1): """Xavier initialization: weights initialization that tries to make variance of outputs of a layer equal to variance of its inputs. """ for p in m.parameters(): if p.di...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
adamlerer/droidlet
HighwayLayer
false
1,376
[ "MIT" ]
0
ada38d191dadcea9aba12330e35e8e7d6d1663d9
https://github.com/adamlerer/droidlet/tree/ada38d191dadcea9aba12330e35e8e7d6d1663d9
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed def my_xavier_init(m, gain=1): """Xavier initialization: weights initialization that tries to make variance of outputs of a layer equal to variance of its inputs. """ for p in m.parameters(): if p.di...
GramMatrix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.onnx class GramMatrix(nn.Module): def forward(self, input): a, b, c, d = input.size() features = input.view(a, b, c * d) G = torch.matmul(features, torch.transpose(features, 1, 2)) return G.div(a * b * c * d) def get_inputs(): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.gu...
adi-horowitz/final-project
GramMatrix
false
1,377
[ "MIT" ]
0
0fd864663e92a6bcaa5f068e3e45b2a76460d335
https://github.com/adi-horowitz/final-project/tree/0fd864663e92a6bcaa5f068e3e45b2a76460d335
import torch import torch.nn as nn import torch.onnx class Model(nn.Module): def forward(self, input): a, b, c, d = input.size() features = input.view(a, b, c * d) G = torch.matmul(features, torch.transpose(features, 1, 2)) return G.div(a * b * c * d) def get_inputs(): retur...
MDN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class MDN(nn.Module): def __init__(self, input_size, hidden_size, output_size, batch_size): super(MDN, self).__init__() self.hidden_size = hidden_size self.batch_size = batch_size self.z_h = nn.Linear(input_size, h...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
adelaunay3/DL-Seq2Seq
MDN
false
1,378
[ "MIT" ]
0
7b83e2501b26780ff2dec140ac15a6664699dc16
https://github.com/adelaunay3/DL-Seq2Seq/tree/7b83e2501b26780ff2dec140ac15a6664699dc16
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_size, hidden_size, output_size, batch_size): super().__init__() self.hidden_size = hidden_size self.batch_size = batch_size self.z_h = nn.Linear(input_size, hidden_s...
ConvReLUNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.cuda import torch.distributed import torch.optim class ConvReLUNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNorm, self).__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
admariner/NeMo
ConvReLUNorm
false
1,379
[ "Apache-2.0" ]
0
e542d7f9063a40afa4119a3b94de4c2c636a37bb
https://github.com/admariner/NeMo/tree/e542d7f9063a40afa4119a3b94de4c2c636a37bb
import torch import torch.cuda import torch.distributed import torch.optim class Model(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super().__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, paddi...
MultiHeadDense
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn class MultiHeadDense(nn.Module): def __init__(self, d): super(MultiHeadDense, self).__init__() self.weight = nn.Parameter(torch.Tensor(d, d)) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.a...
afperezm/DeepGlobe-Road-Extraction-Challenge
MultiHeadDense
false
1,380
[ "MIT" ]
0
d3e0a8123d64baa3975663ece053edbc4bbdc4e6
https://github.com/afperezm/DeepGlobe-Road-Extraction-Challenge/tree/d3e0a8123d64baa3975663ece053edbc4bbdc4e6
import math import torch import torch.nn as nn class Model(nn.Module): def __init__(self, d): super().__init__() self.weight = nn.Parameter(torch.Tensor(d, d)) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self) ->None: nn.init.kai...
EqualLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.autograd import Function import math import torch import torch.utils.data from torch.nn import functional as F from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd def fused_leaky_relu(i...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math import torch.utils.data from tor...
achrefjarray/ESRGANplus-master
EqualLinear
false
1,381
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
from torch.autograd import Function import math import torch import torch.utils.data from torch.nn import functional as F from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd def fused_leaky_relu(i...
GlobalMaxPool1d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn class GlobalMaxPool1d(nn.Module): """Performs global max pooling over the entire length of a batched 1D tensor # Arguments input: Input tensor """ def forward(self, input): return nn.functional.max_pool1d(input, kernel_size=input.size()[2:] ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empt...
aiswaryasankar/Few_shot_exp
GlobalMaxPool1d
false
1,382
[ "MIT" ]
0
21c5a5d93dc69715b037a0fd2dca1b6f7d9de773
https://github.com/aiswaryasankar/Few_shot_exp/tree/21c5a5d93dc69715b037a0fd2dca1b6f7d9de773
import torch from torch import nn class Model(nn.Module): """Performs global max pooling over the entire length of a batched 1D tensor # Arguments input: Input tensor """ def forward(self, input): return nn.functional.max_pool1d(input, kernel_size=input.size()[2:] ).view(...
VectorQuantizer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import Tensor from torch import nn from torch.nn import functional as F class VectorQuantizer(nn.Module): """ Reference: [1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py """ def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta: ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
adammoss/vae
VectorQuantizer
false
1,383
[ "Apache-2.0" ]
0
52f0f56492e3ac7c8b866ae99d5333b4281a371f
https://github.com/adammoss/vae/tree/52f0f56492e3ac7c8b866ae99d5333b4281a371f
import torch from torch import Tensor from torch import nn from torch.nn import functional as F class Model(nn.Module): """ Reference: [1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py """ def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta: 'float'=...
VectorQuantizer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class VectorQuantizer(nn.Module): def __init__(self, num_embeddings, embedding_dim, commitment_cost): super(VectorQuantizer, self).__init__() self._embedding_dim = embedding_dim self._num_embeddings = num_embeddings ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
acyclics/neurips2020-procgen-starter-kit
VectorQuantizer
false
1,384
[ "Apache-2.0" ]
0
16d52eb72d41c6b808c20644501710842134add4
https://github.com/acyclics/neurips2020-procgen-starter-kit/tree/16d52eb72d41c6b808c20644501710842134add4
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, num_embeddings, embedding_dim, commitment_cost): super().__init__() self._embedding_dim = embedding_dim self._num_embeddings = num_embeddings self._embedding = nn.Embeddin...
ANN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class ANN(nn.Module): def __init__(self, n_li, n_l1, n_l2, n_l3, n_lo): super(ANN, self).__init__() self.lin_in = nn.Linear(n_li, n_l1) self.lin_h1 = nn.Linear(n_l1, n_l2) self.lin_h2 = nn.Linear(n_l2, n_l3) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
aixpact/python
ANN
false
1,385
[ "MIT" ]
0
41256672472fec2c0f8bf6a9146c4053b16fd907
https://github.com/aixpact/python/tree/41256672472fec2c0f8bf6a9146c4053b16fd907
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, n_li, n_l1, n_l2, n_l3, n_lo): super().__init__() self.lin_in = nn.Linear(n_li, n_l1) self.lin_h1 = nn.Linear(n_l1, n_l2) self.lin_h2 = nn.Linear(n_l2, n_l3) self....
Affine
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data class Affine(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, x): ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty...
akarshkumar0101/timm-mlp-shaker
Affine
false
1,386
[ "Apache-2.0" ]
0
ab211dd137b790ac57f5ed924c2ada148d54a194
https://github.com/akarshkumar0101/timm-mlp-shaker/tree/ab211dd137b790ac57f5ed924c2ada148d54a194
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data class Model(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, x): ...
WeightedAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class WeightedAttention(nn.Module): def __init__(self, dim, eps=1e-08, softmax_dim=1, weighted_mean_dim=2): super().__init__() self.norm_input = nn.LayerNorm(dim) self.norm_context = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, dim) self.t...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
ajabri/slot-attention
WeightedAttention
false
1,387
[ "MIT" ]
0
32acb6614f1bd511f2dc3c263f852ed2dbe9c213
https://github.com/ajabri/slot-attention/tree/32acb6614f1bd511f2dc3c263f852ed2dbe9c213
import torch from torch import nn class Model(nn.Module): def __init__(self, dim, eps=1e-08, softmax_dim=1, weighted_mean_dim=2): super().__init__() self.norm_input = nn.LayerNorm(dim) self.norm_context = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, dim) self.to_k = nn.Lin...
GDN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.autograd import Function import torch import torch.nn.functional as F import torch.nn as nn class LowerBound(Function): @staticmethod def forward(ctx, inputs, bound): ctx.save_for_backward(inputs, inputs.new_ones(1) * bound) return inputs.clamp(min=bound) @staticmethod def...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
agr17/pytorch-msssim
GDN
false
1,388
[ "MIT" ]
0
69aec4113ccceafa5568d1191e98c1db525c0c0f
https://github.com/agr17/pytorch-msssim/tree/69aec4113ccceafa5568d1191e98c1db525c0c0f
from torch.autograd import Function import torch import torch.nn.functional as F import torch.nn as nn class LowerBound(Function): @staticmethod def forward(ctx, inputs, bound): ctx.save_for_backward(inputs, inputs.new_ones(1) * bound) return inputs.clamp(min=bound) @staticmethod def...
ResidualDenseBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torch.nn.modules.batchnorm import _BatchNorm from torchvision.models import vgg as vgg from torch import autograd as autograd @torch.no_grad() def default_init_weights(module_list, sc...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch.utils import data as data import torch.nn as ...
achrefjarray/ESRGANplus-master
ResidualDenseBlock
false
1,389
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torch.nn.modules.batchnorm import _BatchNorm from torchvision.models import vgg as vgg from torch import autograd as autograd @torch.no_grad() def default_init_weights(module_list, sc...
EncoderImagePrecomp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.nn.init from collections import OrderedDict def l2norm(X): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt() X = torch.div(X, norm) return X class EncoderImagePrecomp(nn.Module): def __i...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np ...
akurniawan/jina-hub
EncoderImagePrecomp
false
1,390
[ "Apache-2.0" ]
0
d89bc5e8f527f1212c3228a15775e222983c0087
https://github.com/akurniawan/jina-hub/tree/d89bc5e8f527f1212c3228a15775e222983c0087
import torch import numpy as np import torch.nn as nn import torch.nn.init from collections import OrderedDict def l2norm(X): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt() X = torch.div(X, norm) return X class Model(nn.Module): def __init__(self, im...
_ShiftedSoftPlus
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import math import torch import torch.jit import torch.nn.functional import torch.nn class _ShiftedSoftPlus(torch.nn.Module): """ Shifted softplus as defined in SchNet, NeurIPS 2017. :param beta: value for the a more general softplus, default = 1 :param threshold: values above are linear function, de...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.jit import torch.nn.functional import...
albertzhu01/nequip
_ShiftedSoftPlus
false
1,391
[ "MIT" ]
0
63ba41185e7852ebb6f68983ec30d1f569e43271
https://github.com/albertzhu01/nequip/tree/63ba41185e7852ebb6f68983ec30d1f569e43271
import math import torch import torch.jit import torch.nn.functional import torch.nn class Model(torch.nn.Module): """ Shifted softplus as defined in SchNet, NeurIPS 2017. :param beta: value for the a more general softplus, default = 1 :param threshold: values above are linear function, default = 20 ...
SinActv
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class SinActv(nn.Module): """The sin activation function. """ def __init__(self): """Initializer method. """ super().__init__() def forward(self, input_): return torch.sin(input_) def get_inputs(): return [torch.rand([4, 4, 4, ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert...
aksj98/neurodiffeq
SinActv
false
1,392
[ "MIT" ]
0
f0ee8ee1e78f2aeee97a4e1fc887ebe0b50749c8
https://github.com/aksj98/neurodiffeq/tree/f0ee8ee1e78f2aeee97a4e1fc887ebe0b50749c8
import torch import torch.nn as nn class Model(nn.Module): """The sin activation function. """ def __init__(self): """Initializer method. """ super().__init__() def forward(self, input_): return torch.sin(input_) def get_inputs(): return [torch.rand([4, 4, 4, 4]...
AlphaModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn from torch.nn.parameter import Parameter import torch.optim class AlphaModule(nn.Module): def __init__(self, shape): super(AlphaModule, self).__init__() if not isinstance(shape, tuple): shape = shape, self.alpha = Parameter(torch.rand(tuple([1...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn.parameter import Parameter import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_st...
albertozurli/mammoth
AlphaModule
false
1,393
[ "MIT" ]
0
849234afe084b4f707de5300e953a2a8c104ea36
https://github.com/albertozurli/mammoth/tree/849234afe084b4f707de5300e953a2a8c104ea36
import torch from torch import nn from torch.nn.parameter import Parameter import torch.optim class Model(nn.Module): def __init__(self, shape): super().__init__() if not isinstance(shape, tuple): shape = shape, self.alpha = Parameter(torch.rand(tuple([1] + list(shape))) * 0.1...
MultiHeadAttn
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.cuda from torch import nn import torch.nn.functional as F import torch.distributed import torch.optim class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
admariner/NeMo
MultiHeadAttn
false
1,394
[ "Apache-2.0" ]
0
e542d7f9063a40afa4119a3b94de4c2c636a37bb
https://github.com/admariner/NeMo/tree/e542d7f9063a40afa4119a3b94de4c2c636a37bb
import torch import torch.cuda from torch import nn import torch.nn.functional as F import torch.distributed import torch.optim class Model(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super().__init__() self.n_head = n_head self....
NormalizationLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.init class NormalizationLayer(torch.nn.Module): """Class for normalization layer.""" def __init__(self, normalize_scale=1.0, learn_scale=True): super(NormalizationLayer, self).__init__() self.norm_s = float(normalize_scale) if learn_scale: self...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_s...
akurniawan/jina-hub
NormalizationLayer
false
1,395
[ "Apache-2.0" ]
0
d89bc5e8f527f1212c3228a15775e222983c0087
https://github.com/akurniawan/jina-hub/tree/d89bc5e8f527f1212c3228a15775e222983c0087
import torch import torch.nn.init class Model(torch.nn.Module): """Class for normalization layer.""" def __init__(self, normalize_scale=1.0, learn_scale=True): super().__init__() self.norm_s = float(normalize_scale) if learn_scale: self.norm_s = torch.nn.Parameter(torch.Fl...
BesselBasis
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.jit import torch.nn.functional from torch import nn import torch.nn class BesselBasis(nn.Module): r_max: 'float' prefactor: 'float' def __init__(self, r_max, num_basis=8, trainable=True): """Radial Bessel Basis, as proposed in DimeNet: https://arxiv.org/abs/2...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.jit import torch.nn.functional from torch import...
albertzhu01/nequip
BesselBasis
false
1,396
[ "MIT" ]
0
63ba41185e7852ebb6f68983ec30d1f569e43271
https://github.com/albertzhu01/nequip/tree/63ba41185e7852ebb6f68983ec30d1f569e43271
import math import torch import torch.jit import torch.nn.functional from torch import nn import torch.nn class Model(nn.Module): r_max: 'float' prefactor: 'float' def __init__(self, r_max, num_basis=8, trainable=True): """Radial Bessel Basis, as proposed in DimeNet: https://arxiv.org/abs/2003.03...
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch.nn.modules.loss import _Loss class DiceLoss(_Loss): def __init__(self): super(DiceLoss, self).__init__() self.axis = 2, 3, 4 self.smooth = 1e-07 def forward(self, input, target): return 1.0 - self.dice_score(input, target) def dice_score(self, inp...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynam...
aledelmo/KDCompression
DiceLoss
false
1,397
[ "Apache-2.0" ]
0
030e7331f72ac8977964b6adb65d268c23d59130
https://github.com/aledelmo/KDCompression/tree/030e7331f72ac8977964b6adb65d268c23d59130
import torch from torch.nn.modules.loss import _Loss class Model(_Loss): def __init__(self): super().__init__() self.axis = 2, 3, 4 self.smooth = 1e-07 def forward(self, input, target): return 1.0 - self.dice_score(input, target) def dice_score(self, input, target): ...
AuxiliaryConvolutions
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.nn.functional as F from itertools import product as product import torch.optim import torch.utils.data class AuxiliaryConvolutions(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super(Auxilia...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from ite...
aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection
AuxiliaryConvolutions
false
1,398
[ "MIT" ]
0
a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
https://github.com/aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection/tree/a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
import torch from torch import nn import torch.nn.functional as F from itertools import product as product import torch.optim import torch.utils.data class Model(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super().__init__() se...
Recon_Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Recon_Block(nn.Module): def __init__(self, num_chans=64): super(Recon_Block, self).__init__() bias = True self.conv1 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride= 1, padding=1, bias=bias) self.relu2 = nn.PReLU() ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
albangossard/Course-inverse-problems-and-unrolled-networks
Recon_Block
false
1,399
[ "MIT" ]
0
0d4161c905149817e3abff9e70c101f36fac4270
https://github.com/albangossard/Course-inverse-problems-and-unrolled-networks/tree/0d4161c905149817e3abff9e70c101f36fac4270
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, num_chans=64): super().__init__() bias = True self.conv1 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride= 1, padding=1, bias=bias) self.relu2 = nn.PReLU() self.conv3 = nn.Conv...
Classification3DUnet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Classification3DUnet(nn.Module): def __init__(self, base_filters): super().__init__() self.conv = nn.Conv3d(in_channels=base_filters, out_channels=1, kernel_size=1, stride=1, padding=0) self.act = nn.Sigmoid() def forward(self, x):...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
aledelmo/KDCompression
Classification3DUnet
false
1,400
[ "Apache-2.0" ]
0
030e7331f72ac8977964b6adb65d268c23d59130
https://github.com/aledelmo/KDCompression/tree/030e7331f72ac8977964b6adb65d268c23d59130
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, base_filters): super().__init__() self.conv = nn.Conv3d(in_channels=base_filters, out_channels=1, kernel_size=1, stride=1, padding=0) self.act = nn.Sigmoid() def forward(self, x): conv_c...
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, a...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np ...
albimc/deep-reinforcement-learning
Actor
false
1,401
[ "MIT" ]
0
e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
https://github.com/albimc/deep-reinforcement-learning/tree/e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, a...
Policy
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F from math import * from torch.distributions import Categorical class Policy(nn.Module): def __init__(self, s_size=4, h_size=16, a_size=2): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.L...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
albimc/deep-reinforcement-learning
Policy
false
1,402
[ "MIT" ]
0
e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
https://github.com/albimc/deep-reinforcement-learning/tree/e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
import torch import torch.nn as nn import torch.nn.functional as F from math import * from torch.distributions import Categorical class Model(nn.Module): def __init__(self, s_size=4, h_size=16, a_size=2): super().__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size,...
MINCNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd class MINCNet(nn.Module): def __init__(self): super(MINCNet, self).__init__() self.ReL...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from ...
achrefjarray/ESRGANplus-master
MINCNet
false
1,403
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd class Model(nn.Module): def __init__(self): super().__init__() self.ReLU = nn.ReLU(Tru...
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn import torch.nn.functional as F class FocalLoss(nn.Module): def __init__(self, alpha=0.25, gamma=2, with_logits=True, reduction: 'str'='mean'): """ https://github.com/mathiaszinnen/focal_loss_torch/blob/main/focal_loss/focal_loss.py https://arxiv....
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn a...
alexander-soare/PyTorch-Custom
FocalLoss
false
1,404
[ "Apache-2.0" ]
0
f4f9865f960806f7e05d55ea259e861ee2d7c6dc
https://github.com/alexander-soare/PyTorch-Custom/tree/f4f9865f960806f7e05d55ea259e861ee2d7c6dc
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, alpha=0.25, gamma=2, with_logits=True, reduction: 'str'='mean'): """ https://github.com/mathiaszinnen/focal_loss_torch/blob/main/focal_loss/focal_loss.py https://arxiv.org/...
Foo
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed class Foo(torch.nn.Module): def __init__(self, size): super(Foo, self).__init__() self.n = torch.nn.Parameter(torch.ones(size)) self.m = torch.nn...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed assert_si...
alexshuang/apex
Foo
false
1,405
[ "BSD-3-Clause" ]
0
107f1ff569c40769de2ed8d366126282e63b63ce
https://github.com/alexshuang/apex/tree/107f1ff569c40769de2ed8d366126282e63b63ce
import torch import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed class Model(torch.nn.Module): def __init__(self, size): super().__init__() self.n = torch.nn.Parameter(torch.ones(size)) self.m = torch.nn.Parame...
Conv1DSame
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn class Conv1DSame(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, groups=1, bias=True, stride=1): super(Conv1DSame, self).__init__() p = (kernel_size - 1) / 2 self.padding = nn.ConstantPad1d((math.floor(p), math....
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.a...
alexchartrand/IoT
Conv1DSame
false
1,406
[ "MIT" ]
0
2cc0d40b7f8305b9f82fc83ad4ed55c83efa1bfd
https://github.com/alexchartrand/IoT/tree/2cc0d40b7f8305b9f82fc83ad4ed55c83efa1bfd
import math import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, groups=1, bias=True, stride=1): super().__init__() p = (kernel_size - 1) / 2 self.padding = nn.ConstantPad1d((math.floor(p), math.ceil(p)), 0.0) ...
GE2ELoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F def calc_loss(sim_matrix): same_idx = list(range(sim_matrix.size(0))) pos = sim_matrix[same_idx, :, same_idx] neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_() per_embedding_loss = -1 * (pos - neg) loss = per_embedding_loss.s...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
akuz91/SpeakerVerificationEmbedding
GE2ELoss
false
1,407
[ "BSD-3-Clause" ]
0
461d10c2bc34e70f3eb2798bcae803d2ca00f16b
https://github.com/akuz91/SpeakerVerificationEmbedding/tree/461d10c2bc34e70f3eb2798bcae803d2ca00f16b
import torch import torch.nn as nn import torch.nn.functional as F def calc_loss(sim_matrix): same_idx = list(range(sim_matrix.size(0))) pos = sim_matrix[same_idx, :, same_idx] neg = (torch.exp(sim_matrix).sum(dim=2) + 1e-06).log_() per_embedding_loss = -1 * (pos - neg) loss = per_embedding_loss.s...
BCELabelSmoothingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn import torch.nn.functional as F class BCELabelSmoothingLoss(nn.Module): """ Binary Cross Entropy Loss with label smoothing, takes logits """ def __init__(self, smoothing): """ `smoothing` is the smoothing factor. How much less confident than 100% ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch ...
alexander-soare/PyTorch-Custom
BCELabelSmoothingLoss
false
1,408
[ "Apache-2.0" ]
0
f4f9865f960806f7e05d55ea259e861ee2d7c6dc
https://github.com/alexander-soare/PyTorch-Custom/tree/f4f9865f960806f7e05d55ea259e861ee2d7c6dc
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): """ Binary Cross Entropy Loss with label smoothing, takes logits """ def __init__(self, smoothing): """ `smoothing` is the smoothing factor. How much less confident than 100% are we on t...
TSAFusion
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd class TSAFusion(nn.Module): """Temporal Spatial Attention (TSA) fusion module. Temporal: Calculate...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from ...
achrefjarray/ESRGANplus-master
TSAFusion
false
1,409
[ "Apache-2.0" ]
0
ba470ec5c565a6dc8b48575b1e185ef6b796aec6
https://github.com/achrefjarray/ESRGANplus-master/tree/ba470ec5c565a6dc8b48575b1e185ef6b796aec6
import torch import torch.utils.data from torch.utils import data as data import torch.nn as nn from torch.nn import init as init from torchvision.models import vgg as vgg from torch import autograd as autograd class Model(nn.Module): """Temporal Spatial Attention (TSA) fusion module. Temporal: Calculate the...
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn from math import * assert_size_stride =...
albimc/deep-reinforcement-learning
Critic
false
1,410
[ "MIT" ]
0
e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
https://github.com/albimc/deep-reinforcement-learning/tree/e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, a...
DownSampleConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class DownSampleConv(nn.Module): def __init__(self, in_feature, out_feature, kernel): super(DownSampleConv, self).__init__() self.conv = nn.Conv1d(in_feature, out_feature, kernel_size=kernel, stride=2, padding=kernel // 2, groups=in_feature) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
alexchartrand/IoT
DownSampleConv
false
1,411
[ "MIT" ]
0
2cc0d40b7f8305b9f82fc83ad4ed55c83efa1bfd
https://github.com/alexchartrand/IoT/tree/2cc0d40b7f8305b9f82fc83ad4ed55c83efa1bfd
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_feature, out_feature, kernel): super().__init__() self.conv = nn.Conv1d(in_feature, out_feature, kernel_size=kernel, stride=2, padding=kernel // 2, groups=in_feature) self._init_weigth() def ...
Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class Conv(nn.Module): """ Convenience class that does padding and convolution for inputs in the format [batch_size, sequence length, hidden size] """ def __init__(self, input_size, output_size, kernel_size, pad_type): """ ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
alifkurniawan/tesis
Conv
false
1,412
[ "MIT" ]
0
6330dba32f5dc12785e956875c94d83344d788a8
https://github.com/alifkurniawan/tesis/tree/6330dba32f5dc12785e956875c94d83344d788a8
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """ Convenience class that does padding and convolution for inputs in the format [batch_size, sequence length, hidden size] """ def __init__(self, input_size, output_size, kernel_size, pad_type): """ ...
TorchModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn class TorchLinearModule(torch.nn.Module): def __init__(self, in_size, out_size): super(TorchLinearModule, self).__init__() self._linear = torch.nn.Linear(in_size, out_size) def forward(self, x): return self._linear(x) class TorchModule(torch.nn.Module):...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn ass...
alierenak/ivy
TorchModule
false
1,413
[ "Apache-2.0" ]
0
6e91bae159101abbac904a0dd37d0f59daaa75e3
https://github.com/alierenak/ivy/tree/6e91bae159101abbac904a0dd37d0f59daaa75e3
import torch import torch.nn class TorchLinearModule(torch.nn.Module): def __init__(self, in_size, out_size): super().__init__() self._linear = torch.nn.Linear(in_size, out_size) def forward(self, x): return self._linear(x) class Model(torch.nn.Module): def __init__(self, in_s...
PSNRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn from torch.nn.functional import mse_loss def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float' ) ->torch.Tensor: """Function that computes PSNR See :class:`~kornia.losses.PSNRLoss` for details. """ if not torch.is_tensor(input) or not tor...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from t...
alopezgit/kornia
PSNRLoss
false
1,414
[ "ECL-2.0", "Apache-2.0" ]
0
7bf47ae472012d2d6cf24463a76e8089daa65d0b
https://github.com/alopezgit/kornia/tree/7bf47ae472012d2d6cf24463a76e8089daa65d0b
import torch import torch.nn as nn from torch.nn.functional import mse_loss def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float' ) ->torch.Tensor: """Function that computes PSNR See :class:`~kornia.losses.PSNRLoss` for details. """ if not torch.is_tensor(input) or not tor...
LayerNormalization
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class LayerNormalization(nn.Module): def __init__(self, d_hid, eps=1e-06): super(LayerNormalization, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...
alisure-fork/CONTA
LayerNormalization
false
1,415
[ "MIT" ]
0
dde3e5083f45598d859dde889de3ae85c7a416e9
https://github.com/alisure-fork/CONTA/tree/dde3e5083f45598d859dde889de3ae85c7a416e9
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, d_hid, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, z): mean = z.mean(dim=-1, kee...
BoxFilter
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torchvision.transforms import functional as F from torch import nn from torch.nn import functional as F class BoxFilter(nn.Module): def __init__(self, r): super(BoxFilter, self).__init__() self.r = r def forward(self, x): kernel_size = 2 * self.r + 1 kernel_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_st...
allen0125/RobustVideoMatting
BoxFilter
false
1,416
[ "Apache-2.0" ]
0
c0f17ca45a9de7586c570753064187200dec487a
https://github.com/allen0125/RobustVideoMatting/tree/c0f17ca45a9de7586c570753064187200dec487a
import torch from torchvision.transforms import functional as F from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, r): super().__init__() self.r = r def forward(self, x): kernel_size = 2 * self.r + 1 kernel_x = torch.full((x.d...
ScModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch as t import torch.nn as nn from torch.nn.parameter import Parameter class ScModel(nn.Module): """ Model for single cell data """ def __init__(self, n_genes: 'int', n_celltypes: 'int', device: 't.device' ) ->None: super().__init__() self.K = n_celltypes ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
almaan/STereoSCope
ScModel
false
1,417
[ "MIT" ]
0
8f6a2021b6cb73aecda14f6bbbd25e26bfc9301a
https://github.com/almaan/STereoSCope/tree/8f6a2021b6cb73aecda14f6bbbd25e26bfc9301a
import torch import torch as t import torch.nn as nn from torch.nn.parameter import Parameter class Model(nn.Module): """ Model for single cell data """ def __init__(self, n_genes: 'int', n_celltypes: 'int', device: 't.device' ) ->None: super().__init__() self.K = n_celltypes ...
PoswiseFeedForwardNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class LayerNormalization(nn.Module): def __init__(self, d_hid, eps=1e-06): super(LayerNormalization, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
alisure-fork/CONTA
PoswiseFeedForwardNet
false
1,418
[ "MIT" ]
0
dde3e5083f45598d859dde889de3ae85c7a416e9
https://github.com/alisure-fork/CONTA/tree/dde3e5083f45598d859dde889de3ae85c7a416e9
import torch import torch.nn as nn class LayerNormalization(nn.Module): def __init__(self, d_hid, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, z): mean = z.mea...
Pool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class shift(nn.Module): def __init__(self): super().__init__() self.shift_...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
amonod/udvd
Pool
false
1,419
[ "MIT" ]
0
a1ccb777d205255ac68c40efb93dd3996f562c45
https://github.com/amonod/udvd/tree/a1ccb777d205255ac68c40efb93dd3996f562c45
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class shift(nn.Module): def __init__(self): super().__init__() self.shift_...
FastGuidedFilter
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torchvision.transforms import functional as F from torch import nn from torch.nn import functional as F class BoxFilter(nn.Module): def __init__(self, r): super(BoxFilter, self).__init__() self.r = r def forward(self, x): kernel_size = 2 * self.r + 1 kernel_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torchvision.transforms i...
allen0125/RobustVideoMatting
FastGuidedFilter
false
1,420
[ "Apache-2.0" ]
0
c0f17ca45a9de7586c570753064187200dec487a
https://github.com/allen0125/RobustVideoMatting/tree/c0f17ca45a9de7586c570753064187200dec487a
import torch from torchvision.transforms import functional as F from torch import nn from torch.nn import functional as F class BoxFilter(nn.Module): def __init__(self, r): super().__init__() self.r = r def forward(self, x): kernel_size = 2 * self.r + 1 kernel_x = torch.full(...
MaxMarginCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class MaxMarginCriterion(nn.Module): def __init__(self, visual_rank_weight, lang_rank_weight, margin): super(MaxMarginCriterion, self).__init__() self.visual_rank = visual_rank_weight > 0 self.lang_rank = lang_rank_weight > 0 self.visual_rank_wei...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
andfoy/MAttNet
MaxMarginCriterion
false
1,421
[ "MIT" ]
0
defa58649951ab8f6a7dcca25475e91f5e53ffcf
https://github.com/andfoy/MAttNet/tree/defa58649951ab8f6a7dcca25475e91f5e53ffcf
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, visual_rank_weight, lang_rank_weight, margin): super().__init__() self.visual_rank = visual_rank_weight > 0 self.lang_rank = lang_rank_weight > 0 self.visual_rank_weight = visual_rank_weight self...
SoftTree
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np class SoftTree(torch.nn.Module): """Soft decision tree.""" def __init__(self, in_features, out_features, depth, projection= 'constant', dropout=0.0): super(SoftTree, self).__init__() self.proj = projection self.depth = depth self.in_feat...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stri...
alper111/symbol-emergence
SoftTree
false
1,422
[ "MIT" ]
0
a4abd5d26b6fb36fe1ab3d6304a257df29be8e2c
https://github.com/alper111/symbol-emergence/tree/a4abd5d26b6fb36fe1ab3d6304a257df29be8e2c
import torch import numpy as np class Model(torch.nn.Module): """Soft decision tree.""" def __init__(self, in_features, out_features, depth, projection= 'constant', dropout=0.0): super().__init__() self.proj = projection self.depth = depth self.in_features = in_feature...
RobertaClassificationHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
MCplayerFromPRC/CodeT5
RobertaClassificationHead
false
1,423
[ "BSD-3-Clause" ]
0
726ed4a4221be125b6083439667573ef5fef6984
https://github.com/MCplayerFromPRC/CodeT5/tree/726ed4a4221be125b6083439667573ef5fef6984
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Model(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.out_proj ...
FractionProposalModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn from torch.nn import functional as F class FractionProposalModel(nn.Module): def __init__(self, in_dim, out_dim): super(FractionProposalModel, self).__init__() self.in_dim = in_dim self.out_dim = out_dim self.layer = nn.Linear(self.in_dim, self.ou...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
alirezakazemipour/Distributional-RL
FractionProposalModel
false
1,424
[ "MIT" ]
0
a3de3a1707bdd57a420f85c6d64a3fb84fb075af
https://github.com/alirezakazemipour/Distributional-RL/tree/a3de3a1707bdd57a420f85c6d64a3fb84fb075af
import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.layer = nn.Linear(self.in_dim, self.out_dim) nn.init.xavier_uniform_(self...
unrotate
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class unrotate(nn.Module): def __init__(self): super().__init__() def forward(self, x): x0, x90, x180, x270 = torch.chunk(x, 4, dim=0) x90 = x90.transpose(2, 3).flip(2) x180 = x180.flip(2).flip(3) x270 = x270.transpose(2, 3).flip(3) ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...
amonod/udvd
unrotate
false
1,425
[ "MIT" ]
0
a1ccb777d205255ac68c40efb93dd3996f562c45
https://github.com/amonod/udvd/tree/a1ccb777d205255ac68c40efb93dd3996f562c45
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): x0, x90, x180, x270 = torch.chunk(x, 4, dim=0) x90 = x90.transpose(2, 3).flip(2) x180 = x180.flip(2).flip(3) x270 = x270.transpose(2, 3).flip(3) ...
SEModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
amajidsinar/seer
SEModule
false
1,426
[ "Apache-2.0" ]
0
35f25b3fbf22968f0b09c266b8fd66a44fcc4d9c
https://github.com/amajidsinar/seer/tree/35f25b3fbf22968f0b09c266b8fd66a44fcc4d9c
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, channels, reduction): super().__init__() self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // red...
MNIST_CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class MNIST_CNN(nn.Module): """ Hand-tuned architecture for MNIST. Weirdness I've noticed so far with this architecture: - adding a linear layer after the mean-pool in features hurts RotatedMNIST-100 gen...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
alceubissoto/DomainBed
MNIST_CNN
false
1,427
[ "MIT" ]
0
80d54050f52fb5349e2a47c0674046e6d0674f3d
https://github.com/alceubissoto/DomainBed/tree/80d54050f52fb5349e2a47c0674046e6d0674f3d
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Model(nn.Module): """ Hand-tuned architecture for MNIST. Weirdness I've noticed so far with this architecture: - adding a linear layer after the mean-pool in features hurts RotatedMNIST-100 general...
shift
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class shift(nn.Module): def __init__(self): super().__init__() self.shift_...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...
amonod/udvd
shift
false
1,428
[ "MIT" ]
0
a1ccb777d205255ac68c40efb93dd3996f562c45
https://github.com/amonod/udvd/tree/a1ccb777d205255ac68c40efb93dd3996f562c45
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class Model(nn.Module): def __init__(self): super().__init__() self.shift_...
rotate
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class rotate(nn.Module): def __init__(self): super().__init__() def forward(self, x): x90 = x.transpose(2, 3).flip(3) x180 = x.flip(2).flip(3) x270 = x.transpose(2, 3).flip(2) x = torch.cat((x, x90, x180, x270), dim=0) return...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...
amonod/udvd
rotate
false
1,429
[ "MIT" ]
0
a1ccb777d205255ac68c40efb93dd3996f562c45
https://github.com/amonod/udvd/tree/a1ccb777d205255ac68c40efb93dd3996f562c45
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): x90 = x.transpose(2, 3).flip(3) x180 = x.flip(2).flip(3) x270 = x.transpose(2, 3).flip(2) x = torch.cat((x, x90, x180, x270), dim=0) return ...
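The forward pass of this rotate module is fully visible in the record: it concatenates the input with its 90-, 180- and 270-degree rotations along the batch dimension. A self-contained sketch of that behaviour:

import torch
import torch.nn as nn

class Rotate(nn.Module):
    """Stack x with its three axis-aligned rotations along the batch axis."""
    def forward(self, x):
        x90 = x.transpose(2, 3).flip(3)    # rotate 90 degrees
        x180 = x.flip(2).flip(3)           # rotate 180 degrees
        x270 = x.transpose(2, 3).flip(2)   # rotate 270 degrees
        return torch.cat((x, x90, x180, x270), dim=0)

x = torch.randn(4, 4, 4, 4)
assert Rotate()(x).shape == (16, 4, 4, 4)  # batch grows by a factor of 4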
CmapPafHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels els...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn import torch.optim assert_size_stride = ...
andreiday/trt_pose_motion_tracking_robot_arm
CmapPafHead
false
1,430
[ "MIT" ]
0
4ada47f6f12e18ce14ee53a22540e02923745dd8
https://github.com/andreiday/trt_pose_motion_tracking_robot_arm/tree/4ada47f6f12e18ce14ee53a22540e02923745dd8
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels els...
MAP_Linear_Layer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn class MAP_Linear_Layer(nn.Module): def __init__(self, n_input, n_output): super(MAP_Linear_Layer, self).__init__() self.weight = nn.Parameter(torch.Tensor(n_input, n_output).normal_( 0, 1 / np.sqrt(4 * n_output))) self.bias...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.g...
andrewfoongyk/cs230-code-examples
MAP_Linear_Layer
false
1,431
[ "MIT" ]
0
8e12aa3414bdada6ec6002bedf919a6816ba237c
https://github.com/andrewfoongyk/cs230-code-examples/tree/8e12aa3414bdada6ec6002bedf919a6816ba237c
import torch import numpy as np import torch.nn as nn class Model(nn.Module): def __init__(self, n_input, n_output): super().__init__() self.weight = nn.Parameter(torch.Tensor(n_input, n_output).normal_( 0, 1 / np.sqrt(4 * n_output))) self.bias = nn.Parameter(torch.Tensor(n_ou...
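The record shows the weight initialisation of MAP_Linear_Layer (normal with std 1/sqrt(4 * n_output)) but truncates the bias and the forward pass. A minimal sketch assuming a zero-initialised bias and a plain affine forward — both of which are assumptions, not taken from the record:

import numpy as np
import torch
import torch.nn as nn

class MAPLinearSketch(nn.Module):
    def __init__(self, n_input, n_output):
        super().__init__()
        # Visible in the record: weights drawn from N(0, 1/sqrt(4 * n_output)).
        self.weight = nn.Parameter(
            torch.Tensor(n_input, n_output).normal_(0, 1 / np.sqrt(4 * n_output)))
        # Assumption: bias shape and init are not shown in the truncated record.
        self.bias = nn.Parameter(torch.zeros(n_output))

    def forward(self, x):
        # Assumption: a standard affine map x @ W + b.
        return x.matmul(self.weight) + self.bias

out = MAPLinearSketch(4, 8)(torch.randn(2, 4))
assert out.shape == (2, 8)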
LossD
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn from torch.nn import functional as F class LossD(nn.Module): def __init__(self, gpu=None): super(LossD, self).__init__() self.gpu = gpu if gpu is not None: self def forward(self, r_x, r_x_hat): if self.gpu is not None: ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
andy6804tw/talking-hands-API
LossD
false
1,432
[ "MIT" ]
0
4895c980565082b0fdcabbc704ee871855e6d5f5
https://github.com/andy6804tw/talking-hands-API/tree/4895c980565082b0fdcabbc704ee871855e6d5f5
import torch import torch.nn as nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, gpu=None): super().__init__() self.gpu = gpu if gpu is not None: self def forward(self, r_x, r_x_hat): if self.gpu is not None: r_x = r_x...
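The LossD record truncates before the loss body; only the signature (r_x, r_x_hat) and an optional gpu flag are visible. As one plausible reading — and only that — the sketch below uses a hinge-style discriminator loss over real scores r_x and generated scores r_x_hat; the actual formula used by the repository is not shown in the record.

import torch
import torch.nn as nn
from torch.nn import functional as F

class LossDSketch(nn.Module):
    """Hypothetical body: hinge discriminator loss. This is an assumption
    consistent with the visible signature, not the record's own code."""
    def forward(self, r_x, r_x_hat):
        return (F.relu(1.0 - r_x) + F.relu(1.0 + r_x_hat)).mean()

loss = LossDSketch()(torch.randn(4, 1), torch.randn(4, 1))
assert loss.dim() == 0  # scalar loss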
CompActor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch class CompActor(torch.nn.Module): def __init__(self, state_dim: 'int', hidden_dim: 'int', action_dim: 'int'): super(CompActor, self).__init__() self.fc1 = torch.nn.Linear(state_dim, hidden_dim) self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim) self.fc3 = torch.nn.Lin...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C...
andreabradpitto/turtlex
CompActor
false
1,433
[ "Apache-2.0" ]
0
37a2315450f896d10dcb9ebc8968207e476dcf82
https://github.com/andreabradpitto/turtlex/tree/37a2315450f896d10dcb9ebc8968207e476dcf82
import torch class Model(torch.nn.Module): def __init__(self, state_dim: 'int', hidden_dim: 'int', action_dim: 'int'): super().__init__() self.fc1 = torch.nn.Linear(state_dim, hidden_dim) self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim) self.fc3 = torch.nn.Linear(hidden_dim, act...
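The CompActor record shows the three linear layers but truncates before the forward pass. A sketch with ReLU hidden activations and a tanh-squashed action head — a common actor layout, used here only as an assumption:

import torch

class CompActorSketch(torch.nn.Module):
    def __init__(self, state_dim: int, hidden_dim: int, action_dim: int):
        super().__init__()
        # Layer stack as visible in the record.
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, state):
        # Assumption: ReLU activations and a tanh output squash;
        # the record truncates before the forward body.
        x = torch.relu(self.fc1(state))
        x = torch.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))

assert CompActorSketch(4, 32, 2)(torch.randn(8, 4)).shape == (8, 2)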
Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class Conv(nn.Module): def __init__(self, in_channels, out_channels, bias=False, blind=Tru...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
amonod/udvd
Conv
false
1,434
[ "MIT" ]
0
a1ccb777d205255ac68c40efb93dd3996f562c45
https://github.com/amonod/udvd/tree/a1ccb777d205255ac68c40efb93dd3996f562c45
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class Model(nn.Module): def __init__(self, in_channels, out_channels, bias=False, blind=Tr...
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.optim as optim from collections import OrderedDict class MLP(nn.Module): def __init__(self, input_size, output_size): super(MLP, self).__init__() self.input_size = input_size self.output_size = output_size self.mlp...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import tor...
andreasbinder/Stochastic-Graph-assisted-Genre-Classification
MLP
false
1,435
[ "MIT" ]
0
78752716030466f02424dcf1cbe5a66d756a13c4
https://github.com/andreasbinder/Stochastic-Graph-assisted-Genre-Classification/tree/78752716030466f02424dcf1cbe5a66d756a13c4
import torch import numpy as np import torch.nn as nn import torch.optim as optim from collections import OrderedDict class Model(nn.Module): def __init__(self, input_size, output_size): super().__init__() self.input_size = input_size self.output_size = output_size self.mlp = nn.S...
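This MLP record truncates right as the nn.Sequential is being built ("self.mlp = nn.S..."), so only the input/output sizes and the OrderedDict import are visible. The sketch below fills the gap with an assumed two-layer body and hidden width; those choices are illustrative, not taken from the record.

import torch
import torch.nn as nn
from collections import OrderedDict

class MLPSketch(nn.Module):
    def __init__(self, input_size, output_size, hidden_size=64):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        # Assumption: depth, width and activation are not shown in the record.
        self.mlp = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(input_size, hidden_size)),
            ('relu1', nn.ReLU()),
            ('fc2', nn.Linear(hidden_size, output_size)),
        ]))

    def forward(self, x):
        return self.mlp(x)

assert MLPSketch(4, 3)(torch.randn(5, 4)).shape == (5, 3)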
N3
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils from typing import Tuple from abc import ABC from abc import abstractmethod import torch.utils.data class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class N3(Regularizer): def __init...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils from typing import Tuple from ab...
angusl95/darts-kbc
N3
false
1,436
[ "Apache-2.0" ]
0
85fc6f4bdb7ba73c07d96ce47e96634599b346f9
https://github.com/angusl95/darts-kbc/tree/85fc6f4bdb7ba73c07d96ce47e96634599b346f9
import torch import torch.nn as nn import torch.utils from typing import Tuple from abc import ABC from abc import abstractmethod import torch.utils.data class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class Model(Regularizer): def __i...
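The N3 record defines an abstract Regularizer over a tuple of embedding factors but truncates before the penalty itself. The sketch below uses the cubed-norm penalty usually called N3 in knowledge-base-completion work; the weighting and normalisation are assumptions, not read from the record.

import torch
import torch.nn as nn
from typing import Tuple

class N3Sketch(nn.Module):
    """Sketch of a cubed-norm regularizer over embedding factors."""
    def __init__(self, weight: float):
        super().__init__()
        self.weight = weight

    def forward(self, factors: Tuple[torch.Tensor, ...]):
        norm = 0.0
        for f in factors:
            # Assumption: per-factor |f|^3 penalty averaged over the batch dimension.
            norm += self.weight * torch.sum(torch.abs(f) ** 3) / f.shape[0]
        return norm

reg = N3Sketch(1e-3)((torch.randn(4, 4), torch.randn(4, 4)))
assert reg.dim() == 0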
Stack
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.utils import torch.utils.data class Stack(torch.nn.Module): def __init__(self, repeats): super(Stack, self).__init__() self.repeats = repeats def forward(self, x): x = torch.repeat_interleave(x, self.repeats, dim=1) return x def get_inputs(): r...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dy...
angusl95/darts-kbc
Stack
false
1,437
[ "Apache-2.0" ]
0
85fc6f4bdb7ba73c07d96ce47e96634599b346f9
https://github.com/angusl95/darts-kbc/tree/85fc6f4bdb7ba73c07d96ce47e96634599b346f9
import torch import torch.utils import torch.utils.data class Model(torch.nn.Module): def __init__(self, repeats): super().__init__() self.repeats = repeats def forward(self, x): x = torch.repeat_interleave(x, self.repeats, dim=1) return x def get_inputs(): return [torc...
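The Stack module is fully visible in the record: it repeats every channel `repeats` times along dim=1 via torch.repeat_interleave. A self-contained sketch with a usage check:

import torch

class Stack(torch.nn.Module):
    """Repeat each entry along dim=1 `repeats` times, as in the record."""
    def __init__(self, repeats):
        super().__init__()
        self.repeats = repeats

    def forward(self, x):
        return torch.repeat_interleave(x, self.repeats, dim=1)

x = torch.randn(2, 3, 4)
assert Stack(4)(x).shape == (2, 12, 4)  # channels repeated in place: a,a,a,a,b,b,b,b,...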
StatsNet
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class StatsNet(nn.Module): def __init__(self): super(StatsNet, self).__init__() def forward(self, x): x = x.view(x.data.shape[0], x.data.shape[1], x.data.shape[2] * x. data.shape[3]) mean = torch.mean(x, 2) std = torch.std(x, 2) ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...
angelicagardner/ensemble-cnn-deepfakes-detection
StatsNet
false
1,438
[ "BSD-3-Clause" ]
0
8740d2317848250249c741e0af5c4cbbe2d8af46
https://github.com/angelicagardner/ensemble-cnn-deepfakes-detection/tree/8740d2317848250249c741e0af5c4cbbe2d8af46
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): x = x.view(x.data.shape[0], x.data.shape[1], x.data.shape[2] * x. data.shape[3]) mean = torch.mean(x, 2) std = torch.std(x, 2) return to...
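StatsNet's forward is visible up to the truncated return: it flattens the spatial dims and takes per-channel mean and std. The sketch below assumes the truncated "return to..." stacks the two statistics into a (N, C, 2) tensor; that stacking is an assumption.

import torch
import torch.nn as nn

class StatsNetSketch(nn.Module):
    def forward(self, x):
        # Flatten H*W and take per-channel statistics, as visible in the record.
        x = x.view(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
        mean = torch.mean(x, 2)
        std = torch.std(x, 2)
        # Assumption: the truncated return stacks mean and std along a new last dim.
        return torch.stack((mean, std), dim=2)

assert StatsNetSketch()(torch.randn(2, 3, 8, 8)).shape == (2, 3, 2)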
AdaIn
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class AdaIn(nn.Module): def __init__(self): super(AdaIn, self).__init__() self.eps = 1e-05 def forward(self, x, mean_style, std_style): B, C, H, W = x.shape feature = x.view(B, C, -1) std_feat = (torch.std(feature, dim=2) + self.eps)...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...
andy6804tw/talking-hands-API
AdaIn
false
1,439
[ "MIT" ]
0
4895c980565082b0fdcabbc704ee871855e6d5f5
https://github.com/andy6804tw/talking-hands-API/tree/4895c980565082b0fdcabbc704ee871855e6d5f5
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.eps = 1e-05 def forward(self, x, mean_style, std_style): B, C, H, W = x.shape feature = x.view(B, C, -1) std_feat = (torch.std(feature, dim=2) + self.eps).view(B, C,...
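The AdaIn record shows the eps constant, the (B, C, -1) flattening and the std-of-feature line before truncating. The sketch below completes it with the standard adaptive-instance-normalization recombination (normalize by content statistics, then rescale and shift by the style statistics); everything past the std line is an assumption following that standard formulation.

import torch
import torch.nn as nn

class AdaInSketch(nn.Module):
    def __init__(self):
        super().__init__()
        self.eps = 1e-05

    def forward(self, x, mean_style, std_style):
        B, C, H, W = x.shape
        feature = x.view(B, C, -1)
        # Visible in the record: per-channel std of the content feature.
        std_feat = (torch.std(feature, dim=2) + self.eps).view(B, C, 1, 1)
        # Assumptions from here on: matching mean, normalization, and style recombination.
        mean_feat = torch.mean(feature, dim=2).view(B, C, 1, 1)
        normalized = (x - mean_feat) / std_feat
        return normalized * std_style.view(B, C, 1, 1) + mean_style.view(B, C, 1, 1)

x = torch.randn(2, 3, 8, 8)
assert AdaInSketch()(x, torch.randn(2, 3), torch.randn(2, 3)).shape == x.shape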
DEC_Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class Conv(nn.Module): def __init__(self, in_channels, out...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
amonod/udvd
DEC_Conv
false
1,440
[ "MIT" ]
0
a1ccb777d205255ac68c40efb93dd3996f562c45
https://github.com/amonod/udvd/tree/a1ccb777d205255ac68c40efb93dd3996f562c45
import torch import torch.nn as nn import torch.nn.functional as F class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class Conv(nn.Module): def __init__(self, in_channels, out...
ENC_Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class shift(nn.Module): def __init__(self): super().__init__() self.shift_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
amonod/udvd
ENC_Conv
false
1,441
[ "MIT" ]
0
a1ccb777d205255ac68c40efb93dd3996f562c45
https://github.com/amonod/udvd/tree/a1ccb777d205255ac68c40efb93dd3996f562c45
import torch import torch.nn as nn class crop(nn.Module): def __init__(self): super().__init__() def forward(self, x): N, C, H, W = x.shape x = x[0:N, 0:C, 0:H - 1, 0:W] return x class shift(nn.Module): def __init__(self): super().__init__() self.shift_...
N2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils from typing import Tuple from abc import ABC from abc import abstractmethod import torch.utils.data class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class N2(Regularizer): def __init...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils from typing import Tuple from abc impo...
angusl95/darts-kbc
N2
false
1,442
[ "Apache-2.0" ]
0
85fc6f4bdb7ba73c07d96ce47e96634599b346f9
https://github.com/angusl95/darts-kbc/tree/85fc6f4bdb7ba73c07d96ce47e96634599b346f9
import torch import torch.nn as nn import torch.utils from typing import Tuple from abc import ABC from abc import abstractmethod import torch.utils.data class Regularizer(nn.Module, ABC): @abstractmethod def forward(self, factors: 'Tuple[torch.Tensor]'): pass class Model(Regularizer): def __i...
_Residual_Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class _Residual_Block(nn.Module): def __init__(self, num_chans=64): super(_Residual_Block, self).__init__() bias = True self.conv1 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride= 1, padding=1, bias=bias) self.relu2 = nn.PReLU...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
albangossard/Course-inverse-problems-and-unrolled-networks
_Residual_Block
false
1,443
[ "MIT" ]
0
0d4161c905149817e3abff9e70c101f36fac4270
https://github.com/albangossard/Course-inverse-problems-and-unrolled-networks/tree/0d4161c905149817e3abff9e70c101f36fac4270
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, num_chans=64): super().__init__() bias = True self.conv1 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride= 1, padding=1, bias=bias) self.relu2 = nn.PReLU() self.conv3 = nn.Conv...
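The _Residual_Block record shows a 3x3 same-padding conv followed by a PReLU before truncating at the second conv. The sketch below assumes a symmetric second conv and an identity skip connection, the usual residual pattern; the forward body is not visible in the record.

import torch
import torch.nn as nn

class ResidualBlockSketch(nn.Module):
    def __init__(self, num_chans=64):
        super().__init__()
        bias = True
        # conv1 and the PReLU are visible in the record.
        self.conv1 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1,
                               padding=1, bias=bias)
        self.relu2 = nn.PReLU()
        # Assumption: a matching second conv closing the block.
        self.conv3 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1,
                               padding=1, bias=bias)

    def forward(self, x):
        # Assumption: identity skip connection around conv-PReLU-conv.
        return x + self.conv3(self.relu2(self.conv1(x)))

x = torch.randn(1, 64, 16, 16)
assert ResidualBlockSketch()(x).shape == x.shape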
PredictionConvolutions
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn from itertools import product as product import torch.optim import torch.utils.data class PredictionConvolutions(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicte...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from itertools import product as product import torch.optim...
aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection
PredictionConvolutions
false
1,444
[ "MIT" ]
0
a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
https://github.com/aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection/tree/a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
import torch from torch import nn from itertools import product as product import torch.optim import torch.utils.data class Model(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicted as encoded offs...
residualUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import torch.nn.init class conv23DUnit(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, bias=True, dilation=1, nd=2): super(conv2...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
andry900/NN-Project
residualUnit
false
1,445
[ "MIT" ]
0
e04a83029f5990d9b65216ab0648a8826a8ebca7
https://github.com/andry900/NN-Project/tree/e04a83029f5990d9b65216ab0648a8826a8ebca7
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import torch.nn.init class conv23DUnit(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, bias=True, dilation=1, nd=2): super().__i...
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class ContrastiveLoss(nn.Module): def __init__(self, margin=2.0): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pair...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import...
ani0075/learning-pytorch
ContrastiveLoss
false
1,446
[ "MIT" ]
0
c50e4126751821075e94c4cc3950dd8780370fce
https://github.com/ani0075/learning-pytorch/tree/c50e4126751821075e94c4cc3950dd8780370fce
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class Model(nn.Module): def __init__(self, margin=2.0): super().__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2)...
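The ContrastiveLoss record shows the margin (default 2.0) and the pairwise Euclidean distance before truncating. The sketch below completes it with the standard contrastive formulation, where label == 1 marks dissimilar pairs; that formula and label convention are assumptions, since the record cuts off before the loss expression.

import torch
import torch.nn as nn
import torch.nn.functional as F

class ContrastiveLossSketch(nn.Module):
    def __init__(self, margin=2.0):
        super().__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        # Visible in the record: pairwise Euclidean distance between the two embeddings.
        d = F.pairwise_distance(output1, output2)
        # Assumption: standard contrastive loss — pull similar pairs together,
        # push dissimilar pairs beyond the margin.
        return torch.mean((1 - label) * d.pow(2) +
                          label * torch.clamp(self.margin - d, min=0.0).pow(2))

labels = torch.randint(0, 2, (8,)).float()
loss = ContrastiveLossSketch()(torch.randn(8, 16), torch.randn(8, 16), labels)
assert loss.dim() == 0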
h_swish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn class h_swish(nn.Module): def __init__(self, inplace=False): super(h_swish, self).__init__() self.inplace = inplace def forward(self, x): return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guard...
anonymous2022ijcai/RGSL
h_swish
false
1,447
[ "MIT" ]
0
11c38ee50d50127c0f7c2a137bdb21ca5f7f3644
https://github.com/anonymous2022ijcai/RGSL/tree/11c38ee50d50127c0f7c2a137bdb21ca5f7f3644
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self, inplace=False): super().__init__() self.inplace = inplace def forward(self, x): return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0 def get_inpu...
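The h_swish forward is fully visible in the record: x * relu6(x + 3) / 6, i.e. the input gated by a hard sigmoid. A self-contained sketch:

import torch
import torch.nn as nn
import torch.nn.functional as F

class HSwish(nn.Module):
    """Hard swish exactly as in the record: x * relu6(x + 3) / 6."""
    def __init__(self, inplace=False):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0

x = torch.linspace(-4, 4, 9)
print(HSwish()(x))  # ~0 below -3, approaches the identity above +3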
CmapPafHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels els...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils....
andreiday/trt_pose_motion_tracking_robot_arm
CmapPafHeadAttention
false
1,448
[ "MIT" ]
0
4ada47f6f12e18ce14ee53a22540e02923745dd8
https://github.com/andreiday/trt_pose_motion_tracking_robot_arm/tree/4ada47f6f12e18ce14ee53a22540e02923745dd8
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels els...
h_sigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.utils.data import torch.nn as nn class h_sigmoid(nn.Module): def __init__(self, inplace=True, h_max=1): super(h_sigmoid, self).__init__() self.relu = nn.ReLU6(inplace=inplace) self.h_max = h_max def forward(self, x): return self.relu(x + 3) * self.h_...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guard...
anonymous2022ijcai/RGSL
h_sigmoid
false
1,449
[ "MIT" ]
0
11c38ee50d50127c0f7c2a137bdb21ca5f7f3644
https://github.com/anonymous2022ijcai/RGSL/tree/11c38ee50d50127c0f7c2a137bdb21ca5f7f3644
import torch import torch.utils.data import torch.nn as nn class Model(nn.Module): def __init__(self, inplace=True, h_max=1): super().__init__() self.relu = nn.ReLU6(inplace=inplace) self.h_max = h_max def forward(self, x): return self.relu(x + 3) * self.h_max / 6 def get_i...
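The h_sigmoid forward is fully visible in the record: relu6(x + 3) * h_max / 6, a piecewise-linear approximation of a sigmoid scaled to h_max. A self-contained sketch:

import torch
import torch.nn as nn

class HSigmoid(nn.Module):
    """Hard sigmoid exactly as in the record: relu6(x + 3) * h_max / 6."""
    def __init__(self, inplace=True, h_max=1):
        super().__init__()
        self.relu = nn.ReLU6(inplace=inplace)
        self.h_max = h_max

    def forward(self, x):
        return self.relu(x + 3) * self.h_max / 6

x = torch.linspace(-4, 4, 9)
print(HSigmoid()(x))  # 0 for x <= -3, h_max for x >= 3, linear in between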