Dataset columns (viewer summary of types and value/length ranges):

Column                 Type            Min      Max
entry_point            string length   1        65
original_triton_code   string length   4.5k     619k
python_code            string length   208      60.9k
triton_code            string length   1.15k    275k
repo_name              string length   7        115
module_name            string length   1        65
synthetic              bool            1 class
uuid                   int64           0        18.5k
licenses               list length     1        6
stars                  int64           0        19.8k
sha                    string length   40       40
repo_link              string length   72       180
pytorch_code           string length   200      4.05k
SplitAndConcat
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.quantization.quantize_fx import torch.utils.data class SplitAndConcat(nn.Module): """Split the data from split_dim and concatenate in concat_dim. @param split_dim from which axis the data will be chunk @param concat_dim to which axis the data will be concat...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.quantization.quantize_fx import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size...
repo_name: JacobSzwejbka/d2go | module_name: SplitAndConcat | synthetic: false | uuid: 618 | licenses: ["Apache-2.0"] | stars: 0 | sha: d86ecc92eb97f14fcd97d626185f61c6817351e4
repo_link: https://github.com/JacobSzwejbka/d2go/tree/d86ecc92eb97f14fcd97d626185f61c6817351e4
import torch import torch.nn as nn import torch.quantization.quantize_fx import torch.utils.data class Model(nn.Module): """Split the data from split_dim and concatenate in concat_dim. @param split_dim from which axis the data will be chunk @param concat_dim to which axis the data will be concatenated ...
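The pytorch_code preview above describes the module but is truncated. As a minimal sketch (not the d2go code; the class name and the defaults split_dim=1, concat_dim=0, chunk=2 are assumptions), a chunk-then-concat module can look like this:

```python
import torch
import torch.nn as nn

class SplitAndConcatSketch(nn.Module):
    """Chunk the input along split_dim, then concatenate the chunks along concat_dim."""

    def __init__(self, split_dim=1, concat_dim=0, chunk=2):
        super().__init__()
        self.split_dim = split_dim
        self.concat_dim = concat_dim
        self.chunk = chunk

    def forward(self, x):
        chunks = torch.chunk(x, self.chunk, dim=self.split_dim)
        return torch.cat(chunks, dim=self.concat_dim)

# A (4, 4, 4, 4) input becomes (8, 2, 4, 4): channels are halved, batch is doubled.
print(SplitAndConcatSketch()(torch.rand(4, 4, 4, 4)).shape)
```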
ResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class ResidualBlock(nn.Module): def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.prelu = nn.PReLU() self.conv2 = nn.Conv2d(channe...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
repo_name: JaguAroo/SRResCGAN | module_name: ResidualBlock | synthetic: false | uuid: 619 | licenses: ["MIT"] | stars: 0 | sha: 9aac612aff631f7fb9142e0a36de9559cfc1a62d
repo_link: https://github.com/JaguAroo/SRResCGAN/tree/9aac612aff631f7fb9142e0a36de9559cfc1a62d
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, channels): super().__init__() self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.prelu = nn.PReLU() self.conv2 = nn.Conv2d(channels, channels, kernel_size=3...
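Based on the layers visible in the truncated preview (conv1, PReLU, conv2), a hedged sketch of such a block follows; the identity skip connection in the forward pass is an assumption, since the preview cuts off before it:

```python
import torch
import torch.nn as nn

class ResidualBlockSketch(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.prelu = nn.PReLU()
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        # conv -> PReLU -> conv, then add the input back (assumed skip connection)
        return x + self.conv2(self.prelu(self.conv1(x)))

print(ResidualBlockSketch(8)(torch.rand(2, 8, 16, 16)).shape)  # torch.Size([2, 8, 16, 16])
```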
SigmoidFocalClassificationLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class SigmoidFocalClassificationLoss(nn.Module): """ Sigmoid focal cross entropy loss. """ def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25): """ Args: gamma: Weighting parameter to balance loss for hard and easy examples. ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
repo_name: Javier-DlaP/OpenPCDet | module_name: SigmoidFocalClassificationLoss | synthetic: false | uuid: 620 | licenses: ["Apache-2.0"] | stars: 0 | sha: c4d308ea8052dd92948e2377b161b2519254275b
repo_link: https://github.com/Javier-DlaP/OpenPCDet/tree/c4d308ea8052dd92948e2377b161b2519254275b
import torch import torch.nn as nn class Model(nn.Module): """ Sigmoid focal cross entropy loss. """ def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25): """ Args: gamma: Weighting parameter to balance loss for hard and easy examples. alpha: Weighting p...
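For reference, the focal loss named in the preview follows Lin et al. (2017); a minimal element-wise sketch (the OpenPCDet class additionally applies anchor-wise weights, omitted here) is:

```python
import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    """FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t), computed per element."""
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p = torch.sigmoid(logits)
    p_t = p * targets + (1 - p) * (1 - targets)              # prob. of the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return alpha_t * (1 - p_t) ** gamma * ce

loss = sigmoid_focal_loss(torch.randn(4, 4), torch.randint(0, 2, (4, 4)).float())
print(loss.shape)  # torch.Size([4, 4]); reduction is left to the caller
```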
PinballLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn class PinballLoss(nn.Module): """ Calculates the quantile loss function. Attributes ---------- self.pred : torch.tensor Predictions. self.target : torch.tensor Target to predict. self.quantiles : torch.tensor """ def __init__(self...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empt...
repo_name: Javicadserres/wind-production-forecast | module_name: PinballLoss | synthetic: false | uuid: 621 | licenses: ["MIT"] | stars: 0 | sha: 903fbf53d2ea34dc1a63e89cee252e76d6c25876
repo_link: https://github.com/Javicadserres/wind-production-forecast/tree/903fbf53d2ea34dc1a63e89cee252e76d6c25876
import torch from torch import nn class Model(nn.Module): """ Calculates the quantile loss function. Attributes ---------- self.pred : torch.tensor Predictions. self.target : torch.tensor Target to predict. self.quantiles : torch.tensor """ def __init__(self, quan...
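The quantile (pinball) loss named above has a standard closed form; a minimal sketch, assuming one prediction column per quantile:

```python
import torch

def pinball_loss(pred, target, quantiles):
    """Mean of max(q * (y - y_hat), (q - 1) * (y - y_hat)) over all quantiles."""
    error = target.unsqueeze(-1) - pred                  # (batch, n_quantiles)
    q = quantiles.view(1, -1)
    return torch.max(q * error, (q - 1) * error).mean()

quantiles = torch.tensor([0.1, 0.5, 0.9])
print(pinball_loss(torch.rand(8, 3), torch.rand(8), quantiles))
```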
SpatialGatherModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch.nn import functional as F import torch.nn as nn class SpatialGatherModule(nn.Module): """Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. """ def __init__(self, scale): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Jason-Khan/mmediting | module_name: SpatialGatherModule | synthetic: false | uuid: 622 | licenses: ["Apache-2.0"] | stars: 0 | sha: d187f95a675dff3eb975a575bd9278d643b5b645
repo_link: https://github.com/Jason-Khan/mmediting/tree/d187f95a675dff3eb975a575bd9278d643b5b645
import torch from torch.nn import functional as F import torch.nn as nn class Model(nn.Module): """Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. """ def __init__(self, scale): super()....
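The "soft-weighted aggregation" the docstring describes is the OCR-style object-context gather; a sketch under that assumption (the exact reshapes in mmediting's copy are truncated and may differ):

```python
import torch
import torch.nn.functional as F

def spatial_gather(feats, probs, scale=1.0):
    """Aggregate (N, C, H, W) features into per-class context vectors.

    probs holds (N, K, H, W) class logits; the result is (N, C, K, 1)."""
    n, k, h, w = probs.shape
    probs = F.softmax(scale * probs.view(n, k, -1), dim=2)       # weights over pixels
    feats = feats.view(n, feats.shape[1], -1).permute(0, 2, 1)   # (N, HW, C)
    context = torch.matmul(probs, feats)                         # (N, K, C)
    return context.permute(0, 2, 1).unsqueeze(3)

print(spatial_gather(torch.rand(2, 16, 8, 8), torch.rand(2, 5, 8, 8)).shape)  # (2, 16, 5, 1)
```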
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class MultiHeadAttention(nn.Module): def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.0): super().__init__() self.num_heads = num_heads head_dim = dim_self // num_heads self.scale = head_dim ** -0.5 self.to_queries = n...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: JAVI897/CLIP_prefix_caption | module_name: MultiHeadAttention | synthetic: false | uuid: 623 | licenses: ["MIT"] | stars: 0 | sha: f4569891d01a5a790e9cdf850fb7feda3a0affc7
repo_link: https://github.com/JAVI897/CLIP_prefix_caption/tree/f4569891d01a5a790e9cdf850fb7feda3a0affc7
import torch from torch import nn class Model(nn.Module): def __init__(self, dim_self, dim_ref, num_heads, bias=True, dropout=0.0): super().__init__() self.num_heads = num_heads head_dim = dim_self // num_heads self.scale = head_dim ** -0.5 self.to_queries = nn.Linear(dim_...
PolicyNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim import torch.autograd from torch.distributions import Normal class PolicyNet(nn.Module): def __init__(self, learning_rate, lr_alpha, init_alpha, target_entropy, in_dim): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: ChangQingAAS/Deep-Reinforcement-Learning | module_name: PolicyNet | synthetic: false | uuid: 624 | licenses: ["MIT"] | stars: 0 | sha: 3bc1381c632b1730a48e63e972aea62086c4287c
repo_link: https://github.com/ChangQingAAS/Deep-Reinforcement-Learning/tree/3bc1381c632b1730a48e63e972aea62086c4287c
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim import torch.autograd from torch.distributions import Normal class Model(nn.Module): def __init__(self, learning_rate, lr_alpha, init_alpha, target_entropy, in_dim): ...
BasicBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils import weight_norm import torch.utils.data def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=True)...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch....
repo_name: JaguAroo/SRResCGAN | module_name: BasicBlock | synthetic: false | uuid: 625 | licenses: ["MIT"] | stars: 0 | sha: 9aac612aff631f7fb9142e0a36de9559cfc1a62d
repo_link: https://github.com/JaguAroo/SRResCGAN/tree/9aac612aff631f7fb9142e0a36de9559cfc1a62d
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils import weight_norm import torch.utils.data def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=True)...
MSEGradLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data class MSEGradLoss(nn.Module): def __init__(self, grad=False): super(MSEGradLoss, self).__init__() self.grad = grad def forward(self, input, target): err = input - target loss = err.norm(p=2).pow(2).div(err.numel()) ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import...
repo_name: JaguAroo/SRResCGAN | module_name: MSEGradLoss | synthetic: false | uuid: 626 | licenses: ["MIT"] | stars: 0 | sha: 9aac612aff631f7fb9142e0a36de9559cfc1a62d
repo_link: https://github.com/JaguAroo/SRResCGAN/tree/9aac612aff631f7fb9142e0a36de9559cfc1a62d
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, grad=False): super().__init__() self.grad = grad def forward(self, input, target): err = input - target loss = err.norm(p=2).pow(2).div(err.numel()) if self.grad: ...
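The visible part of the forward pass, `err.norm(p=2).pow(2).div(err.numel())`, is simply the mean squared error written through the L2 norm; the optional gradient branch (`self.grad`) is truncated. A quick check of that identity:

```python
import torch
import torch.nn.functional as F

def mse_via_norm(input, target):
    # squared L2 norm of the error, divided by the number of elements == MSE
    err = input - target
    return err.norm(p=2).pow(2).div(err.numel())

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(torch.allclose(mse_via_norm(a, b), F.mse_loss(a, b)))  # True
```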
L1CompositionLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import functools import torch from torch.nn import functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced lo...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools from ...
repo_name: Jason-Khan/mmediting | module_name: L1CompositionLoss | synthetic: false | uuid: 627 | licenses: ["Apache-2.0"] | stars: 0 | sha: d187f95a675dff3eb975a575bd9278d643b5b645
repo_link: https://github.com/Jason-Khan/mmediting/tree/d187f95a675dff3eb975a575bd9278d643b5b645
import functools import torch from torch.nn import functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced lo...
KeypointRCNNPredictorNoUpscale
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.quantization.quantize_fx import torch.utils.data class KeypointRCNNPredictorNoUpscale(nn.Module): def __init__(self, in_channels, num_keypoints): super(KeypointRCNNPredictorNoUpscale, self).__init__() input_features = in_channels deconv_kern...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.quantization.quantize_fx import torch.utils.d...
repo_name: JacobSzwejbka/d2go | module_name: KeypointRCNNPredictorNoUpscale | synthetic: false | uuid: 628 | licenses: ["Apache-2.0"] | stars: 0 | sha: d86ecc92eb97f14fcd97d626185f61c6817351e4
repo_link: https://github.com/JacobSzwejbka/d2go/tree/d86ecc92eb97f14fcd97d626185f61c6817351e4
import torch import torch.nn as nn import torch.quantization.quantize_fx import torch.utils.data class Model(nn.Module): def __init__(self, in_channels, num_keypoints): super().__init__() input_features = in_channels deconv_kernel = 4 self.kps_score_lowres = nn.ConvTranspose2d(inp...
TV_L2Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data class TV_L2Loss(nn.Module): def __init__(self): super(TV_L2Loss, self).__init__() def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C....
repo_name: JaguAroo/SRResCGAN | module_name: TV_L2Loss | synthetic: false | uuid: 629 | licenses: ["MIT"] | stars: 0 | sha: 9aac612aff631f7fb9142e0a36de9559cfc1a62d
repo_link: https://github.com/JaguAroo/SRResCGAN/tree/9aac612aff631f7fb9142e0a36de9559cfc1a62d
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) count_w = se...
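The truncated forward pass follows the common anisotropic total-variation pattern: sum of squared neighbour differences, normalized by the number of difference terms and the batch size. A sketch under that assumption (the conventional factor of 2 is assumed, since the repo's scaling is cut off):

```python
import torch

def tv_l2_loss(x):
    """Squared total-variation of a (B, C, H, W) tensor."""
    b, _, h, w = x.shape
    count_h = x[:, :, 1:, :].numel()                      # number of vertical diffs
    count_w = x[:, :, :, 1:].numel()                      # number of horizontal diffs
    h_tv = (x[:, :, 1:, :] - x[:, :, :h - 1, :]).pow(2).sum()
    w_tv = (x[:, :, :, 1:] - x[:, :, :, :w - 1]).pow(2).sum()
    return 2 * (h_tv / count_h + w_tv / count_w) / b

print(tv_l2_loss(torch.rand(4, 4, 4, 4)))
```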
SmoothPinballLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn from torch.nn import functional class SmoothPinballLoss(nn.Module): """ Smoth version of the pinball loss function. Parameters ---------- quantiles : torch.tensor alpha : int Smoothing rate. Attributes ---------- self.pred : torch.tensor ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.gua...
repo_name: Javicadserres/wind-production-forecast | module_name: SmoothPinballLoss | synthetic: false | uuid: 630 | licenses: ["MIT"] | stars: 0 | sha: 903fbf53d2ea34dc1a63e89cee252e76d6c25876
repo_link: https://github.com/Javicadserres/wind-production-forecast/tree/903fbf53d2ea34dc1a63e89cee252e76d6c25876
import torch from torch import nn from torch.nn import functional class Model(nn.Module): """ Smoth version of the pinball loss function. Parameters ---------- quantiles : torch.tensor alpha : int Smoothing rate. Attributes ---------- self.pred : torch.tensor Pred...
MSECompositionLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import functools import torch from torch.nn import functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced lo...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import functools from torch.nn import functional as F import torch.nn as nn assert_size_s...
repo_name: Jason-Khan/mmediting | module_name: MSECompositionLoss | synthetic: false | uuid: 631 | licenses: ["Apache-2.0"] | stars: 0 | sha: d187f95a675dff3eb975a575bd9278d643b5b645
repo_link: https://github.com/Jason-Khan/mmediting/tree/d187f95a675dff3eb975a575bd9278d643b5b645
import functools import torch from torch.nn import functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced lo...
MaskedDense
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from torch.nn.modules import Module class MaskedDense(Module): def __init__(self, in_dim, out_dim, bias=False): super(MaskedDense, self).__init__() self.in_dim = in_dim ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn from torch.nn import init from...
repo_name: DwaraknathT/pyfl | module_name: MaskedDense | synthetic: false | uuid: 632 | licenses: ["MIT"] | stars: 0 | sha: e9a4d1ca98c6167a567d0d46771ac9e1c7bb7322
repo_link: https://github.com/DwaraknathT/pyfl/tree/e9a4d1ca98c6167a567d0d46771ac9e1c7bb7322
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from torch.nn.modules import Module class Model(Module): def __init__(self, in_dim, out_dim, bias=False): super().__init__() self.in_dim = in_dim self.out_dim = out_dim...
CharbonnierCompLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import functools import torch from torch.nn import functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced lo...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import functools from torch....
repo_name: Jason-Khan/mmediting | module_name: CharbonnierCompLoss | synthetic: false | uuid: 633 | licenses: ["Apache-2.0"] | stars: 0 | sha: d187f95a675dff3eb975a575bd9278d643b5b645
repo_link: https://github.com/Jason-Khan/mmediting/tree/d187f95a675dff3eb975a575bd9278d643b5b645
import functools import torch from torch.nn import functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced lo...
Fire
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn from torchvision.transforms.functional import * class Fire(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(Fire, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, sque...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from to...
repo_name: BiggerBinBin/e3d_handpose_x-master | module_name: Fire | synthetic: false | uuid: 634 | licenses: ["Apache-2.0"] | stars: 0 | sha: 20d091a8a019d85de26c81d02985868f79d5de84
repo_link: https://github.com/BiggerBinBin/e3d_handpose_x-master/tree/20d091a8a019d85de26c81d02985868f79d5de84
import torch import torch.nn as nn from torchvision.transforms.functional import * class Model(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super().__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_plane...
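The Fire block here mirrors the SqueezeNet design: a 1x1 squeeze convolution followed by parallel 1x1 and 3x3 expand convolutions whose outputs are concatenated. A self-contained sketch:

```python
import torch
import torch.nn as nn

class FireSketch(nn.Module):
    def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes):
        super().__init__()
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.relu(self.squeeze(x))
        # concatenate the two expand branches along the channel axis
        return torch.cat([self.relu(self.expand1x1(x)),
                          self.relu(self.expand3x3(x))], dim=1)

print(FireSketch(16, 8, 16, 16)(torch.rand(1, 16, 8, 8)).shape)  # (1, 32, 8, 8)
```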
MaskedConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from torch.nn.modules import Module from torch.nn.modules.utils import _pair class MaskedConv(Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, d...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn from torch.nn import init from...
repo_name: DwaraknathT/pyfl | module_name: MaskedConv | synthetic: false | uuid: 635 | licenses: ["MIT"] | stars: 0 | sha: e9a4d1ca98c6167a567d0d46771ac9e1c7bb7322
repo_link: https://github.com/DwaraknathT/pyfl/tree/e9a4d1ca98c6167a567d0d46771ac9e1c7bb7322
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from torch.nn.modules import Module from torch.nn.modules.utils import _pair class Model(Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilati...
WeightedCrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F class WeightedCrossEntropyLoss(nn.Module): """ Transform input to fit the fomation of PyTorch offical cross entropy loss with anchor-wise weighting. """ def __init__(self): super(WeightedCrossEntropyLoss, self).__init__() ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
repo_name: Javier-DlaP/OpenPCDet | module_name: WeightedCrossEntropyLoss | synthetic: false | uuid: 636 | licenses: ["Apache-2.0"] | stars: 0 | sha: c4d308ea8052dd92948e2377b161b2519254275b
repo_link: https://github.com/Javier-DlaP/OpenPCDet/tree/c4d308ea8052dd92948e2377b161b2519254275b
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """ Transform input to fit the fomation of PyTorch offical cross entropy loss with anchor-wise weighting. """ def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', tar...
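The docstring says the module reshapes its input to match PyTorch's cross entropy and applies anchor-wise weights; a hedged sketch of that transformation (the exact argument handling in OpenPCDet is truncated):

```python
import torch
import torch.nn.functional as F

def weighted_cross_entropy(input, target, weights):
    """input: (B, A, C) logits, target: (B, A, C) one-hot, weights: (B, A)."""
    input = input.permute(0, 2, 1)                  # cross_entropy wants (B, C, ...)
    target = target.argmax(dim=-1)                  # one-hot -> class indices
    loss = F.cross_entropy(input, target, reduction='none')   # (B, A)
    return loss * weights                           # anchor-wise weighting

one_hot = F.one_hot(torch.randint(0, 3, (2, 10)), 3).float()
print(weighted_cross_entropy(torch.randn(2, 10, 3), one_hot, torch.ones(2, 10)).shape)
```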
FusedLeakyReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda...
repo_name: Jerry2001/StyleCLIP | module_name: FusedLeakyReLU | synthetic: false | uuid: 637 | licenses: ["MIT"] | stars: 0 | sha: 806216b4ce7b4c001ff05d7bd707b28d20ea6191
repo_link: https://github.com/Jerry2001/StyleCLIP/tree/806216b4ce7b4c001ff05d7bd707b28d20ea6191
import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), ...
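The fused_leaky_relu helper shown in the preview is the StyleGAN2-style bias + leaky ReLU + rescale. A sketch of the 4D branch only (the 3D case in the preview, where the bias sits on the last dimension, is omitted):

```python
import torch
import torch.nn.functional as F

def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    """Add a per-channel bias, apply leaky ReLU, then rescale."""
    rest_dim = [1] * (input.ndim - bias.ndim - 1)
    biased = input + bias.view(1, bias.shape[0], *rest_dim)
    return F.leaky_relu(biased, negative_slope) * scale

print(fused_leaky_relu(torch.rand(2, 8, 4, 4), torch.zeros(8)).shape)  # (2, 8, 4, 4)
```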
Img_decoder_v3
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class ResidualBlock(nn.Module): def __init__(self, input_channel, output_channel, upsample=True): super(ResidualBlock, self).__init__() self.conv1 = nn.Conv2d(input_channel, output_channel, kernel_size=3, padding=0) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Holmes-Alan/Photo2Sketch | module_name: Img_decoder_v3 | synthetic: false | uuid: 638 | licenses: ["MIT"] | stars: 0 | sha: 43a0ca6bb8a8e645b35a2ab23d11ed5efe117e09
repo_link: https://github.com/Holmes-Alan/Photo2Sketch/tree/43a0ca6bb8a8e645b35a2ab23d11ed5efe117e09
import torch import torch.nn as nn import torch.nn.functional as F class ResidualBlock(nn.Module): def __init__(self, input_channel, output_channel, upsample=True): super().__init__() self.conv1 = nn.Conv2d(input_channel, output_channel, kernel_size=3, padding=0) self.conv2 = ...
ScaledDotProduction
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class ScaledDotProduction(nn.Module): """Scaled Dot Production""" def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Jincheng-Sun/Kylearn-pytorch | module_name: ScaledDotProduction | synthetic: false | uuid: 639 | licenses: ["MIT"] | stars: 0 | sha: e72f2ab45a3f4724e843a27bec37664d3612fdca
repo_link: https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import torch.nn as nn class Model(nn.Module): """Scaled Dot Production""" def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward...
AdditiveAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class AdditiveAttention(torch.nn.Module): """ A general additive attention module. Originally for NAML. """ def __init__(self, query_vector_dim, candidate_vector_dim, writer=None, tag=None, names=None): super(Addit...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Janelovelzy/NewsRecommendation | module_name: AdditiveAttention | synthetic: false | uuid: 640 | licenses: ["MIT"] | stars: 0 | sha: bd2d70e828ffa66ea4cbbd3c6ac09f14e7f0179b
repo_link: https://github.com/Janelovelzy/NewsRecommendation/tree/bd2d70e828ffa66ea4cbbd3c6ac09f14e7f0179b
import torch import torch.nn as nn import torch.nn.functional as F class Model(torch.nn.Module): """ A general additive attention module. Originally for NAML. """ def __init__(self, query_vector_dim, candidate_vector_dim, writer=None, tag=None, names=None): super().__init__() ...
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import numpy as np import torch.nn as nn class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) s...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Jincheng-Sun/Kylearn-pytorch | module_name: ScaledDotProductAttention | synthetic: false | uuid: 641 | licenses: ["MIT"] | stars: 0 | sha: e72f2ab45a3f4724e843a27bec37664d3612fdca
repo_link: https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import numpy as np import torch.nn as nn class Model(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Sof...
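For reference, a minimal scaled dot-product attention matching the members listed in the preview (temperature, dropout, softmax over dim=2); attention masking is omitted:

```python
import torch
import torch.nn as nn

class ScaledDotProductAttentionSketch(nn.Module):
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v):
        attn = torch.bmm(q, k.transpose(1, 2)) / self.temperature   # (B, Lq, Lk)
        attn = self.dropout(self.softmax(attn))
        return torch.bmm(attn, v), attn                              # (B, Lq, Dv)

q = k = v = torch.rand(2, 5, 8)
out, attn = ScaledDotProductAttentionSketch(temperature=8 ** 0.5)(q, k, v)
print(out.shape, attn.shape)  # torch.Size([2, 5, 8]) torch.Size([2, 5, 5])
```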
BCE_loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class BCE_loss(nn.Module): def __init__(self): super(BCE_loss, self).__init__() def forward(self, pred, gt): bce_loss = nn.BCELoss(size_average=True) bce_out = bce_loss(pred, gt) return bce_out def get_inputs(): return [torch.rand([4, ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
repo_name: Jianrong-Lu/Head-and-Neck-Tumour-Segmentation-and-Prediction-of-Patient-Survival | module_name: BCE_loss | synthetic: false | uuid: 642 | licenses: ["MIT"] | stars: 0 | sha: 257cf17ce6d405166dd8449f3b34e305cb5103b2
repo_link: https://github.com/Jianrong-Lu/Head-and-Neck-Tumour-Segmentation-and-Prediction-of-Patient-Survival/tree/257cf17ce6d405166dd8449f3b34e305cb5103b2
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, pred, gt): bce_loss = nn.BCELoss(size_average=True) bce_out = bce_loss(pred, gt) return bce_out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch....
ReactionDotProduction
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class ReactionDotProduction(nn.Module): """ Scaled Dot Productionss """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Jincheng-Sun/Kylearn-pytorch | module_name: ReactionDotProduction | synthetic: false | uuid: 643 | licenses: ["MIT"] | stars: 0 | sha: e72f2ab45a3f4724e843a27bec37664d3612fdca
repo_link: https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import torch.nn as nn class Model(nn.Module): """ Scaled Dot Productionss """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def for...
GAT
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class GATLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, alpha=0.2): super(GATLayer, self).__init__() self.in_features = in_features...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: JLUVicent/DAEGC | module_name: GAT | synthetic: false | uuid: 644 | licenses: ["MIT"] | stars: 0 | sha: 9a4cc50e40e8521fafb00960d1adf8216674c8f6
repo_link: https://github.com/JLUVicent/DAEGC/tree/9a4cc50e40e8521fafb00960d1adf8216674c8f6
import torch import torch.nn as nn import torch.nn.functional as F class GATLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, alpha=0.2): super().__init__() self.in_features = in_features self....
LinearConvExpansion
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn class LinearConvExpansion(nn.Module): """expansion 1D -> 2D""" def __init__(self, d_features, n_channel, n_depth): super().__init__() self.d_features = d_features self.n_channel = n_channel self.n_depth = n_depth se...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.g...
repo_name: Jincheng-Sun/Kylearn-pytorch | module_name: LinearConvExpansion | synthetic: false | uuid: 645 | licenses: ["MIT"] | stars: 0 | sha: e72f2ab45a3f4724e843a27bec37664d3612fdca
repo_link: https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import numpy as np import torch.nn as nn class Model(nn.Module): """expansion 1D -> 2D""" def __init__(self, d_features, n_channel, n_depth): super().__init__() self.d_features = d_features self.n_channel = n_channel self.n_depth = n_depth self.d_hid = int...
PlainRefiner
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class PlainRefiner(nn.Module): """Simple refiner from Deep Image Matting. Args: conv_channels (int): Number of channels produced by the three main convolutional layer. loss_refine (dict): Config of the loss of the refiner. Default: None. ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
repo_name: Jason-Khan/mmediting | module_name: PlainRefiner | synthetic: false | uuid: 646 | licenses: ["Apache-2.0"] | stars: 0 | sha: d187f95a675dff3eb975a575bd9278d643b5b645
repo_link: https://github.com/Jason-Khan/mmediting/tree/d187f95a675dff3eb975a575bd9278d643b5b645
import torch import torch.nn as nn class Model(nn.Module): """Simple refiner from Deep Image Matting. Args: conv_channels (int): Number of channels produced by the three main convolutional layer. loss_refine (dict): Config of the loss of the refiner. Default: None. pretrai...
AffineGridGen
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
from torch.nn import Module import torch import torch.nn.functional as F import torch.nn from torch.nn.modules.module import Module class AffineGridGen(Module): def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True): super(AffineGridGen, self).__init__() self.out_h = out_h self...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_s...
repo_name: JiwonCocoder/matching1 | module_name: AffineGridGen | synthetic: false | uuid: 647 | licenses: ["MIT"] | stars: 0 | sha: 75274312a4ab6ba318c3d241abc0eb292f6ce69c
repo_link: https://github.com/JiwonCocoder/matching1/tree/75274312a4ab6ba318c3d241abc0eb292f6ce69c
from torch.nn import Module import torch import torch.nn.functional as F import torch.nn from torch.nn.modules.module import Module class Model(Module): def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True): super().__init__() self.out_h = out_h self.out_w = out_w self...
FeatureCorrelation
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn def featureL2Norm(feature): epsilon = 1e-06 norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5 ).unsqueeze(1).expand_as(feature) return torch.div(feature, norm) class FeatureCorrelation(torch.nn.Module): def __init__(self, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: JiwonCocoder/matching1 | module_name: FeatureCorrelation | synthetic: false | uuid: 648 | licenses: ["MIT"] | stars: 0 | sha: 75274312a4ab6ba318c3d241abc0eb292f6ce69c
repo_link: https://github.com/JiwonCocoder/matching1/tree/75274312a4ab6ba318c3d241abc0eb292f6ce69c
import torch import torch.nn as nn import torch.nn def featureL2Norm(feature): epsilon = 1e-06 norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5 ).unsqueeze(1).expand_as(feature) return torch.div(feature, norm) class Model(torch.nn.Module): def __init__(self, shape='3D', n...
StyleMod
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn from torch.nn import functional as F import torchvision.transforms.functional as F import torch.nn.functional as F class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import functional as F import torchvision.tra...
repo_name: AnimeshKoratana/blurryface | module_name: StyleMod | synthetic: false | uuid: 649 | licenses: ["Apache-2.0"] | stars: 0 | sha: c6cb5feec02f6d5af3acb1678336800390715d65
repo_link: https://github.com/AnimeshKoratana/blurryface/tree/c6cb5feec02f6d5af3acb1678336800390715d65
import torch from torch import nn from torch.nn import functional as F import torchvision.transforms.functional as F import torch.nn.functional as F class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain...
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, a...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Jeyhooon/deep-reinforcement-learning | module_name: Actor | synthetic: false | uuid: 650 | licenses: ["MIT"] | stars: 0 | sha: 7a6f1974493a2058635539a4868512cdf3fb5bdb
repo_link: https://github.com/Jeyhooon/deep-reinforcement-learning/tree/7a6f1974493a2058635539a4868512cdf3fb5bdb
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, a...
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import tor...
repo_name: Jeyhooon/deep-reinforcement-learning | module_name: Critic | synthetic: false | uuid: 651 | licenses: ["MIT"] | stars: 0 | sha: 7a6f1974493a2058635539a4868512cdf3fb5bdb
repo_link: https://github.com/Jeyhooon/deep-reinforcement-learning/tree/7a6f1974493a2058635539a4868512cdf3fb5bdb
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from math import * def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, a...
ModulatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from to...
repo_name: Jerry2001/StyleCLIP | module_name: ModulatedConv2d | synthetic: false | uuid: 652 | licenses: ["MIT"] | stars: 0 | sha: 806216b4ce7b4c001ff05d7bd707b28d20ea6191
repo_link: https://github.com/Jerry2001/StyleCLIP/tree/806216b4ce7b4c001ff05d7bd707b28d20ea6191
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0...
Bottleneck
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class Bottleneck(nn.Module): def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Jincheng-Sun/Kylearn-pytorch | module_name: Bottleneck | synthetic: false | uuid: 653 | licenses: ["MIT"] | stars: 0 | sha: e72f2ab45a3f4724e843a27bec37664d3612fdca
repo_link: https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) s...
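The members in the preview (w_1, w_2, LayerNorm, dropout) suggest a position-wise feed-forward block; below is a sketch assuming the usual residual-plus-LayerNorm ordering, which the truncation does not confirm:

```python
import torch
import torch.nn as nn

class BottleneckFFNSketch(nn.Module):
    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_in, d_hid)
        self.w_2 = nn.Linear(d_hid, d_in)
        self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x
        x = self.w_2(torch.relu(self.w_1(x)))           # expand then project back
        return self.layer_norm(self.dropout(x) + residual)

print(BottleneckFFNSketch(16, 64)(torch.rand(2, 5, 16)).shape)  # (2, 5, 16)
```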
ConvExpansion
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class ConvExpansion(nn.Module): """expansion 1D -> 2D""" def __init__(self, d_features, n_channel, n_depth): super().__init__() self.d_features = d_features self.n_channel = n_channel self.n_depth = n_depth self.conv = nn.Conv1d(1, n_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
repo_name: Jincheng-Sun/Kylearn-pytorch | module_name: ConvExpansion | synthetic: false | uuid: 654 | licenses: ["MIT"] | stars: 0 | sha: e72f2ab45a3f4724e843a27bec37664d3612fdca
repo_link: https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import torch.nn as nn class Model(nn.Module): """expansion 1D -> 2D""" def __init__(self, d_features, n_channel, n_depth): super().__init__() self.d_features = d_features self.n_channel = n_channel self.n_depth = n_depth self.conv = nn.Conv1d(1, n_channel ...
ChannelSELayer3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class ChannelSELayer3D(nn.Module): """ 3D extension of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* *Zhu et al., AnatomyNet, arXiv:arXiv:1808.05238* """ def __init__(self, num_channels...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
repo_name: Jianrong-Lu/Head-and-Neck-Tumour-Segmentation-and-Prediction-of-Patient-Survival | module_name: ChannelSELayer3D | synthetic: false | uuid: 655 | licenses: ["MIT"] | stars: 0 | sha: 257cf17ce6d405166dd8449f3b34e305cb5103b2
repo_link: https://github.com/Jianrong-Lu/Head-and-Neck-Tumour-Segmentation-and-Prediction-of-Patient-Survival/tree/257cf17ce6d405166dd8449f3b34e305cb5103b2
import torch import torch.nn as nn class Model(nn.Module): """ 3D extension of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* *Zhu et al., AnatomyNet, arXiv:arXiv:1808.05238* """ def __init__(self, num_channels, reduction...
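The cited Squeeze-and-Excitation block generalizes directly to 5D tensors; a compact sketch (the reduction ratio of 2 and the class name are assumptions):

```python
import torch
import torch.nn as nn

class ChannelSE3DSketch(nn.Module):
    def __init__(self, num_channels, reduction_ratio=2):
        super().__init__()
        hidden = num_channels // reduction_ratio
        self.fc1 = nn.Linear(num_channels, hidden)
        self.fc2 = nn.Linear(hidden, num_channels)

    def forward(self, x):
        n, c = x.shape[:2]
        squeeze = x.view(n, c, -1).mean(dim=2)                  # global average pool
        excite = torch.sigmoid(self.fc2(torch.relu(self.fc1(squeeze))))
        return x * excite.view(n, c, 1, 1, 1)                   # re-weight channels

print(ChannelSE3DSketch(8)(torch.rand(2, 8, 4, 4, 4)).shape)  # (2, 8, 4, 4, 4)
```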
Dice_Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn from torch import sigmoid class Dice_Loss(nn.Module): """ Taken from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch """ def __init__(self, weight=None, size_average=True): super(Dice_Loss, self).__init__() def forward(self, out,...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empt...
repo_name: JoaoCarv/holistic_seg | module_name: Dice_Loss | synthetic: false | uuid: 656 | licenses: ["MIT"] | stars: 0 | sha: ea4787e7e9a36dc5caf198d2be1bd1e71c06d440
repo_link: https://github.com/JoaoCarv/holistic_seg/tree/ea4787e7e9a36dc5caf198d2be1bd1e71c06d440
import torch from torch import nn from torch import sigmoid class Model(nn.Module): """ Taken from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch """ def __init__(self, weight=None, size_average=True): super().__init__() def forward(self, out, targets, smooth=1)...
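The docstring points to the Kaggle loss-function library; the soft Dice loss it describes, sketched with the usual smoothing term:

```python
import torch

def dice_loss(out, targets, smooth=1):
    """1 - (2 * intersection + smooth) / (|pred| + |target| + smooth), on flattened inputs."""
    probs = torch.sigmoid(out).reshape(-1)
    targets = targets.reshape(-1)
    intersection = (probs * targets).sum()
    dice = (2.0 * intersection + smooth) / (probs.sum() + targets.sum() + smooth)
    return 1 - dice

print(dice_loss(torch.randn(4, 4, 4, 4), torch.rand(4, 4, 4, 4).round()))
```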
StyledConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from to...
Jerry2001/StyleCLIP
StyledConv
false
657
[ "MIT" ]
0
806216b4ce7b4c001ff05d7bd707b28d20ea6191
https://github.com/Jerry2001/StyleCLIP/tree/806216b4ce7b4c001ff05d7bd707b28d20ea6191
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0...
DiceLossWithLogits
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guard...
JoOkuma/torch-em
DiceLossWithLogits
false
658
[ "MIT" ]
0
68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
https://github.com/JoOkuma/torch-em/tree/68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
import torch import torch.nn as nn import torch.utils.data def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * ...
LinearBottleneckLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class LinearBottleneckLayer(nn.Module): """ Bottleneck Layer """ def __init__(self, d_features, d_hid, d_out=None, dropout=0.1): super().__init__() if d_out is None: d_out = d_features self.encode = nn.Linear(d_features, d_hid) se...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Jincheng-Sun/Kylearn-pytorch
LinearBottleneckLayer
false
660
[ "MIT" ]
0
e72f2ab45a3f4724e843a27bec37664d3612fdca
https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import torch.nn as nn class Model(nn.Module): """ Bottleneck Layer """ def __init__(self, d_features, d_hid, d_out=None, dropout=0.1): super().__init__() if d_out is None: d_out = d_features self.encode = nn.Linear(d_features, d_hid) self.decode = nn.L...
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerN...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Jincheng-Sun/Kylearn-pytorch
PositionwiseFeedForward
false
661
[ "MIT" ]
0
e72f2ab45a3f4724e843a27bec37664d3612fdca
https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import torch.nn as nn class Model(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-0...
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn from torch.nn import functional as F assert_siz...
Jerry2001/StyleCLIP
ToRGB
false
662
[ "MIT" ]
0
806216b4ce7b4c001ff05d7bd707b28d20ea6191
https://github.com/Jerry2001/StyleCLIP/tree/806216b4ce7b4c001ff05d7bd707b28d20ea6191
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0...
LinearClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class LinearClassifier(nn.Module): def __init__(self, d_features, seq_length, d_hid, d_out): super(LinearClassifier, self).__init__() self.d_features = d_features self.maxpool = torch.nn.MaxPool1d(seq_length, stride=1, padding=0) self.fc1 = nn.Li...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
Jincheng-Sun/Kylearn-pytorch
LinearClassifier
false
663
[ "MIT" ]
0
e72f2ab45a3f4724e843a27bec37664d3612fdca
https://github.com/Jincheng-Sun/Kylearn-pytorch/tree/e72f2ab45a3f4724e843a27bec37664d3612fdca
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, d_features, seq_length, d_hid, d_out): super().__init__() self.d_features = d_features self.maxpool = torch.nn.MaxPool1d(seq_length, stride=1, padding=0) self.fc1 = nn.Linear(d_features, d_hid) s...
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guard...
JoOkuma/torch-em
DiceLoss
false
664
[ "MIT" ]
0
68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
https://github.com/JoOkuma/torch-em/tree/68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
import torch import torch.nn as nn import torch.utils.data def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * ...
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn.functional as F import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class FocalLoss(nn.Module): def __init__(self, gamma=2): super(FocalLoss, self).__init__() self.gamma = gamma def forward...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
JoohyungLee0106/rectal_MR_volume_classification
FocalLoss
false
665
[ "MIT" ]
0
d2a7d13dae9fe7255b983cbc210567dd452a936f
https://github.com/JoohyungLee0106/rectal_MR_volume_classification/tree/d2a7d13dae9fe7255b983cbc210567dd452a936f
import torch import torch.nn.functional as F import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Model(nn.Module): def __init__(self, gamma=2): super().__init__() self.gamma = gamma def forward(self, logit, label...
ChannelSpatialSELayer3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class ChannelSELayer3D(nn.Module): """ 3D extension of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* *Zhu et al., AnatomyNet, arXiv:1808.05238* """ ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...
Jianrong-Lu/Head-and-Neck-Tumour-Segmentation-and-Prediction-of-Patient-Survival
ChannelSpatialSELayer3D
false
666
[ "MIT" ]
0
257cf17ce6d405166dd8449f3b34e305cb5103b2
https://github.com/Jianrong-Lu/Head-and-Neck-Tumour-Segmentation-and-Prediction-of-Patient-Survival/tree/257cf17ce6d405166dd8449f3b34e305cb5103b2
import torch import torch.nn as nn import torch.nn.functional as F class ChannelSELayer3D(nn.Module): """ 3D extension of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* *Zhu et al., AnatomyNet, arXiv:1808.05238* """ ...
BCEDiceLossWithLogits
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
JoOkuma/torch-em
BCEDiceLossWithLogits
false
667
[ "MIT" ]
0
68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
https://github.com/JoOkuma/torch-em/tree/68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
import torch import torch.nn as nn import torch.utils.data def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * ...
AgentConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class AgentConvBlock(nn.Module): def __init__(self, nin, nout, ksize=3): super(AgentConvBlock, self).__init__() self.conv1 = nn.Conv2d(nin, nout, ksize, padding=1) self.lrelu1 = nn.LeakyReLU(0.2) self.conv2 = nn.Conv2d(nout, nout, ksize, padding=...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
JoOkuma/DifferentiableSketching
AgentConvBlock
false
668
[ "BSD-3-Clause" ]
0
6672508bd362d90e9bfc07966cb7907879d01385
https://github.com/JoOkuma/DifferentiableSketching/tree/6672508bd362d90e9bfc07966cb7907879d01385
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, nin, nout, ksize=3): super().__init__() self.conv1 = nn.Conv2d(nin, nout, ksize, padding=1) self.lrelu1 = nn.LeakyReLU(0.2) self.conv2 = nn.Conv2d(nout, nout, ksize, padding=1) self.lrelu2 = nn.L...
SimpleModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class SimpleModel(nn.Module): def __init__(self): super(SimpleModel, self).__init__() def forward(self, x): return x * 2 def...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data...
Jovonni/jukebox
SimpleModel
false
669
[ "MIT" ]
0
965a6f78aae67506a6e4fcdb205e2c39132e12e0
https://github.com/Jovonni/jukebox/tree/965a6f78aae67506a6e4fcdb205e2c39132e12e0
import torch import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x * 2 def get_inputs(): retu...
InfoLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import math import torch from torch import nn import torch.utils.data class InfoLoss(nn.Module): def __init__(self): super().__init__() def forward(self, x, eps=1e-08): x = torch.mean(x, 0) logN = math.log(float(x.shape[0])) x = x * (x + eps).log() / logN neg_entropy ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._...
Joshua-Schroijen/deepproblog
InfoLoss
false
670
[ "Apache-2.0" ]
0
4ae56f1e860010b7857b29d5bd76fb1555d5e19d
https://github.com/Joshua-Schroijen/deepproblog/tree/4ae56f1e860010b7857b29d5bd76fb1555d5e19d
import math import torch from torch import nn import torch.utils.data class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, eps=1e-08): x = torch.mean(x, 0) logN = math.log(float(x.shape[0])) x = x * (x + eps).log() / logN neg_entropy = x...
LinearFeedforward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=Non...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...
Jonathan-Chin328/genienlp
LinearFeedforward
false
671
[ "BSD-3-Clause" ]
0
6449140bfea2651523abc3500b212c37955aa39e
https://github.com/Jonathan-Chin328/genienlp/tree/6449140bfea2651523abc3500b212c37955aa39e
import torch import torch.nn as nn import torch.utils.data class Linear(nn.Linear): def forward(self, x): size = x.size() return super().forward(x.contiguous().view(-1, size[-1])).view(* size[:-1], -1) class Feedforward(nn.Module): def __init__(self, d_in, d_out, activation=Non...
EntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import math import torch from torch import nn import torch.utils.data class EntropyLoss(nn.Module): def __init__(self): super().__init__() def forward(self, x, eps=1e-08): logN = math.log(float(x.shape[0])) x = x * (x + eps).log() / logN neg_entropy = x.sum(1) return ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._...
Joshua-Schroijen/deepproblog
EntropyLoss
false
672
[ "Apache-2.0" ]
0
4ae56f1e860010b7857b29d5bd76fb1555d5e19d
https://github.com/Joshua-Schroijen/deepproblog/tree/4ae56f1e860010b7857b29d5bd76fb1555d5e19d
import math import torch from torch import nn import torch.utils.data class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, eps=1e-08): logN = math.log(float(x.shape[0])) x = x * (x + eps).log() / logN neg_entropy = x.sum(1) return -neg_e...
BinaryNLLEntropy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss class BinaryNLLEntropy(_Loss): def __init__(self, size_average=True): super(BinaryNLLEntropy, self).__init__() self.size_average = size_average def forward(self, net_output, label_output): """ ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch....
Jupaoqq/Jupaoqq_LaRL
BinaryNLLEntropy
false
673
[ "Apache-2.0" ]
0
ae64adda5627987d71f2948f499daa11e9f309ad
https://github.com/Jupaoqq/Jupaoqq_LaRL/tree/ae64adda5627987d71f2948f499daa11e9f309ad
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss class Model(_Loss): def __init__(self, size_average=True): super().__init__() self.size_average = size_average def forward(self, net_output, label_output): """ :param net_output: batch_size x...
Flatten
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn from itertools import product as product class Flatten(nn.Module): def __init__(self): super(Flatten, self).__init__() def forward(self, x): """ Arguments: x: a float tensor with shape [batch_size, c, h, w]. Returns: ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided...
Juggernaut93/InsightFace-v2
Flatten
false
674
[ "Apache-2.0" ]
0
65e9b8d1f285a87472ffb913bec136d4e046798f
https://github.com/Juggernaut93/InsightFace-v2/tree/65e9b8d1f285a87472ffb913bec136d4e046798f
import torch import torch.nn as nn from itertools import product as product class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): """ Arguments: x: a float tensor with shape [batch_size, c, h, w]. Returns: a float tensor ...
JSD
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import math import torch from torch import nn import torch.utils.data class JSD(nn.Module): def __init__(self): super().__init__() def forward(self, x, eps=1e-08): logN = math.log(float(x.shape[0])) y = torch.mean(x, 0) y = y * (y + eps).log() / logN y = y.sum() ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._...
Joshua-Schroijen/deepproblog
JSD
false
675
[ "Apache-2.0" ]
0
4ae56f1e860010b7857b29d5bd76fb1555d5e19d
https://github.com/Joshua-Schroijen/deepproblog/tree/4ae56f1e860010b7857b29d5bd76fb1555d5e19d
import math import torch from torch import nn import torch.utils.data class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, eps=1e-08): logN = math.log(float(x.shape[0])) y = torch.mean(x, 0) y = y * (y + eps).log() / logN y = y.sum() ...
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.modules import torch.nn import torch.nn.init import torch.optim.lr_scheduler class ContrastiveLoss(nn.Module): """ Contrastive loss function. References: https://github.com/delijati/pytorch-siamese/blob/master/contrastive.py LaTeX: L...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.modules import torch.nn import torch.nn.init import...
JoshuaBeard/netharn
ContrastiveLoss
false
676
[ "Apache-2.0" ]
0
90773542c47363e663ee58f20fd151eb89bc313b
https://github.com/JoshuaBeard/netharn/tree/90773542c47363e663ee58f20fd151eb89bc313b
import torch import torch.nn as nn import torch.nn.modules import torch.nn import torch.nn.init import torch.optim.lr_scheduler class Model(nn.Module): """ Contrastive loss function. References: https://github.com/delijati/pytorch-siamese/blob/master/contrastive.py LaTeX: Let D be th...
DiceCE_Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn from torch.nn import functional as F from torch import sigmoid class DiceCE_Loss(nn.Module): """ Taken from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch """ def __init__(self, weight=None, size_average=True): super(DiceCE_Loss, sel...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch ...
JoaoCarv/holistic_seg
DiceCE_Loss
false
677
[ "MIT" ]
0
ea4787e7e9a36dc5caf198d2be1bd1e71c06d440
https://github.com/JoaoCarv/holistic_seg/tree/ea4787e7e9a36dc5caf198d2be1bd1e71c06d440
import torch from torch import nn from torch.nn import functional as F from torch import sigmoid class Model(nn.Module): """ Taken from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch """ def __init__(self, weight=None, size_average=True): super().__init__() def ...
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class ScaledDotProductAttention(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super(ScaledDotProductAttention, self).__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.d...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
JustinNeumann/pytorch-forecasting
ScaledDotProductAttention
false
678
[ "MIT" ]
0
4f6e449cb3788b856e66c4283398a5db201aa6ff
https://github.com/JustinNeumann/pytorch-forecasting/tree/4f6e449cb3788b856e66c4283398a5db201aa6ff
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dropout: 'float'=None, scale: 'bool'=True): super().__init__() if dropout is not None: self.dropout = nn.Dropout(p=dropout) else: self.dropout = dropout self.softmax = nn.Softmax(...
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn from itertools import product as product class FocalLoss(nn.Module): def __init__(self, gamma=0): super(FocalLoss, self).__init__() self.gamma = gamma self.ce = torch.nn.CrossEntropyLoss() def forward(self, input, target): logp = self.ce(inp...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
Juggernaut93/InsightFace-v2
FocalLoss
false
679
[ "Apache-2.0" ]
0
65e9b8d1f285a87472ffb913bec136d4e046798f
https://github.com/Juggernaut93/InsightFace-v2/tree/65e9b8d1f285a87472ffb913bec136d4e046798f
import torch import torch.nn as nn from itertools import product as product class Model(nn.Module): def __init__(self, gamma=0): super().__init__() self.gamma = gamma self.ce = torch.nn.CrossEntropyLoss() def forward(self, input, target): logp = self.ce(input, target) ...
NormKLLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch as th from torch.nn.modules.loss import _Loss class NormKLLoss(_Loss): def __init__(self, unit_average=False): super(NormKLLoss, self).__init__() self.unit_average = unit_average def forward(self, recog_mu, recog_logvar, prior_mu, prior_logvar): loss = 1.0 +...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dy...
Jupaoqq/Jupaoqq_LaRL
NormKLLoss
false
680
[ "Apache-2.0" ]
0
ae64adda5627987d71f2948f499daa11e9f309ad
https://github.com/Jupaoqq/Jupaoqq_LaRL/tree/ae64adda5627987d71f2948f499daa11e9f309ad
import torch import torch as th from torch.nn.modules.loss import _Loss class Model(_Loss): def __init__(self, unit_average=False): super().__init__() self.unit_average = unit_average def forward(self, recog_mu, recog_logvar, prior_mu, prior_logvar): loss = 1.0 + (recog_logvar - prio...
BlendLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class BlendLinear(nn.Module): def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs): super(BlendLinear, self).__init__() self._layer0 = layer_type(dim_in, dim_out) self._layer1 = layer_type(dim_in, dim_out) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
Justin-Tan/ffjord
BlendLinear
false
681
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs): super().__init__() self._layer0 = layer_type(dim_in, dim_out) self._layer1 = layer_type(dim_in, dim_out) def forward(self, t,...
HidingRes
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.nn.functional as F class ResidualBlock(nn.Module): def __init__(self, channel_num, dilation=1, group=1): super(ResidualBlock, self).__init__() self.conv1 = nn.Conv2d(channel_num, channel_num, 3, 1, pa...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Hwihuni/Deep-Model-Watermarking
HidingRes
false
682
[ "MIT" ]
0
73ea2286ace0aac3d55f6056da38ea2bc38ed00d
https://github.com/Hwihuni/Deep-Model-Watermarking/tree/73ea2286ace0aac3d55f6056da38ea2bc38ed00d
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.nn.functional as F class ResidualBlock(nn.Module): def __init__(self, channel_num, dilation=1, group=1): super().__init__() self.conv1 = nn.Conv2d(channel_num, channel_num, 3, 1, padding= ...
ResampleNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F import torch.nn as nn assert_size_stride = torc...
JustinNeumann/pytorch-forecasting
ResampleNorm
false
683
[ "MIT" ]
0
4f6e449cb3788b856e66c4283398a5db201aa6ff
https://github.com/JustinNeumann/pytorch-forecasting/tree/4f6e449cb3788b856e66c4283398a5db201aa6ff
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
Hidden2Discrete
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class Hidden2Discrete(nn.Module): def __init__(self, input_size, y_size, k_size, is_lstm=False, has_bias=True ): super(Hidden2Discrete, self).__init__() self.y_size = y_size self.k_size = k_size latent_size...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Jupaoqq/Jupaoqq_LaRL
Hidden2Discrete
false
684
[ "Apache-2.0" ]
0
ae64adda5627987d71f2948f499daa11e9f309ad
https://github.com/Jupaoqq/Jupaoqq_LaRL/tree/ae64adda5627987d71f2948f499daa11e9f309ad
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_size, y_size, k_size, is_lstm=False, has_bias=True ): super().__init__() self.y_size = y_size self.k_size = k_size latent_size = self.k_size * self.y_size ...
VGG19
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class VGG19(nn.Module): def __init__(self): super(VGG19, self).__init__() self.conv0_0 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1) self.conv0_1 = nn.Conv2d(in_channels=64, ou...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
Hwa-Jong/VGG_in_Torch
VGG19
false
685
[ "MIT" ]
0
99a922070367ee9b9485c4df397f9413d11841b8
https://github.com/Hwa-Jong/VGG_in_Torch/tree/99a922070367ee9b9485c4df397f9413d11841b8
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() self.conv0_0 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1) self.conv0_1 = nn.Conv2d(in_channels=64, out_channels=...
Upsample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F from torchvision.transforms.functional import * class Upsample(nn.Module): def __init__(self, scale_factor=1, mode='nearest'): super(Upsample, self).__init__() self.scale_factor = scale_factor self.mode = mode def for...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchvision.transforms.functional import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_...
BiggerBinBin/e3d_handpose_x-master
Upsample
false
686
[ "Apache-2.0" ]
0
20d091a8a019d85de26c81d02985868f79d5de84
https://github.com/BiggerBinBin/e3d_handpose_x-master/tree/20d091a8a019d85de26c81d02985868f79d5de84
import torch import torch.nn as nn import torch.nn.functional as F from torchvision.transforms.functional import * class Model(nn.Module): def __init__(self, scale_factor=1, mode='nearest'): super().__init__() self.scale_factor = scale_factor self.mode = mode def forward(self, x): ...
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class Attention(nn.Module): """ Applies an attention mechanism on the output features from the decoder. .. math:: \\begin{array}{ll} x = context*output \\\\ attn = exp(x_i) / sum_j exp(x_j) \\\\ ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
JunhoKim94/speech_hackathon_2019
Attention
false
687
[ "Apache-2.0" ]
0
1cb8de873d48e94f58bd1103c32b977a27d34951
https://github.com/JunhoKim94/speech_hackathon_2019/tree/1cb8de873d48e94f58bd1103c32b977a27d34951
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """ Applies an attention mechanism on the output features from the decoder. .. math:: \\begin{array}{ll} x = context*output \\\\ attn = exp(x_i) / sum_j exp(x_j) \\\\ ...
LunaBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class LunaBlock(nn.Module): def __init__(self, in_channels, conv_channels): super().__init__() self.conv1 = nn.Conv3d(in_channels, conv_channels, kernel_size=3, padding=1, bias=True) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn....
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
JulianKlug/scop
LunaBlock
false
688
[ "MIT" ]
0
b0d6a805a11ee8b4d0f53a4d6a5ec402988298e4
https://github.com/JulianKlug/scop/tree/b0d6a805a11ee8b4d0f53a4d6a5ec402988298e4
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, conv_channels): super().__init__() self.conv1 = nn.Conv3d(in_channels, conv_channels, kernel_size=3, padding=1, bias=True) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv...
Sine
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class Sine(nn.Module): def __init__(self): super().__init__() def forward(self, input): return torch.sin(30 * input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert...
KIMGEONUNG/multi-memory-siren
Sine
false
689
[ "MIT" ]
0
b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
https://github.com/KIMGEONUNG/multi-memory-siren/tree/b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, input): return torch.sin(30 * input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
My_SmoothL1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch class My_SmoothL1Loss(torch.nn.Module): def __init__(self): super(My_SmoothL1Loss, self).__init__() def forward(self, x, y): total_loss = 0 assert x.shape == y.shape z = (x - y).float() mse_mask = (torch.abs(z) < 0.01).float() l1_mask = (torch.abs...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = t...
Jvictor97/AWR-Adaptive-Weighting-Regression
My_SmoothL1Loss
false
690
[ "MIT" ]
0
2c29f8ac3d824edfff07465232ffed8e4d837ebf
https://github.com/Jvictor97/AWR-Adaptive-Weighting-Regression/tree/2c29f8ac3d824edfff07465232ffed8e4d837ebf
import torch class Model(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y): total_loss = 0 assert x.shape == y.shape z = (x - y).float() mse_mask = (torch.abs(z) < 0.01).float() l1_mask = (torch.abs(z) >= 0.01).float() ms...
QNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class QNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=32, fc2_units=16): """Initialize parameters and build model. Params ====== state_si...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
JamesMcGuigan/udacity-deep-reinforcement-learning
QNetwork
false
691
[ "MIT" ]
0
e093db535fb12dbfb8bc2b5764133e1f52bbbccd
https://github.com/JamesMcGuigan/udacity-deep-reinforcement-learning/tree/e093db535fb12dbfb8bc2b5764133e1f52bbbccd
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=32, fc2_units=16): """Initialize parameters and build model. Params ====== state_size ...
SelfAttn
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch as th import torch.nn as nn import torch.nn.functional as F class SelfAttn(nn.Module): def __init__(self, hidden_size): super(SelfAttn, self).__init__() self.query = nn.Linear(hidden_size, 1) def forward(self, keys, values, attn_mask=None): """ :para...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Jupaoqq/Jupaoqq_LaRL
SelfAttn
false
692
[ "Apache-2.0" ]
0
ae64adda5627987d71f2948f499daa11e9f309ad
https://github.com/Jupaoqq/Jupaoqq_LaRL/tree/ae64adda5627987d71f2948f499daa11e9f309ad
import torch import torch as th import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, hidden_size): super().__init__() self.query = nn.Linear(hidden_size, 1) def forward(self, keys, values, attn_mask=None): """ :param attn_inputs: ba...
GatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class GatedConv2d(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None): super(GatedConv2d, self).__init__() self.activation = activation self.sigmoid = ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
Justin-Tan/ffjord
GatedConv2d
false
693
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None): super().__init__() self.activation = activation self.sigmoid = nn.Sigmoid() se...
SpatialGate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Conv3DSimple(nn.Conv3d): def __init__(self, in_planes, out_planes, stride=1, kernel_size=3): padding = (kernel_size - 1) // 2 super(Conv3DSimple, self)....
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...
JoohyungLee0106/rectal_MR_volume_classification
SpatialGate
false
694
[ "MIT" ]
0
d2a7d13dae9fe7255b983cbc210567dd452a936f
https://github.com/JoohyungLee0106/rectal_MR_volume_classification/tree/d2a7d13dae9fe7255b983cbc210567dd452a936f
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Conv3DSimple(nn.Conv3d): def __init__(self, in_planes, out_planes, stride=1, kernel_size=3): padding = (kernel_size - 1) // 2 super().__init__(in_channe...
ConcatSquashLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class ConcatSquashLinear(nn.Module): def __init__(self, dim_in, dim_out): super(ConcatSquashLinear, self).__init__() self._layer = nn.Linear(dim_in, dim_out) self._hyper_bias = nn.Linear(1, dim_out, bias=False) self._hyper...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
Justin-Tan/ffjord
ConcatSquashLinear
false
695
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self._layer = nn.Linear(dim_in, dim_out) self._hyper_bias = nn.Linear(1, dim_out, bias=False) self._hyper_gate = nn.Linear(1, dim_out) de...
GateAddNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.fun...
JustinNeumann/pytorch-forecasting
GateAddNorm
false
696
[ "MIT" ]
0
4f6e449cb3788b856e66c4283398a5db201aa6ff
https://github.com/JustinNeumann/pytorch-forecasting/tree/4f6e449cb3788b856e66c4283398a5db201aa6ff
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
BlendConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class BlendConv2d(nn.Module): def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False, **unused_kwargs): super(BlendConv2d, self).__init__() module = nn.ConvTranspose2d if...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
Justin-Tan/ffjord
BlendConv2d
false
697
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False, **unused_kwargs): super().__init__() module = nn.ConvTranspose2d if transpose else nn.Conv...
HyperConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data def get_activation_name(activation): """Given a string or a `torch.nn.modules.activation` return the name of the activation.""" if isinstance(activation, str): return activation mapper = {nn.LeakyReLU: 'leak...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F import torch.utils.data as...
Justin-Tan/ffjord
HyperConv2d
false
698
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data def get_activation_name(activation): """Given a string or a `torch.nn.modules.activation` return the name of the activation.""" if isinstance(activation, str): return activation mapper = {nn.LeakyReLU: 'leak...
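Note on the HyperConv2d record above: the stored code is truncated, so this is only a rough sketch of the hypernetwork idea the name suggests, where a small network maps the scalar time t to the convolution weights; layer sizes and the call signature are assumptions.

import torch
import torch.nn as nn
import torch.nn.functional as F

class HyperConv2dSketch(nn.Module):
    # Hypothetical sketch: a tiny hypernetwork produces the conv weight and bias from t.
    def __init__(self, dim_in, dim_out, ksize=3, padding=1):
        super().__init__()
        self.dim_in, self.dim_out, self.ksize, self.padding = dim_in, dim_out, ksize, padding
        n_weight = dim_out * dim_in * ksize * ksize
        self._hypernet = nn.Linear(1, n_weight + dim_out)

    def forward(self, t, x):
        params = self._hypernet(t.view(1, 1)).view(-1)
        n_weight = self.dim_out * self.dim_in * self.ksize * self.ksize
        weight = params[:n_weight].view(self.dim_out, self.dim_in, self.ksize, self.ksize)
        bias = params[n_weight:]
        return F.conv2d(x, weight, bias, padding=self.padding)

m = HyperConv2dSketch(3, 8)
print(m(torch.tensor(0.5), torch.randn(2, 3, 16, 16)).shape)  # torch.Size([2, 8, 16, 16])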
GatedLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class GatedLinear(nn.Module): def __init__(self, in_features, out_features): super(GatedLinear, self).__init__() self.layer_f = nn.Linear(in_features, out_features) self.layer_g = nn.Linear(in_features, out_features) def forw...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
Justin-Tan/ffjord
GatedLinear
false
699
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, in_features, out_features): super().__init__() self.layer_f = nn.Linear(in_features, out_features) self.layer_g = nn.Linear(in_features, out_features) def forward(self, x): f...
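Note on the GatedLinear record above: the visible snippet cuts off inside forward, but the two parallel linear layers make the gating pattern clear; a self-contained sketch (the class name is illustrative):

import torch
import torch.nn as nn

class GatedLinearSketch(nn.Module):
    # One branch produces features, the other a sigmoid gate; output is their elementwise product.
    def __init__(self, in_features, out_features):
        super().__init__()
        self.layer_f = nn.Linear(in_features, out_features)
        self.layer_g = nn.Linear(in_features, out_features)

    def forward(self, x):
        return self.layer_f(x) * torch.sigmoid(self.layer_g(x))

print(GatedLinearSketch(4, 4)(torch.randn(2, 4)).shape)  # torch.Size([2, 4])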
ConcatConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class ConcatConv2d(nn.Module): def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False): super(ConcatConv2d, self).__init__() module = nn.ConvTranspose2d if transpose else...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dyn...
Justin-Tan/ffjord
ConcatConv2d
false
700
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False): super().__init__() module = nn.ConvTranspose2d if transpose else nn.Conv2d self._...
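Note on the ConcatConv2d record above: the stored code is truncated, so here is a minimal sketch of the usual time-concatenation convolution, where the scalar t is broadcast to an extra input channel before a single convolution; names and defaults are assumptions.

import torch
import torch.nn as nn

class ConcatConv2dSketch(nn.Module):
    # Broadcast t to one extra channel, concatenate with x, convolve over dim_in + 1 channels.
    def __init__(self, dim_in, dim_out, ksize=3, padding=1):
        super().__init__()
        self._layer = nn.Conv2d(dim_in + 1, dim_out, ksize, padding=padding)

    def forward(self, t, x):
        tt = torch.ones_like(x[:, :1, :, :]) * t
        return self._layer(torch.cat([tt, x], dim=1))

m = ConcatConv2dSketch(3, 8)
print(m(torch.tensor(0.5), torch.randn(2, 3, 16, 16)).shape)  # torch.Size([2, 8, 16, 16])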
NeuralNet1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class NeuralNet1(nn.Module): def __init__(self, input_size, hidden_size): super(NeuralNet1, self).__init__() self.linear1 = nn.Linear(input_size, hidden_size) self.relu = nn.ReLU() self.linear2 = nn.Linear(hidden_size, 1) def forward(self, x...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
KOPFYF/pytorchTutorial
NeuralNet1
false
701
[ "MIT" ]
0
4ed7642049a0fba46edd505a23ffcea9d8e03679
https://github.com/KOPFYF/pytorchTutorial/tree/4ed7642049a0fba46edd505a23ffcea9d8e03679
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_size, hidden_size): super().__init__() self.linear1 = nn.Linear(input_size, hidden_size) self.relu = nn.ReLU() self.linear2 = nn.Linear(hidden_size, 1) def forward(self, x): out = self...
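Note on the NeuralNet1 record above: the snippet is truncated before the forward body, but the layers shown imply the standard Linear-ReLU-Linear(1) head; a runnable sketch, assuming a sigmoid output paired with nn.BCELoss:

import torch
import torch.nn as nn

class NeuralNet1Sketch(nn.Module):
    # Two-layer MLP with a single sigmoid output for binary classification.
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(hidden_size, 1)

    def forward(self, x):
        out = self.linear2(self.relu(self.linear1(x)))
        return torch.sigmoid(out)

model = NeuralNet1Sketch(input_size=28 * 28, hidden_size=5)
y = model(torch.randn(3, 28 * 28))
print(y.shape, nn.BCELoss()(y, torch.ones(3, 1)).item())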
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data class DiceLoss(nn.Module): """DICE loss. """ def __init__(self, size_average=True, reduce=True, smooth=100.0, power=1): super(DiceLoss, self).__init__() self.smooth = smooth self.reduce = reduce self.power = power ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C....
KeeratKG/pytorch_connectomics
DiceLoss
false
702
[ "MIT" ]
0
ba168da6f077ccfbeffcd8936df90ba413895086
https://github.com/KeeratKG/pytorch_connectomics/tree/ba168da6f077ccfbeffcd8936df90ba413895086
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """DICE loss. """ def __init__(self, size_average=True, reduce=True, smooth=100.0, power=1): super().__init__() self.smooth = smooth self.reduce = reduce self.power = power def dice_los...
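Note on the DiceLoss record above: the stored code is truncated, so the following is a minimal soft-Dice sketch consistent with the smooth/reduce/power arguments shown (the power option and per-sample reduction flags are omitted for brevity); it is not the repository's exact implementation.

import torch
import torch.nn as nn

class DiceLossSketch(nn.Module):
    # Soft Dice: 1 - (2 * intersection + smooth) / (|pred| + |target| + smooth), averaged over the batch.
    def __init__(self, smooth=100.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        pred = pred.contiguous().view(pred.size(0), -1)
        target = target.contiguous().view(target.size(0), -1)
        inter = (pred * target).sum(dim=1)
        dice = (2.0 * inter + self.smooth) / (pred.sum(dim=1) + target.sum(dim=1) + self.smooth)
        return (1.0 - dice).mean()

loss = DiceLossSketch()(torch.rand(4, 1, 8, 8), torch.randint(0, 2, (4, 1, 8, 8)).float())
print(loss.item())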
BasicBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data class BasicBlock(nn.Module): expansion = 1 def __init__(self, dim): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False) self.bn1 = nn.GroupNorm(2, dim, eps=0.0001) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Justin-Tan/ffjord
BasicBlock
false
703
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): expansion = 1 def __init__(self, dim): super().__init__() self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False) self.bn1 = nn.GroupNorm(2, dim, eps=0.0001) self.relu = nn.ReLU(i...
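Note on the BasicBlock record above: the snippet shows a residual block with GroupNorm in place of BatchNorm; a self-contained sketch of that structure (the tail of the original forward is truncated, so the placement of the second norm and the skip is assumed):

import torch
import torch.nn as nn

class BasicBlockSketch(nn.Module):
    # Two 3x3 convs with GroupNorm, ReLU in between, identity skip added before the final ReLU.
    expansion = 1

    def __init__(self, dim):
        super().__init__()
        self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.GroupNorm(2, dim, eps=0.0001)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.GroupNorm(2, dim, eps=0.0001)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + x)

print(BasicBlockSketch(4)(torch.randn(2, 4, 8, 8)).shape)  # torch.Size([2, 4, 8, 8])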
FactorizedReduce
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.utils.data import torch.utils from matplotlib import cm as cm from torch.nn.parallel import * from torchvision.models import * from torchvision.datasets import * def get_norm_layer(norm, C): if norm in [None, '', 'none']: norm_layer = nn.Identity() elif ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
CrispyHarder/ppuda
FactorizedReduce
false
704
[ "MIT" ]
0
15950ba297188163eaadd8ab69268ee7f6ffcf2a
https://github.com/CrispyHarder/ppuda/tree/15950ba297188163eaadd8ab69268ee7f6ffcf2a
import torch import torch.nn as nn import torch.utils.data import torch.utils from matplotlib import cm as cm from torch.nn.parallel import * from torchvision.models import * from torchvision.datasets import * def get_norm_layer(norm, C): if norm in [None, '', 'none']: norm_layer = nn.Identity() elif ...
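Note on the FactorizedReduce record above: the stored code is truncated, so here is a sketch of the DARTS-style factorized reduction the name refers to, assuming two stride-2 1x1 convolutions (the second on a one-pixel-shifted input) concatenated on channels; the configurable norm layer is simplified to BatchNorm2d.

import torch
import torch.nn as nn

class FactorizedReduceSketch(nn.Module):
    # Halve the spatial resolution while keeping information from both pixel offsets.
    def __init__(self, c_in, c_out):
        super().__init__()
        self.relu = nn.ReLU(inplace=False)
        self.conv1 = nn.Conv2d(c_in, c_out // 2, 1, stride=2, bias=False)
        self.conv2 = nn.Conv2d(c_in, c_out - c_out // 2, 1, stride=2, bias=False)
        self.bn = nn.BatchNorm2d(c_out)

    def forward(self, x):
        x = self.relu(x)
        out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
        return self.bn(out)

print(FactorizedReduceSketch(8, 16)(torch.randn(2, 8, 32, 32)).shape)  # torch.Size([2, 16, 16, 16])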
AddNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F import torch.nn as nn assert_size_stride = torc...
JustinNeumann/pytorch-forecasting
AddNorm
false
705
[ "MIT" ]
0
4f6e449cb3788b856e66c4283398a5db201aa6ff
https://github.com/JustinNeumann/pytorch-forecasting/tree/4f6e449cb3788b856e66c4283398a5db201aa6ff
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
VOC_VGG16_DeepLabV2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class FCReLUDrop(nn.Sequential): def __init__(self, in_ch, out_ch, kernel_size, dilation, padding, layer_idx, branch_idx): super(FCReLUDrop, self).__init__() self.add_module(f'fc{layer_idx}_{branch_idx}', nn.Conv2d(in_ch, out_ch, kernel_size, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
HAL-42/DeepLabV2YQ
VOC_VGG16_DeepLabV2
false
706
[ "Apache-2.0" ]
0
96bfcf1055da7adeb4a7c1ed841f6ec29957be59
https://github.com/HAL-42/DeepLabV2YQ/tree/96bfcf1055da7adeb4a7c1ed841f6ec29957be59
import torch from torch import nn class FCReLUDrop(nn.Sequential): def __init__(self, in_ch, out_ch, kernel_size, dilation, padding, layer_idx, branch_idx): super().__init__() self.add_module(f'fc{layer_idx}_{branch_idx}', nn.Conv2d(in_ch, out_ch, kernel_size, stride=1, paddin...
BatchLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn from collections import OrderedDict class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited from `MetaModule` are fully compa...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
KIMGEONUNG/multi-memory-siren
BatchLinear
false
707
[ "MIT" ]
0
b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
https://github.com/KIMGEONUNG/multi-memory-siren/tree/b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
import torch import torch.nn as nn from collections import OrderedDict class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited from `MetaModule` are fully compa...
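Note on the BatchLinear record above: only the MetaModule base class is partially visible, so this is a simplified sketch of the meta-learning idea, a linear layer that can run with externally supplied ("fast") parameters; the real implementation may handle batched parameter tensors differently.

import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict

class BatchLinearSketch(nn.Linear):
    # Behaves like nn.Linear, but an OrderedDict of parameters can be swapped in per call.
    def forward(self, input, params=None):
        if params is None:
            params = OrderedDict(self.named_parameters())
        weight = params['weight']
        bias = params.get('bias', None)
        return F.linear(input, weight, bias)

layer = BatchLinearSketch(4, 3)
fast_weights = OrderedDict(weight=torch.randn(3, 4), bias=torch.zeros(3))
print(layer(torch.randn(2, 4)).shape, layer(torch.randn(2, 4), params=fast_weights).shape)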
BinaryReg
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data class BinaryReg(nn.Module): """Regularization for encouraging the outputs to be binary. """ def __init__(self, alpha=1.0): super().__init__() self.alpha = alpha def forward(self, pred): diff = pred - 0.5 diff ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
KeeratKG/pytorch_connectomics
BinaryReg
false
708
[ "MIT" ]
0
ba168da6f077ccfbeffcd8936df90ba413895086
https://github.com/KeeratKG/pytorch_connectomics/tree/ba168da6f077ccfbeffcd8936df90ba413895086
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """Regularization for encouraging the outputs to be binary. """ def __init__(self, alpha=1.0): super().__init__() self.alpha = alpha def forward(self, pred): diff = pred - 0.5 diff = to...
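Note on the BinaryReg record above: the snippet stops right after diff = pred - 0.5, so the rest is assumed; a common form of this regularizer penalizes the reciprocal of the clamped distance to 0.5, pushing outputs toward 0 or 1.

import torch
import torch.nn as nn

class BinaryRegSketch(nn.Module):
    # Outputs near 0.5 get a large penalty, near-binary outputs a small one.
    def __init__(self, alpha=1.0):
        super().__init__()
        self.alpha = alpha

    def forward(self, pred):
        diff = pred - 0.5
        diff = torch.clamp(torch.abs(diff), min=0.01)  # the floor avoids division blow-up
        return self.alpha * (1.0 / diff).mean()

print(BinaryRegSketch(alpha=0.1)(torch.rand(4, 1, 8, 8)).item())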
JaccardLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.data class JaccardLoss(nn.Module): """Jaccard loss. """ def __init__(self, size_average=True, reduce=True, smooth=1.0): super(JaccardLoss, self).__init__() self.smooth = smooth self.reduce = reduce def jaccard_loss(self, p...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C....
KeeratKG/pytorch_connectomics
JaccardLoss
false
709
[ "MIT" ]
0
ba168da6f077ccfbeffcd8936df90ba413895086
https://github.com/KeeratKG/pytorch_connectomics/tree/ba168da6f077ccfbeffcd8936df90ba413895086
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """Jaccard loss. """ def __init__(self, size_average=True, reduce=True, smooth=1.0): super().__init__() self.smooth = smooth self.reduce = reduce def jaccard_loss(self, pred, target): l...
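Note on the JaccardLoss record above: the stored code is truncated, so the following is a minimal soft-IoU sketch consistent with the smooth/reduce arguments shown; per-sample reduction options are omitted.

import torch
import torch.nn as nn

class JaccardLossSketch(nn.Module):
    # Soft Jaccard: 1 - (intersection + smooth) / (union + smooth), with union = |P| + |T| - intersection.
    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        pred = pred.contiguous().view(pred.size(0), -1)
        target = target.contiguous().view(target.size(0), -1)
        inter = (pred * target).sum(dim=1)
        union = pred.sum(dim=1) + target.sum(dim=1) - inter
        return (1.0 - (inter + self.smooth) / (union + self.smooth)).mean()

loss = JaccardLossSketch()(torch.rand(4, 1, 8, 8), torch.randint(0, 2, (4, 1, 8, 8)).float())
print(loss.item())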
L2Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim from torch.nn import functional as F class L2Norm(nn.Module): def __init__(self): super().__init__() def forward(self, x): assert x.dim( ) == 2, 'the input tensor of L2Norm must be the shape of [B, C]'...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import...
Khanhnn00/image-retrieval
L2Norm
false
710
[ "MIT" ]
0
7c6c5fe9ec5fd6cb0f0906027fd80787e2ad1cf8
https://github.com/Khanhnn00/image-retrieval/tree/7c6c5fe9ec5fd6cb0f0906027fd80787e2ad1cf8
import torch import torch.nn as nn import torch.nn.parallel import torch.optim from torch.nn import functional as F class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): assert x.dim( ) == 2, 'the input tensor of L2Norm must be the shape of [B, C]' ...
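Note on the L2Norm record above: given the visible assertion on a [B, C] input, the layer almost certainly L2-normalizes each row; a short sketch using F.normalize:

import torch
import torch.nn as nn
from torch.nn import functional as F

class L2NormSketch(nn.Module):
    # Rescale each [B, C] row to unit Euclidean norm.
    def forward(self, x):
        assert x.dim() == 2, 'the input tensor of L2Norm must be the shape of [B, C]'
        return F.normalize(x, p=2, dim=1)

out = L2NormSketch()(torch.randn(4, 16))
print(out.norm(dim=1))  # all ones, up to floating-point error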
MetaBilinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited f...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterp...
KIMGEONUNG/multi-memory-siren
MetaBilinear
false
711
[ "MIT" ]
0
b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
https://github.com/KIMGEONUNG/multi-memory-siren/tree/b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited f...
SimpleGCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter import torch.nn import torch.autograd class SimpleGCN(nn.Module): """A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 """ ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn from torch.nn.parameter import Parameter from ...
Kh4L/kaolin
SimpleGCN
false
712
[ "ECL-2.0", "Apache-2.0" ]
0
83002fedc67e1c9112a8f834ffb4f8a890e6042a
https://github.com/Kh4L/kaolin/tree/83002fedc67e1c9112a8f834ffb4f8a890e6042a
import math import torch import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn import Parameter import torch.nn import torch.autograd class Model(nn.Module): """A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 """ def...
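Note on the SimpleGCN record above: the docstring cites Kipf & Welling (arXiv:1609.02907), so a minimal sketch of that graph convolution, feature projection followed by adjacency aggregation, is given below; the exact initialization and bias handling of the original are assumed.

import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter

class SimpleGCNSketch(nn.Module):
    # support = X W, then aggregate over neighbours: A (X W) + b.
    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = Parameter(torch.empty(in_features, out_features))
        self.bias = Parameter(torch.zeros(out_features))
        stdv = 1.0 / math.sqrt(out_features)
        nn.init.uniform_(self.weight, -stdv, stdv)

    def forward(self, x, adj):
        support = x @ self.weight
        return adj @ support + self.bias

x, adj = torch.randn(5, 8), torch.eye(5)  # 5 nodes, identity adjacency for the demo
print(SimpleGCNSketch(8, 16)(x, adj).shape)  # torch.Size([5, 16])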
GatedResidualNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.fun...
JustinNeumann/pytorch-forecasting
GatedResidualNetwork
false
713
[ "MIT" ]
0
4f6e449cb3788b856e66c4283398a5db201aa6ff
https://github.com/JustinNeumann/pytorch-forecasting/tree/4f6e449cb3788b856e66c4283398a5db201aa6ff
import torch import torch.nn.functional as F import torch.nn as nn class TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_...
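Note on the GatedResidualNetwork record above: only the TimeDistributedInterpolation helper is visible, so this is a simplified sketch of the Temporal-Fusion-Transformer-style GRN the name refers to: dense, ELU, dense, dropout, GLU gate, residual, LayerNorm; sizes and the skip projection are assumptions.

import torch
import torch.nn as nn

class GatedResidualNetworkSketch(nn.Module):
    # Two dense layers with ELU, a GLU gate, a residual to the (projected) input, LayerNorm on top.
    def __init__(self, input_size, hidden_size, output_size, dropout=0.1):
        super().__init__()
        self.skip = nn.Linear(input_size, output_size) if input_size != output_size else nn.Identity()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.elu = nn.ELU()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(dropout)
        self.gate = nn.Sequential(nn.Linear(hidden_size, 2 * output_size), nn.GLU(dim=-1))
        self.norm = nn.LayerNorm(output_size)

    def forward(self, x):
        h = self.dropout(self.fc2(self.elu(self.fc1(x))))
        return self.norm(self.gate(h) + self.skip(x))

print(GatedResidualNetworkSketch(4, 8, 4)(torch.randn(2, 10, 4)).shape)  # torch.Size([2, 10, 4])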
L1_log
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn.functional as F import torch.nn as nn class L1_log(nn.Module): def __init__(self): super(L1_log, self).__init__() def forward(self, fake, real): if not fake.shape == real.shape: _, _, H, W = real.shape fake = F.upsample(fake, size=(H, W), ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
Khoronus/MonoDepth-FPN-PyTorch
L1_log
false
714
[ "MIT" ]
0
6e41e297723d1490c537e04afff905c61d6f0ff8
https://github.com/Khoronus/MonoDepth-FPN-PyTorch/tree/6e41e297723d1490c537e04afff905c61d6f0ff8
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, fake, real): if not fake.shape == real.shape: _, _, H, W = real.shape fake = F.upsample(fake, size=(H, W), mode='bilinea...
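Note on the L1_log record above: the snippet shows the resize branch but cuts off before the loss itself; a sketch assuming the usual mean absolute error between log depths (F.interpolate stands in for the deprecated F.upsample used in the original):

import torch
import torch.nn as nn
import torch.nn.functional as F

class L1LogSketch(nn.Module):
    # Resize the prediction to the target resolution if needed, then take L1 in log space.
    def forward(self, fake, real):
        if fake.shape != real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear', align_corners=True)
        return torch.mean(torch.abs(torch.log(real) - torch.log(fake)))

loss = L1LogSketch()(torch.rand(2, 1, 16, 16) + 0.1, torch.rand(2, 1, 32, 32) + 0.1)
print(loss.item())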
Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Conv(nn.Module): def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True): super(Conv, self).__init__() self.inp_dim = inp_dim self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=(kerne...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
Jvictor97/AWR-Adaptive-Weighting-Regression
Conv
false
715
[ "MIT" ]
0
2c29f8ac3d824edfff07465232ffed8e4d837ebf
https://github.com/Jvictor97/AWR-Adaptive-Weighting-Regression/tree/2c29f8ac3d824edfff07465232ffed8e4d837ebf
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True): super().__init__() self.inp_dim = inp_dim self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=(kernel_size - ...
MetaLayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited f...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_...
KIMGEONUNG/multi-memory-siren
MetaLayerNorm
false
716
[ "MIT" ]
0
b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
https://github.com/KIMGEONUNG/multi-memory-siren/tree/b372e1b9abe2b3fb502d808eb3a47c2ad287ca3b
import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict class MetaModule(nn.Module): """ Base class for PyTorch meta-learning modules. These modules accept an additional argument `params` in their `forward` method. Notes ----- Objects inherited f...
RMSE_log
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn.functional as F import torch.nn as nn class RMSE_log(nn.Module): def __init__(self): super(RMSE_log, self).__init__() def forward(self, fake, real): if not fake.shape == real.shape: _, _, H, W = real.shape fake = F.upsample(fake, size=(H, ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
Khoronus/MonoDepth-FPN-PyTorch
RMSE_log
false
717
[ "MIT" ]
0
6e41e297723d1490c537e04afff905c61d6f0ff8
https://github.com/Khoronus/MonoDepth-FPN-PyTorch/tree/6e41e297723d1490c537e04afff905c61d6f0ff8
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, fake, real): if not fake.shape == real.shape: _, _, H, W = real.shape fake = F.upsample(fake, size=(H, W), mode='bilinea...
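Note on the RMSE_log record above: the same resize pattern as L1_log, with the loss assumed to be the root of the mean squared log difference.

import torch
import torch.nn as nn
import torch.nn.functional as F

class RMSELogSketch(nn.Module):
    # Root-mean-square error computed on log depths, with optional bilinear resize of the prediction.
    def forward(self, fake, real):
        if fake.shape != real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear', align_corners=True)
        return torch.sqrt(torch.mean((torch.log(real) - torch.log(fake)) ** 2))

print(RMSELogSketch()(torch.rand(2, 1, 16, 16) + 0.1, torch.rand(2, 1, 16, 16) + 0.1).item())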
NormalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class NormalLoss(nn.Module): def __init__(self): super(NormalLoss, self).__init__() def forward(self, grad_fake, grad_real): prod = (grad_fake[:, :, None, :] @ grad_real[:, :, :, None]).squeeze(-1 ).squeeze(-1) fake_norm = torch.sqrt(tor...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
Khoronus/MonoDepth-FPN-PyTorch
NormalLoss
false
719
[ "MIT" ]
0
6e41e297723d1490c537e04afff905c61d6f0ff8
https://github.com/Khoronus/MonoDepth-FPN-PyTorch/tree/6e41e297723d1490c537e04afff905c61d6f0ff8
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, grad_fake, grad_real): prod = (grad_fake[:, :, None, :] @ grad_real[:, :, :, None]).squeeze(-1 ).squeeze(-1) fake_norm = torch.sqrt(torch.sum(grad_fake ** 2...
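Note on the NormalLoss record above: the visible lines compute a per-position inner product and the norm of the predicted gradients, which is the start of a cosine-similarity loss; a sketch completing that pattern (the final reduction is assumed):

import torch
import torch.nn as nn

class NormalLossSketch(nn.Module):
    # Penalize 1 - cosine similarity between predicted and real gradient/normal vectors.
    def forward(self, grad_fake, grad_real):
        prod = (grad_fake[:, :, None, :] @ grad_real[:, :, :, None]).squeeze(-1).squeeze(-1)
        fake_norm = torch.sqrt(torch.sum(grad_fake ** 2, dim=-1))
        real_norm = torch.sqrt(torch.sum(grad_real ** 2, dim=-1))
        return 1.0 - torch.mean(prod / (fake_norm * real_norm))

print(NormalLossSketch()(torch.randn(2, 5, 3), torch.randn(2, 5, 3)).item())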