python_code stringlengths 0 1.02M | repo_name stringlengths 9 48 | file_path stringlengths 5 114 |
|---|---|---|
"""
Poison images by adding a mask
"""
from typing import Tuple
from dataclasses import replace
import numpy as np
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler ... | ffcv-main | ffcv/transforms/poisoning.py |
"""
Cutout augmentation (https://arxiv.org/abs/1708.04552)
"""
import numpy as np
from typing import Callable, Optional, Tuple
from dataclasses import replace
from ffcv.pipeline.compiler import Compiler
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.s... | ffcv-main | ffcv/transforms/cutout.py |
"""
Image normalization
"""
from collections.abc import Sequence
from typing import Tuple
import numpy as np
import torch as ch
from numpy import dtype
from numpy.random import rand
from dataclasses import replace
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from... | ffcv-main | ffcv/transforms/normalize.py |
from .cutout import Cutout
from .flip import RandomHorizontalFlip
from .ops import ToTensor, ToDevice, ToTorchImage, Convert, View
from .common import Squeeze
from .random_resized_crop import RandomResizedCrop
from .poisoning import Poison
from .replace_label import ReplaceLabel
from .normalize import NormalizeImage
fr... | ffcv-main | ffcv/transforms/__init__.py |
"""
General operations:
- Collation
- Conversion to PyTorch Tensor
- Change device of Tensor
"""
import torch as ch
import numpy as np
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ... | ffcv-main | ffcv/transforms/ops.py |
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from dataclasses import replace
class Squeeze(Operation):
"""Remove given dimensions of input of size 1.
Operates on tensors.
P... | ffcv-main | ffcv/transforms/common.py |
"""
Random horizontal flip
"""
from dataclasses import replace
from numpy.random import rand
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
class ... | ffcv-main | ffcv/transforms/flip.py |
"""
Wrapper for a torch.nn.Module
"""
import torch as ch
from numpy.random import permutation, rand
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
class ModuleWrapper(Operation):
"""T... | ffcv-main | ffcv/transforms/module.py |
"""
Random resized crop, similar to torchvision.transforms.RandomResizedCrop
"""
from dataclasses import replace
from .utils import fast_crop
import numpy as np
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline... | ffcv-main | ffcv/transforms/random_resized_crop.py |
"""
Replace label
"""
from typing import Tuple
import numpy as np
from dataclasses import replace
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
... | ffcv-main | ffcv/transforms/replace_label.py |
ffcv-main | ffcv/transforms/utils/__init__.py | |
import ctypes
from numba import njit
import numpy as np
from ...libffcv import ctypes_resize
@njit(inline='always')
def resize_crop(source, start_row, end_row, start_col, end_col, destination):
ctypes_resize(0,
source.ctypes.data,
source.shape[0], source.shape[1],
... | ffcv-main | ffcv/transforms/utils/fast_crop.py |
from .loader import Loader, OrderOption
__all__ = ['Loader', 'OrderOption'] | ffcv-main | ffcv/loader/__init__.py |
from collections import defaultdict
from threading import Thread, Event
from queue import Queue, Full
from contextlib import nullcontext
from typing import Sequence, TYPE_CHECKING
import torch as ch
from ..traversal_order.quasi_random import QuasiRandom
from ..utils import chunks
from ..pipeline.compiler import Compi... | ffcv-main | ffcv/loader/epoch_iterator.py |
"""
FFCV loader
"""
import enum
from os import environ
import ast
from multiprocessing import cpu_count
from re import sub
from typing import Any, Callable, Mapping, Sequence, Type, Union, Literal
from collections import defaultdict
from enum import Enum, unique, auto
from ffcv.fields.base import Field
import torch as... | ffcv-main | ffcv/loader/loader.py |
from abc import ABCMeta, abstractmethod
from contextlib import AbstractContextManager
class Benchmark(AbstractContextManager, metaclass=ABCMeta):
def __init__(self, **kwargs):
pass
@abstractmethod
def run(self):
raise NotImplemented() | ffcv-main | ffcv/benchmarks/benchmark.py |
from itertools import product
from time import time
from collections import defaultdict
from contextlib import redirect_stderr
import pathlib
import numpy as np
from tqdm import tqdm
from .benchmark import Benchmark
ALL_SUITES = {}
class FakeSink(object):
def write(self, *args):
pass
def writelines(... | ffcv-main | ffcv/benchmarks/decorator.py |
ffcv-main | ffcv/benchmarks/__init__.py | |
import argparse
import pandas as pd
from terminaltables import SingleTable
from .suites import *
from .decorator import run_all
parser = argparse.ArgumentParser(description='Run ffcv micro benchmarks')
parser.add_argument('--runs', '-n', type=int,
help='Use the median of --runs runs of each test'... | ffcv-main | ffcv/benchmarks/__main__.py |
from os import path
import numpy as np
import cv2
from numpy.core.numeric import full
from ..decorator import benchmark
from ..benchmark import Benchmark
from ...pipeline.compiler import Compiler
from ...libffcv import imdecode
@benchmark({
'n': [500],
'source_image': ['../../../test_data/pig.png'],
'i... | ffcv-main | ffcv/benchmarks/suites/jpeg_decode.py |
import logging
import os
from tempfile import NamedTemporaryFile
from time import sleep, time
import numpy as np
from assertpy import assert_that
from ffcv.fields import BytesField, IntField, RGBImageField
from ffcv.memory_managers import OSCacheManager
from ffcv.pipeline.compiler import Compiler
from ffcv.reader impo... | ffcv-main | ffcv/benchmarks/suites/image_read.py |
from os import listdir
from os.path import dirname
# Auto-discover benchmark suite modules: export every .py file in this
# package directory (minus its ".py" suffix), skipping dunder files such
# as __init__.py, so `from .suites import *` pulls in all suites.
__all__ = [i[:-3] for i in listdir(dirname(__file__)) if not i.startswith('__') and i.endswith('.py')]
| ffcv-main | ffcv/benchmarks/suites/__init__.py |
import os
from tempfile import NamedTemporaryFile
from time import sleep, time
import numpy as np
from tqdm import tqdm
from assertpy import assert_that
from torch.utils.data import Dataset
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import BytesField, IntField
from ffcv.pipe... | ffcv-main | ffcv/benchmarks/suites/memory_read.py |
from abc import ABCMeta, abstractmethod
from dataclasses import replace
from typing import Optional, Callable, TYPE_CHECKING, Tuple, Type
import cv2
import numpy as np
from numba.typed import Dict
from PIL.Image import Image
from .base import Field, ARG_TYPE
from ..pipeline.operation import Operation
from ..pipeline.... | ffcv-main | ffcv/fields/rgb_image.py |
from .basics import FloatDecoder, IntDecoder
from .ndarray import NDArrayDecoder
from .rgb_image import RandomResizedCropRGBImageDecoder, CenterCropRGBImageDecoder, SimpleRGBImageDecoder
from .bytes import BytesDecoder
__all__ = ['FloatDecoder', 'IntDecoder', 'NDArrayDecoder', 'RandomResizedCropRGBImageDecoder',
... | ffcv-main | ffcv/fields/decoders.py |
from .base import Field
from .basics import FloatField, IntField
from .rgb_image import RGBImageField
from .bytes import BytesField
from .ndarray import NDArrayField
from .json import JSONField
__all__ = ['Field', 'BytesField', 'IntField', 'FloatField', 'RGBImageField',
'NDArrayField', 'JSONField'] | ffcv-main | ffcv/fields/__init__.py |
from typing import Callable, TYPE_CHECKING, Tuple, Type
import json
from dataclasses import replace
import numpy as np
from .base import Field, ARG_TYPE
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
from ..pipeline.allocation_query import Alloca... | ffcv-main | ffcv/fields/ndarray.py |
from typing import Callable, TYPE_CHECKING, Tuple, Type
from dataclasses import replace
import numpy as np
from .base import Field, ARG_TYPE
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.allocation_query import AllocationQuery
if TYPE_CHECKING:
from ..memory_manage... | ffcv-main | ffcv/fields/basics.py |
from typing import Callable, TYPE_CHECKING, Tuple, Type
from dataclasses import replace
import numpy as np
from .base import Field, ARG_TYPE
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
from ..pipeline.allocation_query import AllocationQuery
fr... | ffcv-main | ffcv/fields/bytes.py |
import json
import torch as ch
import numpy as np
from .bytes import BytesField
ENCODING = 'utf8'
SEPARATOR = '\0' # Null byte
class JSONField(BytesField):
"""A subclass of :class:`~ffcv.fields.BytesField` that encodes JSON data.
The writer expects to be passed a dict that is compatible with the JSON spec... | ffcv-main | ffcv/fields/json.py |
from __future__ import annotations
from typing import Type
import numpy as np
from abc import ABC, abstractmethod
from ..pipeline.operation import Operation
ARG_TYPE = np.dtype([('', '<u1', 1024)])
class Field(ABC):
"""
Abstract Base Class for implementing fields (e.g., images, integers).
Each dataset e... | ffcv-main | ffcv/fields/base.py |
"""
Example of defining a custom (image) transform using FFCV.
For tutorial, see https://docs.ffcv.io/ffcv_examples/transform_with_inds.html.
"""
from dataclasses import replace
import time
from typing import Callable, Optional, Tuple
import numpy as np
import torchvision
from ffcv.fields import IntField, RGBImageFie... | ffcv-main | examples/docs_examples/transform_with_inds.py |
"""
Example of using FFCV to speed up large scale linear regression.
For tutorial, see https://docs.ffcv.io/ffcv_examples/linear_regression.html.
"""
from tqdm import tqdm
import time
import numpy as np
import pickle as pkl
import torch as ch
from torch.utils.data import TensorDataset, DataLoader
from ffcv.fields impo... | ffcv-main | examples/docs_examples/linear_regression.py |
"""
Example of defining a custom (image) transform using FFCV.
For tutorial, see https://docs.ffcv.io/ffcv_examples/custom_transforms.html.
"""
import time
import numpy as np
import torchvision
from ffcv.fields import IntField, RGBImageField
from ffcv.fields.decoders import SimpleRGBImageDecoder
from ffcv.loader impo... | ffcv-main | examples/docs_examples/custom_transform.py |
"""
Fast training script for CIFAR-10 using FFCV.
For tutorial, see https://docs.ffcv.io/ffcv_examples/cifar10.html.
First, from the same directory, run:
`python write_datasets.py --data.train_dataset [TRAIN_PATH] \
--data.val_dataset [VAL_PATH]`
to generate the FFCV-formatted versi... | ffcv-main | examples/cifar/train_cifar.py |
from argparse import ArgumentParser
from typing import List
import time
import numpy as np
from tqdm import tqdm
import torch as ch
import torchvision
from fastargs import get_current_config
from fastargs.decorators import param
from fastargs import Param, Section
from fastargs.validation import And, OneOf
from ffcv... | ffcv-main | examples/cifar/write_datasets.py |
import random
import torch
import torch.linalg
import numpy as np
class BlackHole(object):
    """A universal no-op sink.

    Attribute writes are silently discarded, and every attribute access or
    call simply yields the object itself, so arbitrarily chained usage
    (``hole.a.b(c).d``) never raises.
    """

    def __getattr__(self, name):
        # Any missing attribute resolves back to the sink itself.
        return self

    def __call__(self, *args, **kwargs):
        # Calling the sink is also a no-op that stays chainable.
        return self

    def __setattr__(self, name, value):
        # Swallow all writes.
        pass
def seed_all(seed):
torch.backends.cudnn.determinist... | binding-ddg-predictor-main | utils/misc.py |
import warnings
import torch
from Bio import BiopythonWarning
from Bio.PDB import Selection
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import three_to_one, three_to_index, is_aa
NON_STANDARD_SUBSTITUTIONS = {
'2AS':'ASP', '3AH':'HIS', '5HP':'GLU', 'ACL':'ARG', 'AGM':'ARG', 'AIB':'ALA', 'ALM'... | binding-ddg-predictor-main | utils/protein.py |
import math
import torch
from torch.utils.data._utils.collate import default_collate
from .protein import ATOM_CA, parse_pdb
class PaddingCollate(object):
def __init__(self, length_ref_key='mutation_mask', pad_values={'aa': 20, 'pos14': float('999'), 'icode': ' ', 'chain_id': '-'}, donot_pad={'foldx'}, eight=Fa... | binding-ddg-predictor-main | utils/data.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.residue import PerResidueEncoder
from models.attention import GAEncoder
from models.common import get_pos_CB, construct_3d_basis
from utils.protein import ATOM_N, ATOM_CA, ATOM_C
class ComplexEncoder(nn.Module):
def __init__(self, cf... | binding-ddg-predictor-main | models/predictor.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .common import mask_zero, global_to_local, local_to_global, normalize_vector
def _alpha_from_logits(logits, mask, inf=1e5):
"""
Args:
logits: Logit matrices, (N, L_i, L_j, num_heads).
mask: Masks, (N,... | binding-ddg-predictor-main | models/attention.py |
import torch
import torch.nn as nn
from models.common import PositionalEncoding, construct_3d_basis, global_to_local
class PerResidueEncoder(nn.Module):
def __init__(self, feat_dim):
super().__init__()
self.aatype_embed = nn.Embedding(21, feat_dim)
self.torsion_embed = PositionalEncoding... | binding-ddg-predictor-main | models/residue.py |
import torch
import torch.nn as nn
from utils.protein import ATOM_CA, ATOM_CB
def get_pos_CB(pos14, atom_mask):
"""
Args:
pos14: (N, L, 14, 3)
atom_mask: (N, L, 14)
"""
N, L = pos14.shape[:2]
mask_CB = atom_mask[:, :, ATOM_CB] # (N, L)
mask_CB = mask_CB[:, :, None].expand(N... | binding-ddg-predictor-main | models/common.py |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import argparse
import torch
from models.predictor import DDGPredictor
from utils.misc import *
from utils.data import *
from utils.protein import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argume... | binding-ddg-predictor-main | scripts/predict.py |
from setuptools import setup, find_packages
setup(
name = 'vit-pytorch',
packages = find_packages(exclude=['examples']),
version = '1.4.5 ',
license='MIT',
description = 'Vision Transformer (ViT) - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrai... | vit-pytorch-main | setup.py |
import torch
from vit_pytorch import ViT
def test():
v = ViT(
image_size = 256,
patch_size = 32,
num_classes = 1000,
dim = 1024,
depth = 6,
heads = 16,
mlp_dim = 2048,
dropout = 0.1,
emb_dropout = 0.1
)
img = torch.randn(1, 3, 256, 25... | vit-pytorch-main | tests/test.py |
from functools import partial
import torch
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, length = 1):
return... | vit-pytorch-main | vit_pytorch/max_vit.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# feedforward
class FeedForward(nn.Module):
d... | vit-pytorch-main | vit_pytorch/cross_vit.py |
from functools import partial
import torch
from torch import nn, einsum
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
# helpers
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else ((val,) * depth)
# classes
class LayerNorm(nn.Module):
def __init__(self, di... | vit-pytorch-main | vit_pytorch/nest.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pair(t):
return t if isinstance(t, tuple) ... | vit-pytorch-main | vit_pytorch/mp3.py |
import torch
from torch import nn, einsum
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
import torch.nn.functional as F
# helpers
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
# cross embed layer
class CrossEmbedLayer(nn.Module):
... | vit-pytorch-main | vit_pytorch/crossformer.py |
import torch
import torch.nn as nn
from einops import rearrange
from einops.layers.torch import Reduce
# helpers
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1) convolution -> BatchNorm2d -> SiLU, as a Sequential.

    Args:
        inp: number of input channels.
        oup: number of output channels.
    """
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),  # bias folded into BN
        nn.BatchNorm2d(oup),
        nn.SiLU(),
    ]
    return nn.Sequential(*layers)
def conv_nxn_bn(inp, oup, kernel_size=3, stride... | vit-pytorch-main | vit_pytorch/mobile_vit.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Laye... | vit-pytorch-main | vit_pytorch/deepvit.py |
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
def pair(t):
    """Return *t* unchanged if it is already a tuple, else duplicate it as (t, t)."""
    if isinstance(t, tuple):
        return t
    return (t, t)
class ViT(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, transformer, pool = 'cls', channels = 3):
sup... | vit-pytorch-main | vit_pytorch/efficient.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import repeat
from vit_pytorch.vit import Transformer
class MAE(nn.Module):
def __init__(
self,
*,
encoder,
decoder_dim,
masking_ratio = 0.75,
decoder_depth = 1,
decoder_heads = 8,... | vit-pytorch-main | vit_pytorch/mae.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helper methods
def group_dict_by_key(cond, d):
return_val = [dict(), dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not ma... | vit-pytorch-main | vit_pytorch/cvt.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
def posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32):
y, x = torch.meshgrid... | vit-pytorch-main | vit_pytorch/simple_vit_with_qk_norm.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helper methods
def group_dict_by_key(cond, d):
return_val = [dict(), dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not ma... | vit-pytorch-main | vit_pytorch/twins_svt.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# CCT Models
__all__ = ['cct... | vit-pytorch-main | vit_pytorch/cct.py |
from functools import wraps
import torch
from torch import nn
from vit_pytorch.vit import Attention
def find_modules(nn_module, type):
    """Collect every submodule of *nn_module* (itself included) that is an
    instance of *type*, in ``modules()`` traversal order.

    Note: the parameter name ``type`` shadows the builtin but is kept for
    interface compatibility.
    """
    found = []
    for module in nn_module.modules():
        if isinstance(module, type):
            found.append(module)
    return found
class Recorder(nn.Module):
def __init__(self, vit, device = None):
super().__init__()
... | vit-pytorch-main | vit_pytorch/recorder.py |
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, l = 3):... | vit-pytorch-main | vit_pytorch/levit.py |
from math import sqrt
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class FeedForward(nn.Module):
def __init__(self, dim, hidden_d... | vit-pytorch-main | vit_pytorch/vit_for_small_dataset.py |
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
def posemb_sincos_3d(patches, temperature = 10000, dtype = torch.float32):
_, f, h, w, dim, device, dty... | vit-pytorch-main | vit_pytorch/simple_vit_3d.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from vit_pytorch.vit import ViT
from vit_pytorch.simple_vit import SimpleViT
from vit_pytorch.mae import MAE
f... | vit-pytorch-main | vit_pytorch/__init__.py |
from collections import namedtuple
from packaging import version
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
from einops.layers.torch import Rearrange
# constants
Config = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
... | vit-pytorch-main | vit_pytorch/simple_flash_attn_vit.py |
import torch
from torch import nn, einsum
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
import torch.nn.functional as F
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, length = 1):
return val if ... | vit-pytorch-main | vit_pytorch/regionvit.py |
import torch
from torch import nn
def exists(val):
return val is not None
def identity(t):
return t
def clone_and_detach(t):
return t.clone().detach()
def apply_tuple_or_single(fn, val):
    """Apply *fn* element-wise when *val* is a tuple, otherwise apply it directly."""
    if not isinstance(val, tuple):
        return fn(val)
    return tuple(fn(item) for item in val)
class Extractor(nn.Module):
... | vit-pytorch-main | vit_pytorch/extractor.py |
from functools import partial
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pair(t):
return t if isinstance(t, tuple) el... | vit-pytorch-main | vit_pytorch/scalable_vit.py |
import torch
from torch import nn
from einops import rearrange
from einops.layers.torch import Rearrange
# helpers
def posemb_sincos_1d(patches, temperature = 10000, dtype = torch.float32):
_, n, dim, device, dtype = *patches.shape, patches.device, patches.dtype
n = torch.arange(n, device = device)
asse... | vit-pytorch-main | vit_pytorch/simple_vit_1d.py |
import torch
from torch import nn
from einops import rearrange
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
def posemb_sincos_2d(patches, temperature = 10000, dtype = torch.float32):
_, h, w, dim, device, dtype = *patches.shape, patches.device... | vit-pytorch-main | vit_pytorch/simple_vit_with_patch_dropout.py |
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
... | vit-pytorch-main | vit_pytorch/vit.py |
from math import sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **k... | vit-pytorch-main | vit_pytorch/local_vit.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# controlling freezing of layers
def set_module_requi... | vit-pytorch-main | vit_pytorch/learnable_memory_vit.py |
import torch
from torch import nn
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class FeedForward(nn.Module):
def __init__(self, dim, hidden_di... | vit-pytorch-main | vit_pytorch/vivit.py |
import torch
from torch import nn
from einops import rearrange
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
def posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32):
y, x = torch.meshgrid(torch.arange(h), torch.arange(w... | vit-pytorch-main | vit_pytorch/simple_vit.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# CCT Models
__all__ = ['cct... | vit-pytorch-main | vit_pytorch/cct_3d.py |
from functools import partial
import torch
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# helpers
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
# helper classes
class ChanLayerNorm(nn.Module):... | vit-pytorch-main | vit_pytorch/sep_vit.py |
import torch
import torch.nn.functional as F
from torch import nn
from vit_pytorch.vit import ViT
from vit_pytorch.t2t import T2TViT
from vit_pytorch.efficient import ViT as EfficientViT
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
# classes
class DistillMixin:
def... | vit-pytorch-main | vit_pytorch/distill.py |
import math
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
# helpers
def exists(val):
return val is not None
def prob_mask_like(t, prob):
    """Boolean mask of shape (batch, seq) whose entries are independently
    True with probability *prob* (for a tensor *t* of shape (batch, seq, ...))."""
    batch, seq_length, _ = t.shape
    noise = torch.zeros(batch, seq_length).float().uniform_(0, 1)
    return noise < prob
... | vit-pytorch-main | vit_pytorch/mpp.py |
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class PatchDropout(nn.Module):
def __init__(self, prob):
super().__init__()
assert 0 <= prob < 1.
... | vit-pytorch-main | vit_pytorch/vit_with_patch_dropout.py |
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class Parallel(nn.Module):
def __init__(self, *fns):
super().__init__()
self.fns = nn.ModuleList(fns... | vit-pytorch-main | vit_pytorch/parallel_vit.py |
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# ... | vit-pytorch-main | vit_pytorch/ats_vit.py |
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# helpers
def exists(val):
return val is not None
def default(val ,d):
return val if exists(val) else d
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# patch merger clas... | vit-pytorch-main | vit_pytorch/vit_with_patch_merger.py |
import math
import torch
from torch import nn
from vit_pytorch.vit import Transformer
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def conv_output_size(image_size, kernel_size, stride, padding):
    """Spatial output size of a convolution: floor((W - K + 2P) / S) + 1.

    Uses integer floor division instead of float division wrapped in
    ``int(...)``: the result is identical for all valid (positive) inputs,
    but stays exact for arbitrarily large sizes, where float division can
    lose precision beyond 2**53.
    """
    return (image_size - kernel_size + 2 * padding) // stride + 1
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
... | vit-pytorch-main | vit_pytorch/vit_3d.py |
from functools import partial
from typing import List, Union
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.nn.utils.rnn import pad_sequence as orig_pad_sequence
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return... | vit-pytorch-main | vit_pytorch/na_vit.py |
import copy
import random
from functools import wraps, partial
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms as T
# helper functions
def exists(val):
return val is not None
def default(val, default):
return val if exists(val) else default
def singleto... | vit-pytorch-main | vit_pytorch/dino.py |
import torch
from torch import nn
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
# classes
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Layernorm(dim),
... | vit-pytorch-main | vit_pytorch/vit_1d.py |
from math import sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def cast_tuple(val, num):
    """Broadcast *val* into a tuple of length *num*; tuples pass through unchanged."""
    if isinstance(val, tuple):
        return val
    return (val,) * num
def conv_output_size(image_size, kernel_size,... | vit-pytorch-main | vit_pytorch/pit.py |
from math import sqrt, pi, log
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# rotary embeddings
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
... | vit-pytorch-main | vit_pytorch/rvt.py |
from random import randrange
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def dropout_layers(layers, dropout):
if dropout == 0:
return layers
... | vit-pytorch-main | vit_pytorch/cait.py |
import copy
import random
from functools import wraps, partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torchvision import transforms as T
from einops import rearrange, reduce, repeat
# helper functions
def exists(val):
return val is not None
def default(val, default):
... | vit-pytorch-main | vit_pytorch/es_vit.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import repeat
class SimMIM(nn.Module):
def __init__(
self,
*,
encoder,
masking_ratio = 0.5
):
super().__init__()
assert masking_ratio > 0 and masking_ratio < 1, 'masking ratio must be k... | vit-pytorch-main | vit_pytorch/simmim.py |
from setuptools import setup, find_packages
setup(
name = 'aoa_pytorch',
packages = find_packages(exclude=['examples']),
version = '0.0.2',
license='MIT',
description = 'Attention on Attention - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/... | AoA-pytorch-main | setup.py |
from aoa_pytorch.aoa_pytorch import AttentionOnAttention
AoA = AttentionOnAttention
| AoA-pytorch-main | aoa_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
def default(val, d):
    """Return *val* unless it is None, in which case fall back to *d*."""
    # Inlines the sibling `exists` helper (a plain `is not None` check).
    return d if val is None else val
class AttentionOnAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head... | AoA-pytorch-main | aoa_pytorch/aoa_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'RIN-pytorch',
packages = find_packages(exclude=[]),
version = '0.7.9',
license='MIT',
description = 'RIN - Recurrent Interface Network - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'tex... | recurrent-interface-network-pytorch-main | setup.py |
from rin_pytorch.rin_pytorch import GaussianDiffusion, RIN, Trainer
| recurrent-interface-network-pytorch-main | rin_pytorch/__init__.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# constants
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_me... | recurrent-interface-network-pytorch-main | rin_pytorch/attend.py |
import math
from pathlib import Path
from random import random
from functools import partial
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
from torch.special import expm1
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
fro... | recurrent-interface-network-pytorch-main | rin_pytorch/rin_pytorch.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.