| python_code (string, 0–1.02M chars) | repo_name (string, 9–48 chars) | file_path (string, 5–114 chars) |
|---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/tests/test_equivariance.py |
| RFdiffusion-main | env/SE3Transformer/tests/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/tests/utils.py |
"""Helper class for handle symmetric assemblies."""
from pyrsistent import v
from scipy.spatial.transform import Rotation
import functools as fn
import torch
import string
import logging
import numpy as np
import pathlib
format_rots = lambda r: torch.tensor(r).float()
T3_ROTATIONS = [
torch.Tensor([
[ 1.,... | RFdiffusion-main | inference/symmetry.py |
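The `inference/symmetry.py` row above hard-codes tetrahedral rotations (`T3_ROTATIONS`) and converts them with `format_rots`. As a minimal sketch of the same conversion path, cyclic-group rotations can be generated with the already-imported `scipy.spatial.transform.Rotation` instead of being typed out by hand; `cyclic_rotations` is a hypothetical helper, not part of the repo:

```python
# Hypothetical helper: generate C_n cyclic-symmetry rotations with scipy
# rather than hard-coding the matrices, then convert via format_rots as above.
import torch
from scipy.spatial.transform import Rotation

format_rots = lambda r: torch.tensor(r).float()

def cyclic_rotations(n, axis='z'):
    # one rotation of 360/n degrees about the chosen axis per subunit
    return [
        format_rots(Rotation.from_euler(axis, 360.0 * i / n, degrees=True).as_matrix())
        for i in range(n)
    ]

C3_ROTATIONS = cyclic_rotations(3)  # three 3x3 float tensors; the first is the identity
```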
import torch
import numpy as np
from omegaconf import DictConfig, OmegaConf
from RoseTTAFoldModel import RoseTTAFoldModule
from kinematics import get_init_xyz, xyz_to_t2d
from diffusion import Diffuser
from chemical import seq2chars
from util_module import ComputeAllAtomCoords
from contigs import ContigMap
from inferen... | RFdiffusion-main | inference/model_runners.py |
import numpy as np
import os
import sys
from omegaconf import DictConfig
from kinematics import xyz_to_t2d
import torch
import torch.nn.functional as nn
from diffusion import get_beta_schedule
from scipy.spatial.transform import Rotation as scipy_R
from util import rigid_from_3_points
from util_module import ComputeAll... | RFdiffusion-main | inference/utils.py |
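The `inference/utils.py` row imports `get_beta_schedule` from `diffusion`. For orientation, here is a minimal DDPM-style linear schedule; this is an illustrative sketch and not a claim about RFdiffusion's actual schedule form or defaults:

```python
# Illustrative DDPM-style linear beta schedule; RFdiffusion's get_beta_schedule
# lives in diffusion.py and may use a different form and different defaults.
import torch

def linear_beta_schedule(T, beta_start=1e-4, beta_end=2e-2):
    betas = torch.linspace(beta_start, beta_end, T)      # per-step noise variance
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)   # cumulative signal retention
    return betas, alphas_cumprod

betas, alphas_cumprod = linear_beta_schedule(200)
```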
#!/usr/bin/env python
import os,sys,glob,torch,random
import numpy as np
import argparse
try:
    import pyrosetta
    pyrosetta.init()
    APPROX = False
except Exception:
    print("WARNING: PyRosetta not found, will use an approximate SSE calculation")
    APPROX = True
def main():
    args = get_args()
    assert args.input_p... | RFdiffusion-main | helper_scripts/make_secstruc_adj.py |
from setuptools import setup, find_packages
setup(
name = 'voicebox-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.34',
license='MIT',
description = 'Voicebox - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url ... | voicebox-pytorch-main | setup.py |
from voicebox_pytorch.voicebox_pytorch import (
Transformer,
EncodecVoco,
VoiceBox,
DurationPredictor,
ConditionalFlowMatcherWrapper,
)
| voicebox-pytorch-main | voicebox_pytorch/__init__.py |
import math
from random import random
from functools import partial
import logging
import torch
from torch import nn, Tensor, einsum, IntTensor, FloatTensor, BoolTensor
from torch.nn import Module
import torch.nn.functional as F
import torchode as to
from torchdiffeq import odeint
from beartype import beartype
from... | voicebox-pytorch-main | voicebox_pytorch/voicebox_pytorch.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# constants
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_me... | voicebox-pytorch-main | voicebox_pytorch/attend.py |
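The `FlashAttentionConfig` namedtuple in `attend.py` mirrors the three backend flags of PyTorch 2.x scaled-dot-product attention. A hedged sketch of how such a config is typically applied (the surrounding module may wire it differently):

```python
# Sketch: the three namedtuple fields map onto PyTorch's SDP backend toggles.
import torch
import torch.nn.functional as F

q = k = v = torch.randn(1, 8, 128, 64)   # (batch, heads, seq, dim_head)

if torch.cuda.is_available():
    # the flash kernel requires half precision on most GPUs
    q, k, v = (t.cuda().half() for t in (q, k, v))
    # force the flash backend only, mirroring FlashAttentionConfig(True, False, False)
    with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False,
                                        enable_mem_efficient=False):
        out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
else:
    out = F.scaled_dot_product_attention(q, k, v, is_causal=True)  # CPU fallback
```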
from setuptools import setup, find_packages
setup(
name = 'x-transformers',
packages = find_packages(exclude=['examples']),
version = '1.21.2',
license='MIT',
description = 'X-Transformers - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/x-tr... | x-transformers-main | setup.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
from x_transformers.autoregressive_wrapper import top_p, top_k, eval_decorator
# helper functions
def exists(val):
return val is not None
def divisible_by(numer, denom):
return... | x-transformers-main | x_transformers/xl_autoregressive_wrapper.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self,... | x-transformers-main | x_transformers/autoregressive_wrapper.py |
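The row above truncates `eval_decorator` mid-body. The standard form of this pattern, visible more completely in the `hourglass-transformer-pytorch` row further down, switches the module to eval mode for sampling and restores the previous mode afterwards; a self-contained sketch:

```python
def eval_decorator(fn):
    def inner(self, *args, **kwargs):
        was_training = self.training        # remember the current mode
        self.eval()                         # disable dropout etc. while sampling
        out = fn(self, *args, **kwargs)
        self.train(was_training)            # restore the previous mode
        return out
    return inner
```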
import math
from random import random
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from collections import namedtuple
from dataclasses import dataclass
from typing import List, Callable, Optional
from einops impo... | x-transformers-main | x_transformers/x_transformers.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from x_transformers.x_transformers import XTransformer, Encoder, Decoder, CrossAttender, Attention, Transformer... | x-transformers-main | x_transformers/__init__.py |
import torch
from torch import nn
import torch.nn.functional as F
def exists(val):
return val is not None
class ContinuousAutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.net = net
self.max_seq_len = net.max_seq_len
... | x-transformers-main | x_transformers/continuous_autoregressive_wrapper.py |
from functools import partial
from typing import Optional, Tuple
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange, repeat
# con... | x-transformers-main | x_transformers/attend.py |
import math
from random import random
from contextlib import nullcontext
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange, repeat, pack, unpack
from x_transformers.x_transformers import TransformerWrapper
from typing import Optional
# ... | x-transformers-main | x_transformers/nonautoregressive_wrapper.py |
import tqdm
import torch
import torch.optim as optim
from x_transformers import XTransformer
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 3e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cycle():
while True:
prefix = torch.ones((BAT... | x-transformers-main | examples/toy_tasks/enc_dec_copy.py |
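The toy copy-task script is cut off inside `cycle()`. A hedged reconstruction consistent with the constants shown (`DEC_SEQ_LEN = 64 + 1` fits a start token plus the 32-token source repeated twice); details beyond the visible lines are assumptions:

```python
import torch

NUM_TOKENS  = 16 + 2   # 16 symbols plus two specials
BATCH_SIZE  = 32
ENC_SEQ_LEN = 32

def cycle():
    while True:
        prefix = torch.ones((BATCH_SIZE, 1)).long()                          # start token
        src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long()
        tgt = torch.cat((prefix, src, src), dim=1)                           # copy source twice
        src_mask = torch.ones(BATCH_SIZE, src.shape[1]).bool()               # no padding to mask
        yield (src, tgt, src_mask)

src, tgt, mask = next(cycle())   # shapes: (32, 32), (32, 65), (32, 32)
```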
from x_transformers import (
TransformerWrapper,
Encoder,
NonAutoregressiveWrapper
)
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e8)
B... | x-transformers-main | examples/enwik8_simple/train_nar.py |
from x_transformers import TransformerWrapper, Decoder
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# const... | x-transformers-main | examples/enwik8_simple/train.py |
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
model = AutoModelForCausalLM.from_pretrained("lightonai/RITA_s", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("lightonai/RITA_s")
rita_gen = pipeline('text-generation', model=model, tokenizer=tokenizer... | RITA-master | example.py |
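The RITA example truncates the `pipeline(...)` call. A hedged completion follows; the generation parameters are illustrative, not necessarily those of the original script:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model = AutoModelForCausalLM.from_pretrained("lightonai/RITA_s", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("lightonai/RITA_s")

rita_gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
# sample a couple of protein sequences from a short prompt
sequences = rita_gen("MAB", max_length=20, do_sample=True, top_k=950,
                     repetition_penalty=1.2, num_return_sequences=2)
for seq in sequences:
    print(seq['generated_text'])
```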
import argparse
from itertools import product
import torch
from torch import einsum
assert torch.cuda.is_available(), 'cuda must be available to run benchmark'
from flash_cosine_sim_attention.benchmark import benchmark
from flash_cosine_sim_attention import flash_cosine_sim_attention, l2norm_tensors
# helper functio... | flash-cosine-sim-attention-main | benchmark.py |
import sys
from functools import lru_cache
from subprocess import DEVNULL, call
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
# the following code was taken from
# https://github.com/teddykoker/torchsort/blob/main/setup.py
# which in turn ... | flash-cosine-sim-attention-main | setup.py |
from flash_cosine_sim_attention.transformer import CosineSimCausalTransformer
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from torch.cuda.amp import autocast, Gra... | flash-cosine-sim-attention-main | train.py |
| flash-cosine-sim-attention-main | tests/__init__.py |
import torch
import pytest
from flash_cosine_sim_attention import plain_cosine_sim_attention, flash_cosine_sim_attention
assert torch.cuda.is_available(), 'cuda must be available'
# helper functions
def not_nan_or_infs(t):
return not (torch.any(torch.isnan(t)) or torch.any(torch.isinf(t)))
def allclose(a, b, at... | flash-cosine-sim-attention-main | tests/test.py |
__version__ = '0.1.40'
__cuda_pkg_name__ = f'flash_cosine_sim_attention_cuda_{__version__.replace(".", "_")}'
| flash-cosine-sim-attention-main | flash_cosine_sim_attention/version.py |
import torch
from torch.cuda import synchronize, Event
from functools import wraps, partial
timer = partial(Event, enable_timing = True)
def benchmark(
fn,
*,
num_times = 10,
warmup_iters = 10,
forwards = True,
backwards = False
):
assert forwards or backwards
@wraps(fn)
def inner... | flash-cosine-sim-attention-main | flash_cosine_sim_attention/benchmark.py |
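The benchmark helper builds on the `timer = partial(Event, enable_timing = True)` line above. A minimal sketch of CUDA-event timing, assuming a CUDA device is available; `time_once` is a hypothetical name:

```python
import torch
from functools import partial

timer = partial(torch.cuda.Event, enable_timing=True)

def time_once(fn, *args, **kwargs):
    start, end = timer(), timer()
    start.record()                          # marks the start on the current stream
    out = fn(*args, **kwargs)
    end.record()
    torch.cuda.synchronize()                # wait until the recorded work finishes
    return start.elapsed_time(end), out     # elapsed time in milliseconds
```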
from flash_cosine_sim_attention.flash_cosine_sim_attention import flash_cosine_sim_attention, plain_cosine_sim_attention, l2norm_tensors, debug
| flash-cosine-sim-attention-main | flash_cosine_sim_attention/__init__.py |
import torch
from functools import partial
from torch import nn, einsum
import torch.nn.functional as F
try:
from einops import rearrange
except ImportError:
print('pip install einops to use transformer')
from flash_cosine_sim_attention.flash_cosine_sim_attention import plain_cosine_sim_attention, flash_cosin... | flash-cosine-sim-attention-main | flash_cosine_sim_attention/transformer.py |
import os
import math
import importlib
from functools import partial, wraps
import torch
from torch import einsum
import torch.nn.functional as F
from torch.autograd import Function
exec(open(os.path.dirname(os.path.abspath(__file__)) + '/version.py').read())
# try to import cuda
try:
cuda_pkg = importlib.impor... | flash-cosine-sim-attention-main | flash_cosine_sim_attention/flash_cosine_sim_attention.py |
from setuptools import setup, find_packages
setup(
name = 'marge-pytorch',
packages = find_packages(),
version = '0.2.9',
license='MIT',
description = 'Marge - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/marge-pytorch',
keywords = [
... | marge-pytorch-master | setup.py |
from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def default(value, default):
return value if value is not None else default
def log(t, eps=1e-9):
return torch.log(t + eps)
def top_p(logits, thres = 0.9):... | marge-pytorch-master | marge_pytorch/autoregressive_wrapper.py |
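The marge-pytorch row truncates `top_p`. A standard nucleus-sampling filter under the same signature is sketched below; note that conventions differ between implementations (some treat `thres` as the probability mass to keep, others as the mass to drop), so this is not necessarily byte-identical to the repo's version:

```python
import torch
import torch.nn.functional as F

def top_p(logits, thres=0.9):
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    # drop tokens once cumulative probability exceeds thres,
    # always keeping at least the single most likely token
    remove = cum_probs > thres
    remove[..., 1:] = remove[..., :-1].clone()
    remove[..., 0] = False
    sorted_logits = sorted_logits.masked_fill(remove, float('-inf'))
    # undo the sort so filtered logits return to their original positions
    return torch.full_like(logits, float('-inf')).scatter(-1, sorted_indices, sorted_logits)
```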
from marge_pytorch.marge_pytorch import Marge, TrainingWrapper
from marge_pytorch.autoregressive_wrapper import AutoregressiveWrapper | marge-pytorch-master | marge_pytorch/__init__.py |
import faiss
import math
import numpy as np
from tqdm import tqdm
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
import torch
from torch.utils.data import Dataset, DataLoader
from torch import nn, einsum
import torch.nn.functional as F
from marge_pytorch.autor... | marge-pytorch-master | marge_pytorch/marge_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'deformable-attention',
packages = find_packages(exclude=[]),
version = '0.0.18',
license='MIT',
description = 'Deformable Attention - from the paper "Vision Transformer with Deformable Attention"',
long_description_content_type = 'text/markdown',
... | deformable-attention-main | setup.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
# tensor helpers
... | deformable-attention-main | deformable_attention/deformable_attention_2d.py |
from deformable_attention.deformable_attention_1d import DeformableAttention1D
from deformable_attention.deformable_attention_2d import DeformableAttention2D
from deformable_attention.deformable_attention_3d import DeformableAttention3D
DeformableAttention = DeformableAttention2D
| deformable-attention-main | deformable_attention/__init__.py |
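`DeformableAttention` is aliased to the 2D variant. A hypothetical usage sketch based on the package README, with all constructor arguments except `dim` left at their defaults:

```python
import torch
from deformable_attention import DeformableAttention

attn = DeformableAttention(dim=512)   # remaining constructor arguments at defaults
x = torch.randn(1, 512, 64, 64)       # feature map: (batch, channels, height, width)
out = attn(x)                         # output keeps the input shape: (1, 512, 64, 64)
```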
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def cast_tuple(x, ... | deformable-attention-main | deformable_attention/deformable_attention_3d.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops.layers.torch import Rearrange
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
retur... | deformable-attention-main | deformable_attention/deformable_attention_1d.py |
from setuptools import setup, find_packages
setup(
name = 'hourglass-transformer-pytorch',
packages = find_packages(),
version = '0.0.6',
license='MIT',
description = 'Hourglass Transformer',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/hourglass-tra... | hourglass-transformer-pytorch-main | setup.py |
from hourglass_transformer_pytorch import HourglassTransformerLM
from hourglass_transformer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import Dat... | hourglass-transformer-pytorch-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_tr... | hourglass-transformer-pytorch-main | hourglass_transformer_pytorch/autoregressive_wrapper.py |
from hourglass_transformer_pytorch.hourglass_transformer_pytorch import HourglassTransformerLM, HourglassTransformer
| hourglass-transformer-pytorch-main | hourglass_transformer_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pad_to_multiple(tensor, multiple, dim = -1, value = 0):
seq_len = tensor.shap... | hourglass-transformer-pytorch-main | hourglass_transformer_pytorch/hourglass_transformer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'htm-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Hierarchical Transformer Memory - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/htm-pytorch... | HTM-pytorch-main | setup.py |
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pad_to_multiple(t, multiple, dim = -2, value = 0.):
seq_len = ... | HTM-pytorch-main | htm_pytorch/htm_pytorch.py |
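Both the hourglass-transformer row above and this HTM row cut off inside `pad_to_multiple`. A hedged completion for negative `dim` indices (the only case these call sites appear to use):

```python
import math
import torch
import torch.nn.functional as F

def pad_to_multiple(t, multiple, dim=-2, value=0.):
    seq_len = t.shape[dim]
    padded_len = math.ceil(seq_len / multiple) * multiple
    if padded_len == seq_len:
        return t                                   # already a multiple: no-op
    # F.pad takes (last-dim-left, last-dim-right, second-last-left, ...) pairs
    pad_offset = (0, 0) * (-1 - dim)
    return F.pad(t, (*pad_offset, 0, padded_len - seq_len), value=value)

x = pad_to_multiple(torch.randn(2, 10, 8), multiple=4)   # -> shape (2, 12, 8)
```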
from htm_pytorch.htm_pytorch import HTMAttention, HTMBlock
| HTM-pytorch-main | htm_pytorch/__init__.py |
from setuptools import find_packages
import subprocess
from glob import glob
from distutils.core import setup, Extension
# read the contents of your README file
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
def pkgconfig(package, kw):
... | ffcv-main | setup.py |
from tempfile import NamedTemporaryFile
import torch as ch
from tqdm import tqdm
import time
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset
from ffcv import DatasetWriter
from ffcv.fields import IntField, NDArrayField
from ffcv import Loader
from ffcv.fields.basics import IntD... | ffcv-main | tests/test_cuda_nonblocking.py |
from collections import defaultdict
from tempfile import TemporaryDirectory
from os import path
from typing import Counter
import pytest
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset, distributed
from torch.multiprocessing import spawn, Queue
from torch.distributed import ini... | ffcv-main | tests/test_traversal_orders.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import ass... | ffcv-main | tests/test_partial_batches.py |
import pytest
import numpy as np
from uuid import uuid4
from ffcv.fields.ndarray import NDArrayField, NDArrayDecoder
from ffcv.writer import DatasetWriter
from ffcv.loader import Loader, OrderOption
from tempfile import NamedTemporaryFile
class StringDecoder(NDArrayDecoder):
pass
class StringField(NDArrayField):
... | ffcv-main | tests/test_custom_field.py |
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from ffcv.writer import DatasetWriter
from ffcv.fields import IntField, RGBImageField
from ffcv.fields.decod... | ffcv-main | tests/test_image_normalization.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import ass... | ffcv-main | tests/test_basic_pipeline.py |
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvision.datasets import CIFAR10
from torch.utils.data import Subset
from ffcv.writer import DatasetWriter
from ffcv.fields import IntField, RGBImageField
from ffc... | ffcv-main | tests/test_image_pipeline.py |
import os
from tempfile import NamedTemporaryFile
from time import sleep, time
import os, psutil
import numpy as np
import pytest
from tqdm import tqdm
from assertpy import assert_that
from torch.utils.data import Dataset
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import By... | ffcv-main | tests/test_memory_leak.py |
import numpy as np
from assertpy import assert_that
from numpy.random import shuffle
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import IntFie... | ffcv-main | tests/test_writer.py |
import numpy as np
from tqdm import tqdm
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
from time import time
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields i... | ffcv-main | tests/test_memory_reader.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import ass... | ffcv-main | tests/test_partial_pipeline.py |
import string
from ctypes import pointer
from tempfile import NamedTemporaryFile
from collections import defaultdict
from assertpy.assertpy import assert_that
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset
from ffcv import DatasetWriter
from ffcv.fields import IntField, JSONFi... | ffcv-main | tests/test_json_field.py |
import numpy as np
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import BytesField, IntField
class DummyDatase... | ffcv-main | tests/test_memory_allocation.py |
import os
import uuid
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from torchvision import transforms as tvt
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvision.datasets import CIFAR10
from torchvision.utils import save_image, make_grid
from torch.util... | ffcv-main | tests/test_augmentations.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import ass... | ffcv-main | tests/test_loader_filter.py |
from ffcv.transforms.ops import ToTensor
from ffcv.fields.rgb_image import RandomResizedCropRGBImageDecoder, SimpleRGBImageDecoder, CenterCropRGBImageDecoder
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvisi... | ffcv-main | tests/test_rrc.py |
from os import path
from glob import glob
import tempfile
import numpy as np
from tempfile import TemporaryDirectory, NamedTemporaryFile
import torch as ch
from torch.utils.data import Dataset
import webdataset as wds
from ffcv import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import IntField, Floa... | ffcv-main | tests/test_webdataset.py |
import numpy as np
from tqdm import tqdm
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
from time import time
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields im... | ffcv-main | tests/test_image_read.py |
from ctypes import pointer
from tempfile import NamedTemporaryFile
from collections import defaultdict
from assertpy.assertpy import assert_that
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset
from ffcv import DatasetWriter
from ffcv.fields import IntField, NDArrayField
from ff... | ffcv-main | tests/test_array_field.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... | ffcv-main | docs/conf.py |
from .loader import Loader
from .writer import DatasetWriter
__version__ = '0.0.3rc1'
__all__ = ['Loader']
| ffcv-main | ffcv/__init__.py |
from typing import List
import numpy as np
from .fields.base import Field
from .fields import (
FloatField, IntField, RGBImageField,
BytesField, NDArrayField, JSONField
)
CURRENT_VERSION = 2
# Note that in this file we use dtypes in the format <u4 instead of uint32. This
# forces endianness of the data, maki... | ffcv-main | ffcv/types.py |
import numpy as np
from time import sleep
from os import SEEK_END
from multiprocessing import Value
from .utils import align_to_page
import ctypes
class MemoryAllocator():
def __init__(self, fname, offset_start, page_size):
self.fname = fname
self.offset = align_to_page(offset_start, page_size)
... | ffcv-main | ffcv/memory_allocator.py |
import numpy as np
from .utils import decode_null_terminated_string
from .types import (ALLOC_TABLE_TYPE, HeaderType, CURRENT_VERSION,
FieldDescType, get_handlers, get_metadata_type)
class Reader:
def __init__(self, fname, custom_handlers={}):
self._fname = fname
self._custom_... | ffcv-main | ffcv/reader.py |
import numpy as np
from numba import types
from numba.extending import intrinsic
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
def is_power_of_2(n):
return (n & (n-1) == 0) and n != 0
def align_to_page(ptr, page_size):
# If we are not aligned with the start of a page:
... | ffcv-main | ffcv/utils.py |
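`align_to_page` is truncated just after its comment. A hedged completion that rounds a byte offset up to the next page boundary, consistent with the comment shown:

```python
def align_to_page(ptr, page_size):
    # if we are not aligned with the start of a page, round up to the next one
    if ptr % page_size != 0:
        ptr = ptr + page_size - (ptr % page_size)
    return ptr

assert align_to_page(4097, 4096) == 8192   # mid-page offsets move forward
assert align_to_page(8192, 4096) == 8192   # aligned offsets are unchanged
```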
import ctypes
from numba import njit
import numpy as np
from ctypes import CDLL, c_int64, c_uint8, c_uint64, POINTER, c_void_p, c_uint32, c_bool, cdll
import ffcv._libffcv
lib = CDLL(ffcv._libffcv.__file__)
libc = cdll.LoadLibrary('libc.so.6')
read_c = libc.pread
read_c.argtypes = [c_uint32, c_void_p, c_uint64, c_uin... | ffcv-main | ffcv/libffcv.py |
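The ctypes binding is cut off inside `read_c.argtypes`. Since `pread(2)` has the C signature `ssize_t pread(int fd, void *buf, size_t count, off_t offset)`, one plausible completion for 64-bit Linux is sketched below; `pread_into` is a hypothetical wrapper, not ffcv's API:

```python
import ctypes
from ctypes import c_void_p, c_uint32, c_uint64, c_int64, cdll

libc = cdll.LoadLibrary('libc.so.6')       # Linux-only, as in the row above
read_c = libc.pread
read_c.argtypes = [c_uint32, c_void_p, c_uint64, c_uint64]  # fd, buf, count, offset
read_c.restype = c_int64

def pread_into(fd, buf, count, offset):
    # buf must be writable, e.g. ctypes.create_string_buffer(count)
    n = read_c(fd, ctypes.cast(buf, c_void_p), c_uint64(count), c_uint64(offset))
    if n < 0:
        raise OSError('pread failed')
    return n
```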
from functools import partial
from typing import Callable, List, Mapping
from os import SEEK_END, path
import numpy as np
from time import sleep
import ctypes
from multiprocessing import (shared_memory, cpu_count, Queue, Process, Value)
from tqdm import tqdm
from tqdm.contrib.concurrent import thread_map
from .utils ... | ffcv-main | ffcv/writer.py |
import pdb
from numba import njit, set_num_threads, prange, warnings as nwarnings, get_num_threads
from numba.core.errors import NumbaPerformanceWarning
from multiprocessing import cpu_count
import torch as ch
import warnings
class Compiler:
@classmethod
def set_enabled(cls, b):
cls.is_enabled = b
... | ffcv-main | ffcv/pipeline/compiler.py |
from .pipeline import Pipeline
__all__ = ['Pipeline'] | ffcv-main | ffcv/pipeline/__init__.py |
from typing import Optional, Sequence, Tuple, Union
from dataclasses import dataclass
import numpy as np
import torch as ch
@dataclass(frozen=True)
class AllocationQuery:
shape: Tuple[int, ...]
dtype: Union[np.dtype, ch.dtype]
device: Optional[ch.device] = None
Allocation = Union[AllocationQuery, Seque... | ffcv-main | ffcv/pipeline/allocation_query.py |
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
from typing import Callable, Optional, Tuple
import numpy as np
from .state import State
from .allocation_query import AllocationQuery
if TYPE_CHECKING:
from ..fields.base import Field
class Operation(ABC):
def __init__(self):
sel... | ffcv-main | ffcv/pipeline/operation.py |
from typing import Any, Optional, Sequence, Mapping
import torch as ch
import numpy as np
from .state import State
from .operation import Operation
from .allocation_query import Allocation, AllocationQuery
BAD_COLLATION_MESSAGE: str = "Each pipeline needs one and only one Collate operation"
class Pipeline:
de... | ffcv-main | ffcv/pipeline/pipeline.py |
from dataclasses import dataclass
from typing import Literal, Tuple
import torch as ch
import numpy as np
@dataclass
class State:
jit_mode: bool
device: ch.device
shape: Tuple[int, ...]
dtype: np.dtype
# Assess the validity of a pipeline stage
def __post_init__(self):
if self.jit... | ffcv-main | ffcv/pipeline/state.py |
from .base import MemoryManager, MemoryContext
from .process_cache import ProcessCacheManager
from .os_cache import OSCacheManager
__all__ = ['OSCacheManager', 'ProcessCacheManager',
'MemoryManager', 'MemoryContext']
| ffcv-main | ffcv/memory_managers/__init__.py |
from typing import Sequence, TYPE_CHECKING
BATCHES_TYPE = Sequence[Sequence[int]] | ffcv-main | ffcv/memory_managers/common.py |
from abc import abstractmethod, ABCMeta, ABC
from contextlib import AbstractContextManager
from collections import defaultdict
from typing import Callable, Mapping, Sequence, Set
import numpy as np
from numba.typed import Dict
from numba import types
from ..reader import Reader
from ..pipeline.compiler import Compile... | ffcv-main | ffcv/memory_managers/base.py |
from typing import TYPE_CHECKING
import numpy as np
import numba as nb
from .base import MemoryManager, MemoryContext
from ..pipeline.compiler import Compiler
if TYPE_CHECKING:
from ..reader import Reader
class OSCacheContext(MemoryContext):
def __init__(self, manager:MemoryManager):
self.manager =... | ffcv-main | ffcv/memory_managers/os_cache.py |
from .context import ProcessCacheContext
from .manager import ProcessCacheManager
__all__ = ['ProcessCacheContext', 'ProcessCacheManager'] | ffcv-main | ffcv/memory_managers/process_cache/__init__.py |
from threading import Thread
from queue import Queue
import numpy as np
from ...libffcv import read
class PageReader(Thread):
def __init__(self, fname:str, queries: Queue, loaded: Queue,
memory: np.ndarray):
self.fname: str = fname
self.queries: Queue = queries
self.mem... | ffcv-main | ffcv/memory_managers/process_cache/page_reader.py |
from collections import defaultdict
import numpy as np
from ..base import MemoryManager, MemoryContext
from ..common import BATCHES_TYPE
from .schedule import Schedule, ScheduleExecutor, compute_schedule
class ProcessCacheContext(MemoryContext):
def __init__(self, manager: MemoryManager, batches: BATCHES_TYPE)... | ffcv-main | ffcv/memory_managers/process_cache/context.py |
from collections import defaultdict
from dataclasses import dataclass
from typing import Mapping
from queue import Queue
import numpy as np
from .page_reader import PageReader
@dataclass
class Schedule:
# Number of slots needed
num_slots: int
# Which slot to use for each page
page_to_slot: Mapping[i... | ffcv-main | ffcv/memory_managers/process_cache/schedule.py |
import numba as nb
import numpy as np
from .context import ProcessCacheContext
from ...pipeline.compiler import Compiler
from ..base import MemoryManager, MemoryContext
from ..common import BATCHES_TYPE
class ProcessCacheManager(MemoryManager):
def schedule_epoch(self, batches: BATCHES_TYPE) -> MemoryContext:
... | ffcv-main | ffcv/memory_managers/process_cache/manager.py |
import random
from typing import Sequence, TYPE_CHECKING
from numba import njit
import numpy as np
from torch.utils.data import DistributedSampler
from .base import TraversalOrder
if TYPE_CHECKING:
from ..loader.loader import Loader
@njit(parallel=False)
def generate_order_inner(seed, page_to_samples_array, pa... | ffcv-main | ffcv/traversal_order/quasi_random.py |
from .sequential import Sequential
from .random import Random
from .quasi_random import QuasiRandom
__all__ = ['Sequential', 'Random', "QuasiRandom"] | ffcv-main | ffcv/traversal_order/__init__.py |
from typing import Sequence
import numpy as np
from torch.utils.data import DistributedSampler
from .base import TraversalOrder
class Random(TraversalOrder):
def __init__(self, loader:'Loader'):
super().__init__(loader)
if self.distributed:
self.sampler = DistributedSampler(self.ind... | ffcv-main | ffcv/traversal_order/random.py |
from typing import Sequence, TYPE_CHECKING
import numpy as np
from torch.utils.data import DistributedSampler
from .base import TraversalOrder
if TYPE_CHECKING:
from ..loader.loader import Loader
class Sequential(TraversalOrder):
def __init__(self, loader:'Loader'):
super().__init__(loader... | ffcv-main | ffcv/traversal_order/sequential.py |
from abc import ABC, abstractmethod
from typing import Sequence
from ..reader import Reader
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..loader.main_thread import Loader
class TraversalOrder(ABC):
def __init__(self, loader: 'Loader'):
self.loader = loader
self.indices = self.load... | ffcv-main | ffcv/traversal_order/base.py |
"""
Random translate
"""
import numpy as np
from numpy.random import randint
from typing import Callable, Optional, Tuple
from dataclasses import replace
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import ... | ffcv-main | ffcv/transforms/translate.py |
"""
Mixup augmentation for images and labels (https://arxiv.org/abs/1710.09412)
"""
from typing import Tuple
from numba import objmode
import numpy as np
import torch as ch
import torch.nn.functional as F
from dataclasses import replace
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query impo... | ffcv-main | ffcv/transforms/mixup.py |