Dataset columns (one row per source file):
    python_code: string, lengths 0 to 1.02M
    repo_name: string, lengths 9 to 48
    file_path: string, lengths 5 to 114
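Each row below is one (python_code, repo_name, file_path) triple; the python_code previews are truncated. A minimal sketch of iterating such a dataset with the Hugging Face datasets library, assuming it were published under a hypothetical id like 'user/python-code-files' (the listing itself does not name one):

from datasets import load_dataset

# hypothetical dataset id; substitute the real one
ds = load_dataset("user/python-code-files", split="train")

for row in ds.select(range(3)):
    # each row is one source file: its code, the repo it came from, and its path in that repo
    print(row["repo_name"], row["file_path"], len(row["python_code"]))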
from memory_transformer_xl import MemoryTransformerXL
from memory_transformer_xl.autoregressive_wrapper import AutoregressiveWrapper

import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset

#...
memory-transformer-xl-master
examples/enwik8_simple/train.py
import math
from functools import partial
from collections import namedtuple

import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence

# structs

Return = namedtuple('Return', ['loss', 'is_last_batch'])

# helper functions

def top_p(logits, thres = 0.9):
    sorted...
memory-transformer-xl-master
memory_transformer_xl/autoregressive_wrapper.py
from memory_transformer_xl.memory_transformer_xl import MemoryTransformerXL
memory-transformer-xl-master
memory_transformer_xl/__init__.py
import torch
from torch import nn
import torch.nn.functional as F
from mogrifier import Mogrifier

import math
from collections import namedtuple
from functools import partial
from inspect import isfunction

# structs

Memory = namedtuple('Memory', ['short', 'long'])

# helper functions

def to(t):
    return {'dtype'...
memory-transformer-xl-master
memory_transformer_xl/memory_transformer_xl.py
from setuptools import setup, find_packages

setup(
    name = 'linformer',
    packages = find_packages(),
    version = '0.2.1',
    license='MIT',
    description = 'Linformer implementation in Pytorch',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    url = 'https://github.com/lucidrains/linformer',
    keywo...
linformer-master
setup.py
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states

# for routing arguments into the functions of the reversible layer

def route_args(router, args, depth):
    routed_args = [(dict(), dic...
linformer-master
linformer/reversible.py
from linformer.linformer import LinformerLM, Linformer, LinformerSelfAttention
linformer-master
linformer/__init__.py
import math
import torch
from torch import nn
import torch.nn.functional as F

from linformer.reversible import ReversibleSequence, SequentialSequence

# helper functions

def default(val, default_val):
    return val if val is not None else default_val

def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math....
linformer-master
linformer/linformer.py
from setuptools import setup, find_packages

setup(
    name = 'medical-chatgpt',
    packages = find_packages(exclude=[]),
    version = '0.0.1',
    license='MIT',
    description = 'Medical ChatGPT',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    long_description_content_type = 'text/markdown',
    url = 'ht...
medical-chatgpt-main
setup.py
medical-chatgpt-main
medical_chatgpt/__init__.py
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from inspect import isfunction  # not in the preview's imports, but required by default() below

def exists(val):
    return val is not None

def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d

# attention

class Attention(nn.Module):
    def __init__(
        se...
medical-chatgpt-main
medical_chatgpt/medical_chatgpt.py
import numpy as np
from string import ascii_uppercase, ascii_lowercase
import urllib.parse
import urllib.request
import time

def parse_a3m(a3m_lines=None, a3m_file=None, filter_qid=0.15, filter_cov=0.5, N=100000):
    def seqid(a, b):
        return sum(c1 == c2 for c1, c2 in zip(a, b))

    def nongaps(a):
        return su...
ColabFold-main
beta/pairmsa.py
# fmt: off
############################################
# imports
############################################
import jax
import requests
import hashlib
import tarfile
import time
import pickle
import os
import re
import random
import tqdm.notebook
import numpy as np
import matplotlib.pyplot as plt
import matplotlib...
ColabFold-main
beta/colabfold.py
import os
from urllib import request
from concurrent import futures
import pickle

import jax
from alphafold.data.tools import jackhmmer
from alphafold.data import parsers
from alphafold.data import pipeline
from alphafold.common import protein
from alphafold.model import config
from alphafold.model import model
from ...
ColabFold-main
beta/colabfold_alphafold.py
import pytest

from colabfold.batch import get_queries

def test_get_queries_fasta_dir(pytestconfig, caplog):
    dir_path = pytestconfig.rootpath.joinpath("test-data/batch/input")
    queries, is_complex = get_queries(dir_path)
    assert queries == [("5AWL_1", "YYDPETGTWY", None), ("6A5J", "IKKILSKIKKLLK", None)]
    ...
ColabFold-main
tests/test_utils.py
import json
from pathlib import Path

if __name__ == "__main__":
    for notebook in Path(".").rglob("*.ipynb"):
        print(notebook)
        data = json.loads(open(notebook).read())
        open(notebook, "w").write(json.dumps(data, indent=2, ensure_ascii=False))
ColabFold-main
tests/reindent_ipynb.py
from unittest import mock

import haiku
import logging
import pytest
import re
from absl import logging as absl_logging
from functools import lru_cache
from zipfile import ZipFile

from alphafold.model.data import get_model_haiku_params
from alphafold.model.tf import utils
from colabfold.batch import msa_to_str, unseri...
ColabFold-main
tests/test_colabfold.py
from unittest import mock

from colabfold.batch import get_msa_and_templates
from tests.mock import MMseqs2Mock

def test_get_msa_and_templates(pytestconfig, caplog, tmp_path):
    Q60262 = "MEIIALLIEEGIIIIKDKKVAERFLKDLESSQGMDWKEIRERAERAKKQLEEGIEWAKKTKL"
    for msa_mode, tag, lines in [
        ("MMseqs2 (UniRef+Env...
ColabFold-main
tests/test_msa.py
ColabFold-main
tests/__init__.py
import json
import lzma
import os
import pickle
from pathlib import Path
from typing import List, Tuple, Mapping, Any, Dict

import numpy
from alphafold.model.features import FeatureDict
from alphafold.model.model import RunModel

from colabfold.colabfold import run_mmseqs2

# Copy the original method before mocking or...
ColabFold-main
tests/mock.py
import logging
from pathlib import Path

logger = logging.getLogger(__name__)

citations = {
    "Mirdita2021": """@article{Mirdita2021,
author= {Mirdita, Milot and Schütze, Konstantin and Moriwaki, Yoshitaka and Heo, Lim and Ovchinnikov, Sergey and Steinegger, Martin },
doi = {10.1101/2021.08.15.456425v2},
journal = {...
ColabFold-main
colabfold/citations.py
from pathlib import Path

import numpy as np
from matplotlib import pyplot as plt

def plot_predicted_alignment_error(
    jobname: str, num_models: int, outs: dict, result_dir: Path, show: bool = False
):
    plt.figure(figsize=(3 * num_models, 2), dpi=100)
    for n, (model_name, value) in enumerate(outs.items()):
        ...
ColabFold-main
colabfold/plot.py
import logging
import tarfile
from pathlib import Path

import appdirs
import requests
import tqdm

logger = logging.getLogger(__name__)

# The data dir location logic switches between a version with and one without "params" because alphafold
# always internally joins "params". (We should probably patch alphafold)
defa...
ColabFold-main
colabfold/download.py
import os

from Bio.PDB import MMCIFParser

os.environ["TF_FORCE_UNIFIED_MEMORY"] = "1"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "2.0"

import json
import logging
import math
import random
import sys
import time
import zipfile
import io
from argparse import ArgumentParser
from pathlib import Path
from typing imp...
ColabFold-main
colabfold/batch.py
ColabFold-main
colabfold/__init__.py
def show_pdb(
    use_amber: bool,
    jobname: str,
    homooligomer,
    model_num=1,
    show_sidechains=False,
    show_mainchains=False,
    color="lDDT",
):
    import py3Dmol

    model_name = f"model_{model_num}"
    if use_amber:
        pdb_filename = f"{jobname}_relaxed_{model_name}.pdb"
    else:
        pd...
ColabFold-main
colabfold/pdb.py
# fmt: off
# @formatter:off
############################################
# imports
############################################
import jax
import requests
import hashlib
import tarfile
import time
import os
from typing import Tuple, List
import random
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot...
ColabFold-main
colabfold/colabfold.py
import json
import logging
import warnings
from pathlib import Path
from typing import Optional

from absl import logging as absl_logging
from importlib_metadata import distribution
from tqdm import TqdmExperimentalWarning

NO_GPU_FOUND = """ERROR: Jax could not find GPU. This can be either because your machine doesn't...
ColabFold-main
colabfold/utils.py
# fmt: off
# @formatter:off

import os
from urllib import request
from concurrent import futures
import pickle

import jax
from alphafold.data.tools import jackhmmer
from alphafold.data import parsers
from alphafold.data import pipeline
from alphafold.common import protein
from alphafold.model import config
from alpha...
ColabFold-main
colabfold/colabfold_alphafold.py
from typing import Mapping, Any

import numpy as np
import tensorflow as tf
from alphafold.model.features import FeatureDict
from alphafold.model.tf import shape_placeholders

NUM_RES = shape_placeholders.NUM_RES
NUM_MSA_SEQ = shape_placeholders.NUM_MSA_SEQ
NUM_EXTRA_SEQ = shape_placeholders.NUM_EXTRA_SEQ
NUM_TEMPLATE...
ColabFold-main
colabfold/alphafold/msa.py
from pathlib import Path
from functools import wraps, partialmethod
from typing import Tuple, List, Optional

import haiku
from alphafold.model import model, config, data
from alphafold.model.modules import AlphaFold
from alphafold.model.modules_multimer import AlphaFold as AlphaFoldMultimer

def load_models_and_para...
ColabFold-main
colabfold/alphafold/models.py
ColabFold-main
colabfold/alphafold/__init__.py
""" colabdfold_search produces two a3m files with null separated msa in them. We merge the two searches and then split into one a3m file per msa. """ import logging from argparse import ArgumentParser from pathlib import Path from subprocess import check_call from tqdm import tqdm logger = logging.getLogger(__name__)...
ColabFold-main
colabfold/mmseqs/merge_and_split_msas.py
ColabFold-main
colabfold/mmseqs/__init__.py
""" colabdfold_search produces two a3m files with null separated msa in them. We merge the two searches and then split into one a3m file per msa. """ import logging from argparse import ArgumentParser from pathlib import Path from tqdm import tqdm logger = logging.getLogger(__name__) def split_msa(merged_msa: Path,...
ColabFold-main
colabfold/mmseqs/split_msas.py
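As both module docstrings above describe, colabfold_search packs many MSAs into one a3m file separated by null bytes, which is then split into one file per MSA. A minimal sketch of that split step, assuming only the null-separated layout the docstrings name (the helper name and output naming are hypothetical, not the modules' actual logic):

from pathlib import Path

def split_null_separated_a3m(merged_msa: Path, out_dir: Path):
    # each null-delimited chunk is assumed to be one complete a3m record
    out_dir.mkdir(parents=True, exist_ok=True)
    records = filter(None, merged_msa.read_bytes().split(b"\0"))
    for i, record in enumerate(records):
        (out_dir / f"{i}.a3m").write_bytes(record)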
""" Functionality for running mmseqs locally. Takes in a fasta file, outputs final.a3m Note: Currently needs mmseqs compiled from source """ import logging import math import shutil import subprocess from argparse import ArgumentParser from pathlib import Path from typing import List, Union from colabfold.batch impo...
ColabFold-main
colabfold/mmseqs/search.py
from setuptools import setup, find_packages

setup(
    name = 'mixture-of-attention',
    packages = find_packages(exclude=[]),
    version = '0.0.24',
    license='MIT',
    description = 'Mixture of Attention',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    long_description_content_type = 'text/markdown',
    ...
mixture-of-attention-main
setup.py
import gzip
import random
import tqdm
import numpy as np

import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset

from mixture_of_attention.transformer import Transformer
from mixture_of_attention.autoregressive_wrapper import AutoregressiveWrappe...
mixture-of-attention-main
train.py
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange

# helper function

def exists(val):
    return val is not None

def eval_decorator(fn):
    def inner(model, *args, **kwargs):
        was_training = model.training
        model.eval()
        out = fn(model, *args, **kwar...
mixture-of-attention-main
mixture_of_attention/autoregressive_wrapper.py
from mixture_of_attention.mixture_of_attention import (
    MixtureOfAttention,
    MixtureOfAutoregressiveAttention,
    Attention
)
mixture-of-attention-main
mixture_of_attention/__init__.py
from collections import namedtuple
from functools import wraps
from packaging import version

import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange

# constants

EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_me...
mixture-of-attention-main
mixture_of_attention/attend.py
import torch
import torch.nn.functional as F
from torch import nn, einsum

from einops import rearrange

from mixture_of_attention.mixture_of_attention import MixtureOfAutoregressiveAttention
from mixture_of_attention.rotary_emb import RotaryEmbedding

# helper functions

def exists(val):
    return val is not None

#...
mixture-of-attention-main
mixture_of_attention/transformer.py
import math

import torch
import torch.nn.functional as F
from torch import Tensor, nn, einsum

from typing import Tuple, Optional

from einops import rearrange, repeat, reduce, pack, unpack

from mixture_of_attention.attend import Attend
from mixture_of_attention.rotary_emb import apply_rotary_pos_emb

from local_atten...
mixture-of-attention-main
mixture_of_attention/mixture_of_attention.py
import os
import re
import sys

from setuptools import setup, find_packages

install_requires = ['torch>=1.1.0']

PY36 = (3, 6, 0)
if sys.version_info < PY36:
    raise RuntimeError('torch-optimizer requires Python 3.6.0+')

def read(f):
    return open(os.path.join(os.path.dirname(__file__), f)).read().strip()

de...
pytorch-optimizer-master
setup.py
import functools
from copy import deepcopy

import torch
import torch_optimizer as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau, StepLR

from tests.utils import assert_dict_equal

def _build_params_dict(weight, bias, **kwargs):
    return [{'params': [...
pytorch-optimizer-master
tests/test_optimizer.py
import torch
import pytest

import torch_optimizer as optim

def rosenbrock(tensor):
    x, y = tensor
    return (1 - x) ** 2 + 1 * (y - x ** 2) ** 2

def quadratic(tensor):
    x, y = tensor
    a = 1.0
    b = 1.0
    return (x ** 2) / a + (y ** 2) / b

def beale(tensor):
    x, y = tensor
    f = (
        (1.5 ...
pytorch-optimizer-master
tests/test_basic.py
import torch

def assert_dict_equal(a, b, precision=0.000001):
    if isinstance(a, dict) and isinstance(b, dict):
        assert set(a.keys()) == set(b.keys())
        for k in a.keys():
            assert_dict_equal(a[k], b[k], precision)
    elif isinstance(a, list) and isinstance(b, list):
        assert len(a) ==...
pytorch-optimizer-master
tests/conftest.py
import torch
import pytest

import torch_optimizer as optim

def assert_sparse_not_supported(optimizer_class, err_msg=None):
    param = torch.randn(1, 1).to_sparse().requires_grad_(True)
    grad = torch.randn(1, 1).to_sparse()
    param.grad = grad
    optimizer = optimizer_class([param])
    optimizer.zero_grad()
    ...
pytorch-optimizer-master
tests/test_param_validation.py
import numpy as np
import pytest
import torch
import torch_optimizer as optim
from torch import nn

def make_dataset(seed=42):
    rng = np.random.RandomState(seed)
    N = 100
    D = 2
    X = rng.randn(N, D) * 2
    # center the first N/2 points at (-2,-2)
    mid = N // 2
    X[: mid, :] = X[: mid, :] - 2 * np....
pytorch-optimizer-master
tests/test_optimizer_with_nn.py
import torch

def assert_dict_equal(a, b, precision=0.000001):
    if isinstance(a, dict) and isinstance(b, dict):
        assert set(a.keys()) == set(b.keys())
        for k in a.keys():
            assert_dict_equal(a[k], b[k], precision)
    elif isinstance(a, list) and isinstance(b, list):
        assert len(a) ==...
pytorch-optimizer-master
tests/utils.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If ex...
pytorch-optimizer-master
docs/conf.py
import math

import numpy as np
import torch_optimizer as optim
import torch
from hyperopt import fmin, tpe, hp
import matplotlib.pyplot as plt

plt.style.use('seaborn-white')

def rosenbrock(tensor):
    # https://en.wikipedia.org/wiki/Test_functions_for_optimization
    x, y = tensor
    return (1 - x) ** 2 + 100 * ...
pytorch-optimizer-master
examples/viz_optimizers.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_optimizer as optim
from torchvision import datasets, transforms, utils
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from dataclasses import dataclass

class Net(nn.Module):
    def __init__...
pytorch-optimizer-master
examples/mnist.py
import torch
from torch.optim.optimizer import Optimizer

from .types import OptFloat, OptLossClosure, Params, State

__all__ = ('SGDW',)

class SGDW(Optimizer):
    r"""Implements SGDW algorithm.

    It has been proposed in `Decoupled Weight Decay Regularization`__.

    Arguments:
        params: iterable of param...
pytorch-optimizer-master
torch_optimizer/sgdw.py
import torch
from torch.optim.optimizer import Optimizer

from .types import Betas2, OptFloat, OptLossClosure, Params

__all__ = ('Lamb',)

class Lamb(Optimizer):
    r"""Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`__.

    Argum...
pytorch-optimizer-master
torch_optimizer/lamb.py
from .accsgd import AccSGD
from .adabound import AdaBound
from .adamod import AdaMod
from .diffgrad import DiffGrad
from .lamb import Lamb
from .lookahead import Lookahead
from .novograd import NovoGrad
from .radam import RAdam
from .sgdw import SGDW
from .yogi import Yogi

__all__ = (
    'AccSGD',
    'AdaBound',
    ...
pytorch-optimizer-master
torch_optimizer/__init__.py
from typing import Iterable, Union, Callable, Dict, Optional, Tuple, Any

from torch import Tensor

Params = Union[Iterable[Tensor], Iterable[dict]]

LossClosure = Callable[[], float]
OptLossClosure = Optional[LossClosure]
Betas2 = Tuple[float, float]
State = Dict[str, Any]
OptFloat = Optional[float]
pytorch-optimizer-master
torch_optimizer/types.py
import math

import torch
from torch.optim.optimizer import Optimizer

from .types import Betas2, OptFloat, OptLossClosure, Params

__all__ = ('AdaMod',)

class AdaMod(Optimizer):
    r"""Implements AdaMod algorithm.

    It has been proposed in `Adaptive and Momental Bounds for Adaptive Learning Rate Methods`__.
    ...
pytorch-optimizer-master
torch_optimizer/adamod.py
import torch
from torch.optim.optimizer import Optimizer

from .types import Betas2, OptFloat, OptLossClosure, Params

__all__ = ('NovoGrad',)

class NovoGrad(Optimizer):
    r"""Implements Novograd optimization algorithm.

    It has been proposed in `Stochastic Gradient Methods with Layer-wise Adaptive Moments ...
pytorch-optimizer-master
torch_optimizer/novograd.py
import math

import torch
from torch.optim.optimizer import Optimizer

from .types import Betas2, OptFloat, OptLossClosure, Params

__all__ = ('DiffGrad',)

class DiffGrad(Optimizer):
    r"""Implements DiffGrad algorithm.

    It has been proposed in `DiffGrad: An Optimization Method for Convolutional Neural Netw...
pytorch-optimizer-master
torch_optimizer/diffgrad.py
import math

import torch
from torch.optim.optimizer import Optimizer

from .types import Betas2, OptFloat, OptLossClosure, Params

__all__ = ('Yogi',)

class Yogi(Optimizer):
    r"""Implements Yogi optimization algorithm.

    It has been proposed in `Adaptive Methods for Nonconvex Optimization`__.

    Arguments:
        ...
pytorch-optimizer-master
torch_optimizer/yogi.py
import math

import torch
from torch.optim.optimizer import Optimizer

from .types import Betas2, OptFloat, OptLossClosure, Params

__all__ = ('RAdam',)

class RAdam(Optimizer):
    r"""Implements RAdam optimization algorithm.

    It has been proposed in `On the Variance of the Adaptive Learning Rate and Beyond`_...
pytorch-optimizer-master
torch_optimizer/radam.py
import math

import torch
from torch.optim.optimizer import Optimizer

from .types import Betas2, OptLossClosure, Params, State, OptFloat

__all__ = ('AdaBound',)

class AdaBound(Optimizer):
    r"""Implements AdaBound algorithm.

    It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learnin...
pytorch-optimizer-master
torch_optimizer/adabound.py
from collections import defaultdict

import torch
from torch.optim.optimizer import Optimizer

from .types import OptLossClosure, OptFloat, State

__all__ = ('Lookahead',)

class Lookahead(Optimizer):
    r"""Implements Lookahead optimization algorithm.

    It has been proposed in `Lookahead Optimizer: k steps forwa...
pytorch-optimizer-master
torch_optimizer/lookahead.py
import copy

from torch.optim.optimizer import Optimizer

from .types import OptLossClosure, Params, OptFloat

__all__ = ('AccSGD',)

class AccSGD(Optimizer):
    r"""Implements AccSGD algorithm.

    It has been proposed in `On the insufficiency of existing momentum schemes for Stochastic Optimization`__ and `Acc...
pytorch-optimizer-master
torch_optimizer/accsgd.py
from pathlib import Path
from typing import List
from datetime import timedelta

from dalle2_pytorch.trainer import DecoderTrainer
from dalle2_pytorch.dataloaders import create_image_embedding_dataloader
from dalle2_pytorch.trackers import Tracker
from dalle2_pytorch.train_configs import DecoderConfig, TrainDecoderConf...
DALLE2-pytorch-main
train_decoder.py
import click
import torch

from torch import nn
from typing import List
from accelerate import Accelerator
from accelerate.utils import set_seed
from torch.utils.data import DataLoader
from embedding_reader import EmbeddingReader
from accelerate.utils import dataclasses as accelerate_dataclasses

from dalle2_pytorch.ut...
DALLE2-pytorch-main
train_diffusion_prior.py
from setuptools import setup, find_packages

exec(open('dalle2_pytorch/version.py').read())

setup(
    name = 'dalle2-pytorch',
    packages = find_packages(exclude=[]),
    include_package_data = True,
    entry_points={
        'console_scripts': [
            'dalle2_pytorch = dalle2_pytorch.cli:main',
            'dream = dalle2_pytorch.c...
DALLE2-pytorch-main
setup.py
import math
import random
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager
from collections import namedtuple
from pathlib import Path

import torch
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from torch import nn, einsum
import torc...
DALLE2-pytorch-main
dalle2_pytorch/dalle2_pytorch.py
import json
from torchvision import transforms as T
from pydantic import BaseModel, validator, model_validator
from typing import List, Optional, Union, Tuple, Dict, Any, TypeVar

from x_clip import CLIP as XCLIP
from open_clip import list_pretrained
from coca_pytorch import CoCa

from dalle2_pytorch.dalle2_pytorch imp...
DALLE2-pytorch-main
dalle2_pytorch/train_configs.py
__version__ = '1.15.1'
DALLE2-pytorch-main
dalle2_pytorch/version.py
from math import sqrt
import copy
from random import choice
from pathlib import Path
from shutil import rmtree
from PIL import Image

import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import Dataset, DataLoader, random_split

import torchvision.transforms as T
from ...
DALLE2-pytorch-main
dalle2_pytorch/vqgan_vae_trainer.py
import torch
from packaging import version

if version.parse(torch.__version__) >= version.parse('2.0.0'):
    from einops._torch_specific import allow_ops_in_compiled_graph
    allow_ops_in_compiled_graph()

from dalle2_pytorch.version import __version__
from dalle2_pytorch.dalle2_pytorch import DALLE2, DiffusionPrior...
DALLE2-pytorch-main
dalle2_pytorch/__init__.py
# taken from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE

import torch

import html
import os
import ftfy
import regex as re
from functools import lru_cache
from pathlib import Path

from dalle2_pytorch.utils import import_or_p...
DALLE2-pytorch-main
dalle2_pytorch/tokenizer.py
import click
import torch
import torchvision.transforms as T
from functools import reduce
from pathlib import Path

from dalle2_pytorch import DALLE2, Decoder, DiffusionPrior

def safeget(dictionary, keys, default = None):
    return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.sp...
DALLE2-pytorch-main
dalle2_pytorch/cli.py
import urllib.request
import os
import json
from pathlib import Path
import shutil
from itertools import zip_longest
from typing import Any, Optional, List, Union
from pydantic import BaseModel

import torch

from dalle2_pytorch.dalle2_pytorch import Decoder, DiffusionPrior
from dalle2_pytorch.utils import import_or_pri...
DALLE2-pytorch-main
dalle2_pytorch/trackers.py
import time
import importlib

# helper functions

def exists(val):
    return val is not None

# time helpers

class Timer:
    def __init__(self):
        self.reset()

    def reset(self):
        self.last_time = time.time()

    def elapsed(self):
        return time.time() - self.last_time

# print helpers

def pr...
DALLE2-pytorch-main
dalle2_pytorch/utils.py
from torch.optim import AdamW, Adam

def separate_weight_decayable_params(params):
    wd_params, no_wd_params = [], []
    for param in params:
        param_list = no_wd_params if param.ndim < 2 else wd_params
        param_list.append(param)
    return wd_params, no_wd_params

def get_optimizer(
    params,
    lr =...
DALLE2-pytorch-main
dalle2_pytorch/optimizer.py
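separate_weight_decayable_params above is shown in full: parameters with fewer than two dimensions (biases, norm gains) are routed away from weight decay. A small usage sketch against a toy model; the model and hyperparameters are illustrative only, not from the repository:

import torch
from torch import nn
from torch.optim import AdamW

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))

wd_params, no_wd_params = [], []
for param in model.parameters():
    # same rule as above: ndim < 2 means no weight decay
    (no_wd_params if param.ndim < 2 else wd_params).append(param)

optimizer = AdamW(
    [{'params': wd_params}, {'params': no_wd_params, 'weight_decay': 0.0}],
    lr = 3e-4,
    weight_decay = 1e-2,
)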
import copy
import math
from math import sqrt
from functools import partial, wraps

from vector_quantize_pytorch import VectorQuantize as VQ

import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.autograd import grad as torch_grad

import torchvision

from einops import rearrange, reduce, ...
DALLE2-pytorch-main
dalle2_pytorch/vqgan_vae.py
import time
import copy
from pathlib import Path
from math import ceil
from functools import partial, wraps
from contextlib import nullcontext
from collections.abc import Iterable

import torch
import torch.nn.functional as F
from torch import nn
from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR
from tor...
DALLE2-pytorch-main
dalle2_pytorch/trainer.py
import os
import webdataset as wds
import torch
from torch.utils.data import DataLoader
import numpy as np
import fsspec
import shutil

def get_shard(filename):
    """
    Filenames with shards in them have a consistent structure that we can take advantage of
    Standard structure: path/to/file/prefix_string_00001.ex...
DALLE2-pytorch-main
dalle2_pytorch/dataloaders/decoder_loader.py
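get_shard's docstring leans on the fixed shard suffix in filenames like path/to/file/prefix_string_00001.ex... (extension truncated above). A sketch of that parse under the stated structure; the regex, function name, and .tar extension are assumptions, not the module's implementation:

import re

def parse_shard_number(filename: str) -> str:
    # pull the zero-padded shard id out of e.g. 'path/to/file/prefix_string_00001.tar'
    match = re.search(r'_(\d+)\.\w+$', filename)
    assert match is not None, f'no shard suffix in {filename!r}'
    return match.group(1)

assert parse_shard_number('path/to/file/prefix_string_00001.tar') == '00001'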
from math import ceil

from clip import tokenize
from embedding_reader import EmbeddingReader
from torch import from_numpy
from torch.utils.data import IterableDataset, DataLoader

class PriorEmbeddingDataset(IterableDataset):
    """
    PriorEmbeddingDataset is a wrapper of EmbeddingReader.

    It enables one to sim...
DALLE2-pytorch-main
dalle2_pytorch/dataloaders/prior_loader.py
from dalle2_pytorch.dataloaders.decoder_loader import ImageEmbeddingDataset, create_image_embedding_dataloader
from dalle2_pytorch.dataloaders.prior_loader import make_splits, get_reader, PriorEmbeddingDataset
DALLE2-pytorch-main
dalle2_pytorch/dataloaders/__init__.py
from pathlib import Path

import torch
from torch.utils import data
from torchvision import transforms, utils

from PIL import Image

# helper functions

def cycle(dl):
    while True:
        for data in dl:
            yield data

# dataset and dataloader

class Dataset(data.Dataset):
    def __init__(
        self,...
DALLE2-pytorch-main
dalle2_pytorch/dataloaders/simple_image_only_dataloader.py
from setuptools import setup, find_packages

setup(
    name = 'chroma-pytorch',
    packages = find_packages(exclude=[]),
    version = '0.0.1',
    license='MIT',
    description = 'Chroma - Pytorch',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    long_description_content_type = 'text/markdown',
    url = 'ht...
chroma-pytorch-main
setup.py
import torch
import os
import logging

from transformers import AutoTokenizer, AutoModelForMaskedLM, logging

from tf_bind_transformer.cache_utils import cache_fn, run_once

logging.set_verbosity_error()

def exists(val):
    return val is not None

def map_values(fn, dictionary):
    return {k: fn(v) for k, v in diction...
chroma-pytorch-main
chroma_pytorch/semantic_conditioner.py
from chroma_pytorch.chroma_pytorch import Chroma
chroma-pytorch-main
chroma_pytorch/__init__.py
import torch
from torch import nn, einsum
from einops import rearrange, repeat

import math
from pathlib import Path
from random import random
from functools import partial
from multiprocessing import cpu_count

import torch
from torch import nn, einsum
from torch.special import expm1
import torch.nn.functional as F

f...
chroma-pytorch-main
chroma_pytorch/chroma_pytorch.py
from setuptools import setup, find_packages

setup(
    name = 'nwt-pytorch',
    packages = find_packages(),
    version = '0.0.4',
    license='MIT',
    description = 'NWT - Pytorch',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    url = 'https://github.com/lucidrains/NWT-pytorch',
    keywords = [
        'artif...
NWT-pytorch-main
setup.py
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import EinMix as Mix

class Memcodes(nn.Module):
    def __init__(
        self,
        *,
        dim,
        num_codes,
        heads = 8,
        temperature = 1.,
    ):
        ...
NWT-pytorch-main
nwt_pytorch/nwt_pytorch.py
from nwt_pytorch.nwt_pytorch import Memcodes
NWT-pytorch-main
nwt_pytorch/__init__.py
from setuptools import setup, find_packages

setup(
    name = 'n-grammer-pytorch',
    packages = find_packages(exclude=[]),
    version = '0.0.14',
    license='MIT',
    description = 'N-Grammer - Pytorch',
    long_description_content_type = 'text/markdown',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    ur...
n-grammer-pytorch-main
setup.py
from n_grammer_pytorch.n_grammer_pytorch import VQNgrammer, Ngrammer, get_ngrammer_parameters, get_ngrammer_param_groups
n-grammer-pytorch-main
n_grammer_pytorch/__init__.py
# based off the jax code
# https://github.com/tensorflow/lingvo/blob/master/lingvo/jax/layers/ngrammer.py

import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
import sympy

# helper functions

def exists(val):
    return val is not None

def sum_squares(t, dim ...
n-grammer-pytorch-main
n_grammer_pytorch/n_grammer_pytorch.py
from setuptools import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name = 'revtorch',
    packages = ['revtorch'],
    version = '0.2.3',
    license='bsd-3-clause',
    description = 'Framework for creating (partially) reversible neural networks with PyTorch',
    long_description=long...
RevTorch-master
setup.py
from revtorch.revtorch import ReversibleBlock, ReversibleSequence
RevTorch-master
revtorch/__init__.py
import torch
import torch.nn as nn
#import torch.autograd.function as func
import sys
import random

class ReversibleBlock(nn.Module):
    '''
    Elementary building block for building (partially) reversible architectures

    Implementation of the Reversible block described in the RevNet paper
    (https://arxiv.org/...
RevTorch-master
revtorch/revtorch.py
import os

import pkg_resources
from setuptools import setup, find_packages
from pathlib import Path

if __name__ == "__main__":
    # Read description from README
    with Path(Path(__file__).parent, "README.md").open(encoding="utf-8") as file:
        long_description = file.read()

    setup(
        name="clip-any...
CLIP-main
setup.py
from .clip import *
CLIP-main
clip/__init__.py
from collections import OrderedDict
from typing import Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have strid...
CLIP-main
clip/model.py