| python_code (string, 0–4.04M chars) | repo_name (string, 7–58 chars) | file_path (string, 5–147 chars) |
|---|---|---|
from typing import Callable
import dotenv
import hydra
from omegaconf import OmegaConf, DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
OmegaConf.register_new_resolver('eval', eval)
OmegaCo... | fly-master | run.py |
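The `run.py` row above registers `eval` as an OmegaConf resolver, which lets one config field be computed from others. A minimal sketch of how such a resolver behaves — the config keys here are hypothetical, not taken from the repo:

```python
from omegaconf import OmegaConf

OmegaConf.register_new_resolver('eval', eval)

# A field can now be derived from another one at access time:
cfg = OmegaConf.create({'d_model': 512, 'd_inner': "${eval:'${d_model} * 4'}"})
print(cfg.d_inner)  # 2048 -- the inner ${d_model} interpolation resolves first, then eval runs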
import math
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
max_bound = 1.0
nsteps = 1000
dots = torch.linspace(-max_bound, max_bound, nsteps)
d = 16
m = int(d * math.log(d)) # 44
seqlen = 1024
n_hashes = 4 # This is L in the LSH notation
nbuckets = 23 # This is k in... | fly-master | analysis/mse_plot.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.aan import AAN
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestAAN:
@pytest.mark.parametrize('append_eos', [False, True])
@pytest.mark.parametrize('a... | fly-master | tests/datamodules/test_aan.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.cifar import CIFAR10, CIFAR100
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestCIFAR:
@pytest.mark.parametrize('normalize', [False, True])
@pytest.ma... | fly-master | tests/datamodules/test_cifar.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.listops import ListOps
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestListOps:
@pytest.mark.parametrize('append_eos', [False, True])
@pytest.mark.pa... | fly-master | tests/datamodules/test_listops.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.imdb import IMDB
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestIMDB:
@pytest.mark.parametrize('val_split', [0.2, 0.0])
@pytest.mark.parametrize('ap... | fly-master | tests/datamodules/test_imdb.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.pathfinder import PathFinder
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestPathFinder:
@pytest.mark.parametrize('test_split', [0.1, 0.0])
@pytest.m... | fly-master | tests/datamodules/test_pathfinder.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
from munch import Munch
import torch
from src.datamodules.language_modeling import WikiText2, WikiText103
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestWikiText2:
@pytest.mark.parametr... | fly-master | tests/datamodules/test_language_modeling.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.masked_language_modeling import MLMDataModule
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestMLMDataModule:
def test_wikitext2(self):
batch_size... | fly-master | tests/datamodules/test_masked_language_modeling.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
from src.datamodules.language_modeling_hf import LMDataModule
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
class TestLMDataModule:
def test_wikitext2(self):
batch_size = 7
... | fly-master | tests/datamodules/test_language_modeling_hf.py |
import pytest
import torch
from timm.scheduler import CosineLRScheduler
from src.optim.timm_lr_scheduler import TimmCosineLRScheduler
def test_lr():
n_epochs = 310
model = torch.nn.Linear(3, 3)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=0.03)
kwargs = dict(t_initial=30... | fly-master | tests/optim/test_timm_lr_schedulers.py |
import math
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat
# from triton.ops.blocksparse import matmul, softmax
from triton.ops.blocksparse import softmax
from deepspeed.ops.sparse_attention import FixedSparsityConfig
from src.models.modules.masking import... | fly-master | tests/models/attention/test_blocksparse_attention.py |
import pytest
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import LengthMask, TriangularCausalMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.sbsmyrf_attention import S... | fly-master | tests/models/attention/test_sbsmyrf_attention.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.linformer_attention import LinformerAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.c... | fly-master | tests/models/attention/test_linformer_attention.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.reformer_attention import ReformerAttention
def seed_cpu_cuda(seed):
torch.manual_seed(seed)
torch.cud... | fly-master | tests/models/attention/test_reformer_attention.py |
import torch
import triton
import pytest
from src.models.attention.blocksparse_sum import blocksparse_sum
from src.models.attention.blocksparse_utils import sparsify_tensor, mask_tensor
@pytest.mark.parametrize(
"BLOCK, WIDTH",
[(block, width) for block in [16, 32] for width in [256, 576, 1024, 1792]],
)
def... | fly-master | tests/models/attention/test_blocksparse_sum.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.local_attention import LocalAttention
def seed_c... | fly-master | tests/models/attention/test_local_attention.py |
import torch
import triton
import pytest
# from triton.ops.blocksparse import matmul
from src.models.attention.blocksparse_matmul import matmul
@pytest.mark.parametrize(
"MODE, TRANS_A, TRANS_B, BLOCK, DTYPE",
[
(mode, at, bt, block, dtype) for dtype in ["float32"] for mode in ["sdd"]
for at ... | fly-master | tests/models/attention/test_triton.py |
import torch
import triton
import pytest
from src.models.attention.blocksparse_logsumexp import logsumexp
from src.models.attention.blocksparse_utils import sparsify_tensor, mask_tensor
@pytest.mark.parametrize(
"BLOCK, WIDTH",
[(block, width) for block in [16, 32] for width in [256, 576, 1024, 1792]],
)
def... | fly-master | tests/models/attention/test_blocksparse_logsumexp.py |
import pytest
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from fast_transformers.masking import LengthMask, TriangularCausalMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.sbblocksparse_attention im... | fly-master | tests/models/attention/test_sbblocksparse_attention.py |
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import FullMask, LengthMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.smyrf_attention import SmyrfAttention
def seed_c... | fly-master | tests/models/attention/test_smyrf_attention.py |
import pytest
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat, rearrange
from src.models.modules.masking import LengthMask, TriangularCausalMask
from src.models.attention.full_attention import FullAttention
from src.models.attention.sblocal_attention import S... | fly-master | tests/models/attention/test_sblocal_attention.py |
import math
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from deepspeed.ops.sparse_attention import FixedSparsityConfig
from src.models.layers.blocksparse_linear import BlockSparseLinear
from src.models.layers.fastlinear import NinjaTurtleLine... | fly-master | tests/models/layers/test_blocksparse_linear.py |
import math
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.layers.blocksparse_linear import BlockSparseLinear, FlatBlockButterflySparsityConfig
class TestFlatBlockButterflySparsityConfig:
@pytest.mark.parametrize('butterfly... | fly-master | tests/models/layers/test_flatblockbutterfly_sparsity.py |
import math
import torch
import pytest
from src.models.layers.block_butterfly_multiply import block_butterfly_multiply
from src.models.layers.block_butterfly_multiply import block_butterfly_factor_multiply
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('nstacks', [1, 2, 3])
@pytest.mar... | fly-master | tests/models/layers/test_block_butterfly_multiply.py |
import math
import torch
import pytest
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply_reference
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametriz... | fly-master | tests/models/layers/test_blockdiag_butterfly_multiply.py |
import pytest
import torch
import torch.nn as nn
from einops import rearrange, reduce
from fast_transformers.masking import FullMask, LengthMask
from src.models.modules.multihead_attention import MultiheadAttention
from src.models.attention.full_attention import FullAttention
def seed_cpu_cuda(seed):
torch.ma... | fly-master | tests/models/modules/test_multihead_attention.py |
import math
import torch
import pytest
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply_reference
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.ops.blockdiag_butterfly_einsum import (
blockdiag_butterfly_multiply_einsum_simp... | fly-master | tests/ops/test_blockdiag_butterfly_einsum.py |
import math
import torch
import pytest
from einops import rearrange
from src.models.layers.blockdiag_butterfly_multiply import blockdiag_butterfly_multiply
from src.ops.blockdiag_butterfly_projection import blockdiag_butterfly_project, factors
from src.ops.blockdiag_butterfly_projection import ButterflyFFT, Butterfl... | fly-master | tests/ops/test_blockdiag_butterfly_projection.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from src.ops.fused_softmax_dropout import _fused_softmax_dropout
@pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('seqlen', [128, 512, 1024])
def test_softmax_dropout(seqlen, dtyp... | fly-master | tests/ops/test_fused_softmax_dropout.py |
import math
import torch
import pytest
from einops import rearrange
from src.ops.blockdiag_multiply import blockdiag_multiply_reference, blockdiag_multiply
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_blockdiag_multiply(device, dty... | fly-master | tests/ops/test_blockdiag_multiply.py |
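The test above imports both an optimized `blockdiag_multiply` and a reference version. A plausible einsum-based reference for multiplying by a block-diagonal matrix stored as its dense blocks — the name and shape convention are assumptions, not the repo's exact signature:

```python
import torch
from einops import rearrange

def blockdiag_multiply_sketch(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # weight holds the diagonal blocks: (nblocks, q, p); x is (..., nblocks * p).
    nblocks, q, p = weight.shape
    x = rearrange(x, '... (nblocks p) -> ... nblocks p', nblocks=nblocks)
    out = torch.einsum('...np,nqp->...nq', x, weight)  # each block acts only on its own slice
    return rearrange(out, '... nblocks q -> ... (nblocks q)')
```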
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from src.ops.triton.softmax_dropout import softmax_dropout
@pytest.mark.parametrize('dtype', [torch.float32, torch.float16])
@pytest.mark.parametrize('seqlen', [128, 512, 1024])
def test_softmax_dropout(seq... | fly-master | tests/ops/triton/test_softmax_dropout.py |
"""Convert T2T-ViT checkpoints to be compatible with our rewrite
"""
import re
import sys
import shutil
from pathlib import Path
import numpy as np
import torch
def main():
for file_name in sys.argv[1:]:
path = Path(file_name).expanduser()
if not str(path).endswith('.og'): # Back up original che... | fly-master | scripts/convert_checkpoint_t2t_vit.py |
fly-master | src/__init__.py | |
from typing import List, Optional
from pathlib import Path
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from src.utils i... | fly-master | src/train.py |
from typing import List, Optional
from pathlib import Path
import torch
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
fr... | fly-master | src/eval.py |
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/metrics/sequence_perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
import torch
import torch.nn.functional as F
from torchmetrics import Metric
__all__ = ['Perplexity']
class Perplexity(Metric... | fly-master | src/metrics/perplexity.py |
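The `perplexity.py` header stresses computing exp(average(nll)) rather than average(exp(nll)). A minimal torchmetrics-style sketch of that aggregation — the class name is hypothetical, and padding/ignore-index handling is omitted:

```python
import torch
import torch.nn.functional as F
from torchmetrics import Metric

class PerplexitySketch(Metric):
    def __init__(self):
        super().__init__()
        # Accumulate summed NLL and token count separately so averaging happens once, at compute().
        self.add_state('total_nll', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')

    def update(self, logits: torch.Tensor, targets: torch.Tensor):
        self.total_nll += F.cross_entropy(logits, targets, reduction='sum')
        self.count += targets.numel()

    def compute(self):
        return torch.exp(self.total_nll / self.count)  # exp(average(nll)), not average(exp(nll))
```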
import torch
from torch import Tensor
from torchmetrics import Metric, Accuracy
class AccuracyMine(Accuracy):
"""Wrap torchmetrics.Accuracy to take argmax of y in case of Mixup.
"""
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
super().update(preds, target.argmax(dim=-1... | fly-master | src/metrics/accuracy.py |
import torch
import torch.nn as nn
from einops import rearrange
class RelativeL2(nn.Module):
def forward(self, x, y):
x = rearrange(x, 'b ... -> b (...)')
y = rearrange(y, 'b ... -> b (...)')
diff_norms = torch.linalg.norm(x - y, ord=2, dim=-1)
y_norms = torch.linalg.norm(y, ord=... | fly-master | src/losses/relative_l2.py |
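The `RelativeL2` row is cut off mid-expression; a hypothetical completion consistent with the visible lines — the batch mean of ||x − y||₂ / ||y||₂ — would look like this (a sketch, not the repo's verbatim code):

```python
import torch
import torch.nn as nn
from einops import rearrange

class RelativeL2Sketch(nn.Module):
    def forward(self, x, y):
        x = rearrange(x, 'b ... -> b (...)')
        y = rearrange(y, 'b ... -> b (...)')
        diff_norms = torch.linalg.norm(x - y, ord=2, dim=-1)
        y_norms = torch.linalg.norm(y, ord=2, dim=-1)
        return (diff_norms / y_norms).mean()  # relative error, averaged over the batch
```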
# Copied from https://github.com/HobbitLong/SupContrast/blob/master/losses.py
"""
Author: Yonglong Tian (yonglong@mit.edu)
Date: May 07, 2020
"""
from __future__ import print_function
import torch
import torch.nn as nn
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11... | fly-master | src/losses/supcon.py |
from typing import Any, List
import torch
import hydra
from pytorch_lightning import LightningModule, LightningDataModule
from torchmetrics import MetricCollection
from einops import rearrange
from omegaconf import OmegaConf
from src.utils.utils import get_logger
from src.optim.param_grouping import group_parameter... | fly-master | src/tasks/seq.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py
from typing import Any, List, Sequence
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from sr... | fly-master | src/callbacks/flop_count.py |
import subprocess
from pathlib import Path
from typing import List
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
fr... | fly-master | src/callbacks/wandb_callbacks.py |
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
... | fly-master | src/callbacks/gpu_affinity.py |
# Inspired by https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/utilities/grads.py
# However, they compute grad at every iteration (I think), and the .item() calls incur a lot of overhead
# (6-7% slow down on GPT-2 small). Instead we only compute for iterations where we need to log, and don't
... | fly-master | src/callbacks/norm_monitor.py |
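The `norm_monitor.py` header explains the design choice: per-parameter `.item()` calls force GPU synchronization, so norms should be computed only on iterations that actually log. A sketch of a gradient norm that stays on-device — the helper name is hypothetical:

```python
import torch

@torch.no_grad()
def total_grad_norm(model: torch.nn.Module) -> torch.Tensor:
    # One fused norm over all gradients; no per-parameter .item() round trips.
    norms = [torch.linalg.vector_norm(p.grad) for p in model.parameters() if p.grad is not None]
    return torch.linalg.vector_norm(torch.stack(norms))

# In a Lightning callback one would guard the call with the logging cadence, e.g.
#   if trainer.global_step % trainer.log_every_n_steps == 0: ...
```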
# Inspired by https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/callbacks/stochastic_weight_avg.py
# https://github.com/PyTorchLightning/Lightning-Bolts/blob/master/pl_bolts/callbacks/byol_updates.py
# https://forums.pytorchlightning.ai/t/adopting-exponential-moving-average-ema-for-pl-... | fly-master | src/callbacks/ema.py |
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing import Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities i... | fly-master | src/callbacks/speed_monitor.py |
fly-master | src/callbacks/__init__.py | |
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/fault_tolerance.py
from typing import Any
from pathlib import Path
import pytorch_lightning as pl
class ModelCheckpointMine(pl.callbacks.model_checkpoint.ModelCheckpoint):
def __init__(self, *args, fault_toleran... | fly-master | src/callbacks/model_checkpoint.py |
from typing import Any
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(Callback):
"""Log the number of parameters of the model
"""
def __init__(self, total... | fly-master | src/callbacks/params_log.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/lr_monitor.py.
from typing import Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
class LossScaleMonitor(Callback):
"""Monitor the loss scale for AMP (fp... | fly-master | src/callbacks/loss_scale_monitor.py |
from typing import Any, List, Dict, Tuple, Union, Optional, Callable, cast
from pathlib import Path, PurePath
from PIL import Image
from fs.tarfs import TarFS
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, random_split, get_worker_info
from einops.layers.torch import Rearrange,... | fly-master | src/datamodules/pathfinder.py |
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py
from itertools import chain
from pathlib import Path
import pickle
from typing import Any, List, Union
from multiprocessing.shared_memory import SharedMemory
import numpy as np
import torch
from torch... | fly-master | src/datamodules/language_modeling_hf.py |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pickle
import logging
from typing import Any, List, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchtext
from datasets import load_dataset, DatasetDict, Value
from pytorch_lightning i... | fly-master | src/datamodules/aan.py |
# Adapted from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/data_utils.py
# https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/data_utils.py
# https://github.com/pytorch/examples/blob/master/word_language_model/main.py
# https://github.com/HazyR... | fly-master | src/datamodules/language_modeling.py |
# Adapted from https://github.com/PyTorchLightning/lightning-bolts/blob/master/pl_bolts/datamodules/imagenet_datamodule.py
import os
from pathlib import Path
from typing import Any, List, Union, Callable, Optional
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule
from ... | fly-master | src/datamodules/imagenet.py |
fly-master | src/datamodules/__init__.py | |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pickle
import logging
from typing import Any, List, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchtext
from datasets import load_dataset, DatasetDict
from pytorch_lightning import L... | fly-master | src/datamodules/listops.py |
import torch
from timm.data import Mixup
from timm.data.mixup import mixup_target
class TimmMixup(Mixup):
""" Wrap timm.data.Mixup that avoids the assert that batch size must be even.
"""
def __call__(self, x, target):
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.... | fly-master | src/datamodules/timm_mixup.py |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pickle
import logging
from typing import Any, List, Union
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchtext
from datasets import load_dataset, DatasetDict
from pytorch_lightning import L... | fly-master | src/datamodules/imdb.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397
from typing import Iterator
import torch
from torch.utils.data import RandomSampler, DistributedSampler
class RandomFaultTolerantSampler(RandomSampler):
... | fly-master | src/datamodules/fault_tolerant_sampler.py |
from typing import Optional, Tuple
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split
from torchvision.datasets import MNIST
from torchvision.transforms import transforms
class MNISTDataModule(LightningDataModule):
"""
... | fly-master | src/datamodules/mnist_datamodule.py |
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
# [2021-06-30] TD: Somehow I get segfault if I import pl_bolts *after* torchvision
from pl_bolts.datamodules import CIFAR10DataModule
from torchvision import transforms,... | fly-master | src/datamodules/cifar.py |
# Adapted from https://github.com/NVIDIA/DALI/blob/main/docs/examples/use_cases/pytorch/resnet50/main.py
# and https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Classification/ConvNets/image_classification/dataloaders.py
# and https://docs.nvidia.com/deeplearning/dali/user-guide/docs/examples/framework... | fly-master | src/datamodules/imagenet_dali_loader.py |
# Copied from https://github.com/jotaf98/simple-tar-dataset/blob/master/tardataset.py
import tarfile
from io import BytesIO
from PIL import Image, ImageFile
from torch.utils.data import Dataset, get_worker_info
try: # make torchvision optional
from torchvision.transforms.functional import to_tensor
except:
to_t... | fly-master | src/datamodules/datasets/tardataset.py |
# Copied from https://github.com/stanford-crfm/mistral/blob/main/src/corpora/detokenization.py
# Which was originally from https://github.com/NVIDIA/Megatron-LM/blob/aed2f75e209e525c842aec7c044af7acae2a4614/tasks/zeroshot_gpt/detokenizer.py
"""
Handle detokenization for different dataset for zero-shot LM evaluation.
"... | fly-master | src/datamodules/datasets/detokenizer.py |
from typing import Any, List, Dict, Tuple, Optional, Callable, cast
import logging
import time
import pickle
from pathlib import Path, PurePath
from PIL import Image
from fs.tarfs import TarFS
from fs.zipfs import ZipFS
from torch.utils.data import get_worker_info
from torchvision.datasets import ImageFolder
from t... | fly-master | src/datamodules/datasets/archive_imagefolder.py |
fly-master | src/datamodules/datasets/__init__.py | |
# Copied from https://github.com/jotaf98/simple-tar-dataset/blob/master/tarimagefolder.py
from .tardataset import TarDataset
try: # make torchvision optional
from torchvision.transforms.functional import to_tensor
except:
to_tensor = None
class TarImageFolder(TarDataset):
"""Dataset that supports Tar archive... | fly-master | src/datamodules/datasets/tarimagefolder.py |
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py
# Except we don't pad the last block and don't use overlapping eval
# And we return both the input and the target
import math
import numpy as np
import torch
class LMDataset(torch.utils.data.Dataset):
def __init__(self,... | fly-master | src/datamodules/datasets/lm_dataset.py |
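The `lm_dataset.py` header pins down three choices: non-overlapping blocks, no padding of the last block, and returning both input and target. A minimal dataset honoring those constraints — the class name is hypothetical, and the final short block is returned at its natural length:

```python
import torch

class LMDatasetSketch(torch.utils.data.Dataset):
    def __init__(self, tokens: torch.Tensor, seq_len: int):
        self.tokens, self.seq_len = tokens, seq_len
        n = len(tokens) - 1  # every input position needs a next-token target
        self.n_blocks = n // seq_len + (1 if n % seq_len else 0)

    def __len__(self):
        return self.n_blocks

    def __getitem__(self, idx):
        start = idx * self.seq_len
        chunk = self.tokens[start:start + self.seq_len + 1]  # last chunk may be shorter; no padding
        return chunk[:-1], chunk[1:]  # (input, target), shifted by one token
```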
# Copied from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/vocabulary.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in comp... | fly-master | src/datamodules/datasets/vocabulary.py |
import torch
from torch.optim import Optimizer
def InvSqrt(optimizer: Optimizer, num_warmup_steps: int):
""" Originally used for Transformer (in Attention is all you need)
We use the formula from the original paper.
Refer to other implementations:
- Nvidia: https://github.com/NVIDIA/DeepLearningExampl... | fly-master | src/optim/lr_scheduler.py |
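`InvSqrt` cites the schedule from "Attention Is All You Need": lr ∝ d_model^{-0.5} · min(step^{-0.5}, step · warmup^{-1.5}). A self-contained sketch via `LambdaLR`, with the constant folded into the base learning rate so the factor rises linearly to 1.0 over warmup and then decays as 1/√step — names here are hypothetical:

```python
from torch.optim.lr_scheduler import LambdaLR

def inv_sqrt_lambda(num_warmup_steps: int):
    def lr_lambda(step: int) -> float:
        step = max(step, 1)  # avoid 0 ** -0.5 on the first call
        return (num_warmup_steps ** 0.5) * min(step ** -0.5, step * num_warmup_steps ** -1.5)
    return lr_lambda

# usage: scheduler = LambdaLR(optimizer, inv_sqrt_lambda(4000))
# at step == num_warmup_steps both branches meet and the factor peaks at 1.0
```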
import inspect
import torch.nn as nn
import hydra
try:
from apex.contrib.layer_norm import FastLayerNorm
except ImportError:
FastLayerNorm = None
from src.models.modules.seq_common import PositionalEncoding
def group_parameters_for_optimizer(model, optimizer_cfg, bias_weight_decay=False,
... | fly-master | src/optim/param_grouping.py |
import torch
from torch.optim import Optimizer
from timm.scheduler import CosineLRScheduler
# We need to subclass torch.optim.lr_scheduler._LRScheduler, or Pytorch-lightning will complain
class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler):
""" Wrap timm.scheduler.CosineLRSchedu... | fly-master | src/optim/timm_lr_scheduler.py |
# Credits to DeepVoltaire
# github:DeepVoltaire/AutoAugment
from PIL import Image, ImageEnhance, ImageOps
import random
class ShearX(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Im... | fly-master | src/utils/transforms.py |
from itertools import repeat
import collections.abc
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/helpers.py
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(... | fly-master | src/utils/tuples.py |
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affini... | fly-master | src/utils/gpu_affinity.py |
import re
from pathlib import Path
import torch
def load_checkpoint(path, device='cpu'):
path = Path(path).expanduser()
is_deepspeed = False
if path.is_dir(): # DeepSpeed checkpoint
is_deepspeed = True
latest_path = path / 'latest'
if latest_path.is_file():
with open(... | fly-master | src/utils/checkpoint.py |
# Copied from https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py
from __future__ import division
from __future__ import unicode_literals
from typing import Iterable, Optional
import weakref
import copy
import contextlib
import torch
def to_float_maybe(x):
return x.float() if x.dtype in [torch.flo... | fly-master | src/utils/ema.py |
fly-master | src/utils/__init__.py | |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py
import torch
try:
from deepspeed.profiling.flops_profiler import get_model_profile
has_deepspeed_profiling = True
except ImportError as e:
has_deepspeed_profiling = False
try:
from fvcore.nn import FlopCountAnaly... | fly-master | src/utils/flops.py |
# Copied from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/distributed.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in com... | fly-master | src/utils/distributed.py |
import logging
import warnings
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
# Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-ma... | fly-master | src/utils/utils.py |
# Credits to DeepVoltaire
# github:DeepVoltaire/AutoAugment
import numpy as np
from src.utils.transforms import *
class ImageNetPolicy(object):
""" Randomly choose one of the best 24 Sub-policies on ImageNet.
Example:
>>> policy = ImageNetPolicy()
>>> transformed = policy(image)
... | fly-master | src/utils/autoaug.py |
import torch
import torch.nn.functional as F
# Adapted from https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/autopadder.py
def pad_to_multiple(tensor, multiple, dims=-1, value=0):
try:
dims = list(dims) # If dims is an iterable (e.g., List, Tuple)
except:
dims = [di... | fly-master | src/utils/padding.py |
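The `pad_to_multiple` row is cut off just after normalizing `dims`; a hypothetical version of the full utility, consistent with the visible try/except and with `F.pad`'s last-dim-first ordering (a sketch, not the repo's verbatim code):

```python
import torch.nn.functional as F

def pad_to_multiple_sketch(tensor, multiple, dims=-1, value=0):
    try:
        dims = list(dims)   # dims is an iterable of dimension indices
    except TypeError:
        dims = [dims]       # dims is a single int
    pad = [0, 0] * tensor.ndim  # (left, right) per dim, last dim first, as F.pad expects
    for d in dims:
        remainder = tensor.shape[d] % multiple
        if remainder:
            idx = (tensor.ndim - 1 - (d % tensor.ndim)) * 2
            pad[idx + 1] = multiple - remainder  # right-pad up to the next multiple
    return F.pad(tensor, pad, value=value) if any(pad) else tensor
```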
from typing import Any, List
import torch
from pytorch_lightning import LightningModule
from torchmetrics.classification.accuracy import Accuracy
from src.models.modules.simple_dense_net import SimpleDenseNet
class MNISTLitModel(LightningModule):
"""
Example of LightningModule for MNIST classification.
... | fly-master | src/models/mnist_model.py |
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License... | fly-master | src/models/gpt2.py |
# Copied from https://github.com/HobbitLong/SupContrast/blob/master/networks/resnet_big.py
"""ResNet in PyTorch.
ImageNet-Style ResNet
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Adapted from: https://github.com/bearpaw/pytorch-classification
... | fly-master | src/models/resnet_supcon.py |
import copy
from typing import Optional, Union, Callable
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch import Tensor
from einops import rearrange
import hydra
from src.models.modules.masking import LengthMask
from src.models.modules.seq_common import ClassificationHead, Position... | fly-master | src/models/s4_seq.py |
from torch import nn
class SimpleDenseNet(nn.Module):
def __init__(self, hparams: dict):
super().__init__()
self.model = nn.Sequential(
nn.Linear(hparams["input_size"], hparams["lin1_size"]),
nn.BatchNorm1d(hparams["lin1_size"]),
nn.ReLU(),
nn.Linea... | fly-master | src/models/simple_dense_net.py |
fly-master | src/models/__init__.py | |
# Adapted from https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# -------------------------------... | fly-master | src/models/swin_mlp.py |
import functools
import math
import copy
from typing import Optional, Union, Callable
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch import Tensor
from src.models.modules.masking import FullMask, LengthMask
from einops import repeat
import hydra
from src.models.modules.seq_commo... | fly-master | src/models/transformer.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | fly-master | src/models/bert.py |
# Adapted from https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# -------------------------------... | fly-master | src/models/swin_transformer.py |
# Copied from https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_1d.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.layers.spectral_conv import SpectralConv1d, SpectralConv2d
class FourierOperator1d(... | fly-master | src/models/fno.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py
""" MLP-Mixer, ResMLP, and gMLP in PyTorch
This impl originally based on MLP-Mixer paper.
Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py
Paper: 'MLP-Mixer... | fly-master | src/models/mlp_mixer.py |
# Adapted from https://github.com/lucidrains/performer-pytorch/blob/main/performer_pytorch/performer_pytorch.py
import math
import torch
from torch import nn
from torch.cuda.amp import autocast
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
try:
from apex ... | fly-master | src/models/attention/performer_utils.py |
# Adapted from https://github.com/lucidrains/linformer/blob/master/linformer/linformer.py
# and https://github.com/tatp22/linformer-pytorch
import math
import torch
import torch.nn as nn
from einops import rearrange
class LinformerAttention(nn.Module):
"""
Arguments
---------
softmax_temp: The t... | fly-master | src/models/attention/linformer_attention.py |
# This is a copy of https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/matmul.py
# with a one-line fix the bug https://github.com/openai/triton/issues/266
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
import torch
@triton.jit
def _kernel(
A, B, C, strid... | fly-master | src/models/attention/blocksparse_matmul.py |