Dataset columns:

    python_code : string (lengths 0 to 1.02M)
    repo_name   : string (lengths 9 to 48)
    file_path   : string (lengths 5 to 114)

Each record below shows the truncated python_code preview on its own line, followed by a bracketed line giving that record's repo_name and file_path.
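For orientation, a minimal sketch of how records with this schema could be read, assuming the corpus is published as a Hugging Face dataset; the dataset id "user/python-code-corpus" and the "train" split are placeholders, not real identifiers.

    # Hypothetical loading example; dataset id and split name are placeholders.
    from datasets import load_dataset

    ds = load_dataset("user/python-code-corpus", split="train")

    # Each row is a dict with the three string columns described above.
    for row in ds.select(range(3)):
        print(row["repo_name"], row["file_path"], len(row["python_code"]))

The records themselves follow.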
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/valid_layouts.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/memory_estimator_test.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/__init__.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/layout_optimizer_test.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/api.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/print_cp_model_solution.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/memory_estimator.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/valid_layouts_test.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/scheduler.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/graph_interface_test.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/api_test.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: mesh_tensorflow/auto_mtf/layout_optimizer.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: examples/mnist_dataset.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: examples/toy_model_tpu.py]
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab...
[repo_name: mesh-master | file_path: examples/mnist.py]
from setuptools import setup, find_packages setup( name = 'autoregressive-linear-attention-cuda', packages = find_packages(exclude=[]), version = '0.0.1', license='MIT', description = 'Autoregressive Linear Attention CUDA kernel', author = 'Phil Wang', author_email = 'lucidrains@gmail.com', long_descri...
[repo_name: autoregressive-linear-attention-cuda-main | file_path: setup.py]
[repo_name: autoregressive-linear-attention-cuda-main | file_path: autoregressive_linear_attention_cuda/__init__.py | no python_code preview]
[repo_name: autoregressive-linear-attention-cuda-main | file_path: autoregressive_linear_attention_cuda/autoregressive_linear_attention_cuda.py | no python_code preview]
from setuptools import setup, find_packages setup( name = 'retro-pytorch', packages = find_packages(exclude=[]), version = '0.3.8', license='MIT', description = 'RETRO - Retrieval Enhanced Transformer - Pytorch', long_description_content_type = 'text/markdown', author = 'Phil Wang', author_email = 'luc...
[repo_name: RETRO-pytorch-main | file_path: setup.py]
from functools import partial import torch import torch.nn.functional as F from torch import nn, einsum from retro_pytorch.retrieval import BERT_VOCAB_SIZE from einops import rearrange, repeat # constants MIN_DIM_HEAD = 32 # helper functions def exists(val): return val is not None def default(val, d): re...
[repo_name: RETRO-pytorch-main | file_path: retro_pytorch/retro_pytorch.py]
from retro_pytorch.retro_pytorch import RETRO from retro_pytorch.data import RETRODataset from retro_pytorch.training import TrainingWrapper
[repo_name: RETRO-pytorch-main | file_path: retro_pytorch/__init__.py]
import os import numpy as np from pathlib import Path from shutil import rmtree from contextlib import contextmanager def is_true_env_flag(env_flag): return os.getenv(env_flag, 'false').lower() in ('true', '1', 't') def reset_folder_(p): path = Path(p) rmtree(path, ignore_errors = True) path.mkdir(ex...
[repo_name: RETRO-pytorch-main | file_path: retro_pytorch/utils.py]
from pathlib import Path from math import ceil import torch import torch.nn.functional as F import logging import numpy as np from einops import rearrange import faiss from autofaiss import build_index from retro_pytorch.utils import memmap, reset_folder_ # constants SOS_ID = 101 EOS_ID = 102 BERT_MODEL_DIM = 768 ...
[repo_name: RETRO-pytorch-main | file_path: retro_pytorch/retrieval.py]
from torch.optim import AdamW def separate_weight_decayable_params(params): no_wd_params = set([param for param in params if param.ndim < 2]) wd_params = set(params) - no_wd_params return wd_params, no_wd_params def get_optimizer(params, lr = 3e-4, wd = 1e-1, filter_by_requires_grad = False): if filte...
[repo_name: RETRO-pytorch-main | file_path: retro_pytorch/optimizer.py]
import numpy as np from functools import partial import json from pathlib import Path import torch from torch import nn import torch.nn.functional as F from torch.utils.data import DataLoader from retro_pytorch import RETRO, RETRODataset from retro_pytorch.data import knn_to_retrieved_chunks from retro_pytorch.optimi...
[repo_name: RETRO-pytorch-main | file_path: retro_pytorch/training.py]
from functools import partial import numpy as np import torch from torch.utils.data import Dataset from retro_pytorch.retrieval import EOS_ID from retro_pytorch.utils import memmap # knn to retrieved chunks def knn_to_retrieved_chunks( knns, chunks_memmap, *, add_continuations, num_chunks, pa...
[repo_name: RETRO-pytorch-main | file_path: retro_pytorch/data.py]
from setuptools import setup, find_packages setup( name = 'fast-transformer-pytorch', packages = find_packages(), version = '0.0.4', license='MIT', description = 'Fast Transformer - Pytorch', author = 'Phil Wang', author_email = 'lucidrains@gmail.com', url = 'https://github.com/lucidrains/fast-transfor...
[repo_name: fast-transformer-pytorch-main | file_path: setup.py]
import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, reduce from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding # helper functions def exists(val): return val is not None def default(val, d): return val if exists(val) else d # helper class...
[repo_name: fast-transformer-pytorch-main | file_path: fast_transformer_pytorch/fast_transformer_pytorch.py]
from fast_transformer_pytorch.fast_transformer_pytorch import FastTransformer
[repo_name: fast-transformer-pytorch-main | file_path: fast_transformer_pytorch/__init__.py]
from setuptools import setup, find_packages setup( name = 'uformer-pytorch', packages = find_packages(), version = '0.0.8', license='MIT', description = 'Uformer - Pytorch', author = 'Phil Wang', author_email = 'lucidrains@gmail.com', url = 'https://github.com/lucidrains/uformer-pytorch', keywords = ...
[repo_name: uformer-pytorch-main | file_path: setup.py]
from uformer_pytorch.uformer_pytorch import Uformer
[repo_name: uformer-pytorch-main | file_path: uformer_pytorch/__init__.py]
import math from math import log, pi, sqrt from functools import partial import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat # constants List = nn.ModuleList # helpers def exists(val): return val is not None def default(val, d): return val if exi...
[repo_name: uformer-pytorch-main | file_path: uformer_pytorch/uformer_pytorch.py]
import os import gzip import click import re import random from math import ceil from functools import partial from itertools import islice, chain from operator import itemgetter from pyfaidx import Faidx import numpy as np from random import random from pathlib import Path import toml from google.cloud import stora...
[repo_name: progen-main | file_path: generate_data.py]
from dotenv import load_dotenv load_dotenv() import click import humanize from jinja2 import Template from pathlib import Path import tqdm import numpy as np import toml import jax from jax import nn, random, jit, tree_util, tree_map from optax import adamw, clip_by_global_norm, chain, apply_updates, apply_every fr...
[repo_name: progen-main | file_path: train.py]
from dotenv import load_dotenv load_dotenv() import click import humanize import jax from jax import nn, random, jit, tree_util, numpy as np from haiku import PRNGSequence from progen_transformer import ProGen from progen_transformer.data import decode_tokens, encode_tokens from progen_transformer.utils import samp...
[repo_name: progen-main | file_path: sample.py]
import time import os, errno from pathlib import Path from functools import partial from google.cloud import storage from cloudpickle import pickle from progen_transformer.utils import clear_directory_, silentremove # filesystem checkpoint fns def file_reset_checkpoint(path): clear_directory_(path) def file_get...
[repo_name: progen-main | file_path: progen_transformer/checkpoint.py]
from functools import partial import jax from jax import random from jax import nn from jax.lax import stop_gradient import jax.numpy as np import jmp import haiku as hk from haiku import initializers from einops import rearrange, repeat from progen_transformer.utils import exists # constants ATTN_MASK_VALUE = -1e...
[repo_name: progen-main | file_path: progen_transformer/progen.py]
from progen_transformer.progen import ProGen
[repo_name: progen-main | file_path: progen_transformer/__init__.py]
from math import ceil import os, errno from shutil import rmtree import jax from jax import random, nn, value_and_grad, vmap, pmap, jit, lax from jax.lax import top_k import jax.numpy as np from einops import rearrange # helper functions def noop(x): return x def exists(val): return val is not None def lo...
[repo_name: progen-main | file_path: progen_transformer/utils.py]
import tensorflow as tf import numpy as np from functools import partial from pathlib import Path from contextlib import contextmanager # writing tfrecords def write(writer, values): record_bytes = tf.train.Example(features = tf.train.Features(feature={ 'seq': tf.train.Feature(bytes_list = tf.train.BytesL...
[repo_name: progen-main | file_path: progen_transformer/data.py]
from setuptools import setup, find_packages setup( name = 'einops-exts', packages = find_packages(exclude=[]), version = '0.0.4', license='MIT', description = 'Einops Extensions', long_description_content_type = 'text/markdown', author = 'Phil Wang', author_email = 'lucidrains@gmail.com', url = 'http...
[repo_name: einops-exts-main | file_path: setup.py]
from einops_exts.einops_exts import check_shape from einops_exts.einops_exts import rearrange_many, repeat_many, reduce_many from einops_exts.einops_exts import rearrange_with_anon_dims, repeat_with_anon_dims, reduce_with_anon_dims
[repo_name: einops-exts-main | file_path: einops_exts/__init__.py]
import re from torch import nn from functools import wraps, partial from einops import rearrange, reduce, repeat # checking shape # @nils-werner # https://github.com/arogozhnikov/einops/issues/168#issuecomment-1042933838 def check_shape(tensor, pattern, **kwargs): return rearrange(tensor, f"{pattern} -> {pattern...
[repo_name: einops-exts-main | file_path: einops_exts/einops_exts.py]
from torch import nn from einops import rearrange # for rearranging to and from a pattern class EinopsToAndFrom(nn.Module): def __init__(self, from_einops, to_einops, fn): super().__init__() self.from_einops = from_einops self.to_einops = to_einops self.fn = fn if '...' in...
[repo_name: einops-exts-main | file_path: einops_exts/torch.py]
from setuptools import setup, find_packages setup( name = 'phenaki-pytorch', packages = find_packages(exclude=[]), version = '0.3.1', license='MIT', description = 'Phenaki - Pytorch', author = 'Phil Wang', author_email = 'lucidrains@gmail.com', long_description_content_type = 'text/markdown', url = '...
[repo_name: phenaki-pytorch-main | file_path: setup.py]
import math import torch import torch.nn.functional as F from torch import nn, einsum from beartype import beartype from typing import Tuple from einops import rearrange, repeat # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def leaky_relu(p = 0.1):...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/attention.py]
import math import copy from pathlib import Path from random import random, choices from functools import partial from collections import namedtuple from multiprocessing import cpu_count from beartype import beartype from beartype.door import is_bearable from beartype.vale import Is from typing import Optional, List, ...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/phenaki_trainer.py]
from pathlib import Path import copy import math from functools import wraps import torch import torch.nn.functional as F from torch import nn, einsum from torch.autograd import grad as torch_grad import torchvision from einops import rearrange, repeat, pack, unpack from einops.layers.torch import Rearrange from ve...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/cvivit.py]
from math import sqrt from random import choice from pathlib import Path from shutil import rmtree from beartype import beartype import torch from torch import nn from torch.utils.data import Dataset, DataLoader, random_split import torchvision.transforms as T from torchvision.datasets import ImageFolder from torchv...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/cvivit_trainer.py]
import torch import transformers from transformers import T5Tokenizer, T5EncoderModel, T5Config # less warning messages since only using encoder transformers.logging.set_verbosity_error() # helper functions def exists(val): return val is not None # config MAX_LENGTH = 256 DEFAULT_T5_NAME = 'google/t5-v1_1-ba...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/t5.py]
import math import functools from contextlib import nullcontext from functools import partial, wraps from typing import Optional, List, Union from beartype import beartype import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat, pack, unpack from einops.layers.t...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/phenaki_pytorch.py]
from phenaki_pytorch.phenaki_pytorch import Phenaki, CViViT, MaskGit, TokenCritic, make_video from phenaki_pytorch.cvivit_trainer import CViViTTrainer from phenaki_pytorch.phenaki_trainer import PhenakiTrainer
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/__init__.py]
from torch.optim import AdamW, Adam def separate_weight_decayable_params(params): wd_params, no_wd_params = [], [] for param in params: param_list = no_wd_params if param.ndim < 2 else wd_params param_list.append(param) return wd_params, no_wd_params def get_optimizer( params, lr =...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/optimizer.py]
from pathlib import Path import cv2 from PIL import Image from functools import partial from typing import Tuple, List from beartype.door import is_bearable import numpy as np import torch import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader as PytorchDataLoader from torchvision import t...
[repo_name: phenaki-pytorch-main | file_path: phenaki_pytorch/data.py]
from setuptools import setup, find_packages setup( name = 'adjacent-attention-pytorch', packages = find_packages(), version = '0.0.12', license='MIT', description = 'Adjacent Attention Network - Pytorch', long_description_content_type = 'text/markdown', author = 'Phil Wang', author_email = 'lucidrains@...
[repo_name: adjacent-attention-network-main | file_path: setup.py]
from adjacent_attention_network.adjacent_attention_network import AdjacentAttentionNetwork
[repo_name: adjacent-attention-network-main | file_path: adjacent_attention_network/__init__.py]
import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat from isab_pytorch import ISAB # helpers def exists(val): return val is not None def batched_index_select(values, indices): last_dim = values.shape[-1] return values.gather(1, indices[:, :, None...
[repo_name: adjacent-attention-network-main | file_path: adjacent_attention_network/adjacent_attention_network.py]
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py import sys import warnings import os from pathlib import Path from setuptools import setup, find_packages import subprocess import torch from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME with open("README.m...
[repo_name: flash-attention-main | file_path: setup.py]
# Copied from https://github.com/NVIDIA/apex/tree/master/csrc/megatron # We add the case where seqlen = 4k and seqlen = 8k import os import subprocess import torch from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME def get_cuda_bare_metal_version(cuda_dir): ...
[repo_name: flash-attention-main | file_path: csrc/fused_softmax/setup.py]
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py import torch from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME from setuptools import setup, find_packages import subprocess import sys import warnings import os # ninja build does not work unless include_dir...
[repo_name: flash-attention-main | file_path: csrc/xentropy/setup.py]
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py import torch from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME from setuptools import setup, find_packages import subprocess import sys import warnings import os # ninja build does not work unless include_dir...
[repo_name: flash-attention-main | file_path: csrc/layer_norm/setup.py]
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py import torch from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME from setuptools import setup, find_packages import subprocess import sys import warnings import os # ninja build does not work unless include_dir...
[repo_name: flash-attention-main | file_path: csrc/rotary/setup.py]
import os import subprocess import torch from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME def get_cuda_bare_metal_version(cuda_dir): raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True) output = raw_output.spl...
[repo_name: flash-attention-main | file_path: csrc/fused_dense_lib/setup.py]
import math import torch import torch.nn.functional as F import pytest from einops import rearrange from flash_attn.layers.rotary import apply_rotary_emb_func, apply_rotary_emb_torch is_sm8x = torch.cuda.get_device_capability('cuda') >= (8, 0) @pytest.mark.parametrize('dtype', ([torch.float16] if not is_sm8x else...
[repo_name: flash-attention-main | file_path: tests/test_rotary.py]
import math from functools import partial import torch import torch.nn.functional as F import pytest from einops import rearrange, repeat from flash_attn.flash_attn_interface import flash_attn_func, flash_attn_unpadded_qkvpacked_func, _get_block_size, flash_attn_unpadded_kvpacked_func, flash_attn_unpadded_func from...
[repo_name: flash-attention-main | file_path: tests/test_flash_attn.py]
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/losses/test_cross_entropy_parallel.py import math import torch import torch.nn.functional as F import pytest from apex.transformer import parallel_state from apex.transformer import tensor_parallel from flash_attn.losses.cross_entropy_par...
[repo_name: flash-attention-main | file_path: tests/losses/test_cross_entropy_parallel.py]
import math import torch import torch.nn.functional as F import pytest from einops import rearrange from flass_attn.losses.cross_entropy_apex import CrossEntropyLossApex is_sm8x = torch.cuda.get_device_capability('cuda')[0] >= 8 @pytest.mark.parametrize('dtype', [torch.float16, torch.float32] + ([torch.bfloat16] ...
[repo_name: flash-attention-main | file_path: tests/losses/test_cross_entropy_apex.py]
import math import torch import torch.nn.functional as F import pytest from einops import rearrange from flash_attn.ops.layer_norm import DropoutAddLayerNorm, dropout_add_layer_norm is_sm8x = torch.cuda.get_device_capability('cuda')[0] >= 8 @pytest.mark.parametrize('has_rowscale', [True, False]) # @pytest.mark.pa...
[repo_name: flash-attention-main | file_path: tests/ops/test_dropout_layer_norm.py]
import math import torch import torch.nn.functional as F import pytest from einops import rearrange from flash_attn.ops.fused_dense import FusedDenseTD, FusedDenseGeluDenseTD from flash_attn.ops.fused_dense import FusedDenseResidual, FusedDenseResGeluDense @pytest.mark.parametrize('dtype', [torch.float16, torch.bf...
[repo_name: flash-attention-main | file_path: tests/ops/test_fused_dense.py]
from functools import partial import math import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange, repeat from flash_attn.utils.benchmark import benchmark_forward, benchmark_all, pytorch_profiler from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func # f...
[repo_name: flash-attention-main | file_path: benchmarks/benchmark_causal.py]
from functools import partial import math import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange, repeat from flash_attn.utils.benchmark import benchmark_all, benchmark_forward, benchmark_backward, benchmark_combined from flash_attn.bert_padding import unpad_input, pad_input f...
[repo_name: flash-attention-main | file_path: benchmarks/benchmark_flash_attention.py]
# [2022-10-23] Copied from https://github.com/NVIDIA/apex/blob/master/apex/transformer/functional/fused_softmax.py # for benchmarking. # We added support for seqlen=2k and seqlen=4k # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "L...
[repo_name: flash-attention-main | file_path: flash_attn/fused_softmax.py]
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/fmha.py import torch import torch.nn as nn import flash_attn_cuda def convert_blockmask(blockmask, causal): """Convert from the 0-1 format to the format used by the CUDA code. 0 means th...
[repo_name: flash-attention-main | file_path: flash_attn/flash_blocksparse_attn_interface.py]
import math import torch import torch.nn as nn from einops import rearrange import hydra from flash_attn.flash_blocksparse_attn_interface import flash_blocksparse_attn_func from flash_attn.flash_blocksparse_attn_interface import convert_blockmask from flash_attn.bert_padding import unpad_input, pad_input, index_firs...
[repo_name: flash-attention-main | file_path: flash_attn/flash_blocksparse_attention.py]
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py import torch import torch.nn.functional as F from einops import rearrange, repeat class IndexFirstAxis(torch.autograd.Function): @staticmethod def forward(ctx, input, indice...
[repo_name: flash-attention-main | file_path: flash_attn/bert_padding.py]
[repo_name: flash-attention-main | file_path: flash_attn/__init__.py | no python_code preview]
# [2022-10-23] Downloaded from https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py # for benchmarking. # We fixed a few dtype cast to make it work for bf16 """ Fused Attention =============== This is a Triton implementation of the Flash Attention algorithm (see: Dao et al., https://arxi...
[repo_name: flash-attention-main | file_path: flash_attn/flash_attn_triton_og.py]
import math import torch import torch.nn as nn from einops import rearrange from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis class FlashAttention(nn.Module): """Implement the scaled dot product attention w...
[repo_name: flash-attention-main | file_path: flash_attn/flash_attention.py]
""" *Experimental* implementation of FlashAttention in Triton. We use the FlashAttention implementation from Phil Tillet a starting point. https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py Changes: - Implement both causal and non-causal attention. - Implement both self-attention and ...
[repo_name: flash-attention-main | file_path: flash_attn/flash_attn_triton.py]
import torch import torch.nn as nn import torch.nn.functional as F import flash_attn_cuda def _get_block_size(device, head_dim, is_dropout): assert head_dim % 8 == 0 and head_dim <= 128 return 256 if head_dim <= 64 else 128 def _flash_attn_forward(q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max...
[repo_name: flash-attention-main | file_path: flash_attn/flash_attn_interface.py]
import torch import torch.nn as nn import xentropy_cuda_lib # https://github.com/NVIDIA/apex/blob/master/apex/contrib/xentropy/softmax_xentropy.py class SoftmaxCrossEntropyLossFn(torch.autograd.Function): @staticmethod def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, inplace_backward=False): ...
[repo_name: flash-attention-main | file_path: flash_attn/losses/cross_entropy_apex.py]
# Inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/cross_entropy.py # But we make it much faster: we compute the local loss and the LSE, and by exchanging the LSE and # the losses we can get the global loss. There's no need to do it step by step # (compute local max, exchange, com...
[repo_name: flash-attention-main | file_path: flash_attn/losses/cross_entropy_parallel.py]
# Inspired by https://github.com/facebookresearch/xformers/blob/main/xformers/components/positional_embedding/rotary.py from typing import Tuple import math import torch from einops import rearrange, repeat import rotary_emb def rotate_half(x): x1, x2 = x.chunk(2, dim=-1) return torch.cat((-x2, x1), dim=-...
[repo_name: flash-attention-main | file_path: flash_attn/layers/rotary.py]
# Copyright (c) 2022, Tri Dao. """ Useful functions for writing test code. """ import torch import torch.utils.benchmark as benchmark def benchmark_forward(fn, *inputs, repeats=10, desc='', verbose=True, amp=False, amp_dtype=torch.float16, **kwinputs): """ Use Pytorch Benchmark on the forwa...
[repo_name: flash-attention-main | file_path: flash_attn/utils/benchmark.py]
# Copyright (c) 2022, Tri Dao. # Inspired by / adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py import math from functools import partial from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.init import tr...
[repo_name: flash-attention-main | file_path: flash_attn/models/vit.py]
# Copyright (c) 2022, Tri Dao. import math from functools import partial from collections import namedtuple from collections.abc import Sequence import torch import torch.nn as nn import torch.nn.functional as F from transformers.models.gpt2.configuration_gpt2 import GPT2Config from flash_attn.modules.mha import M...
[repo_name: flash-attention-main | file_path: flash_attn/models/gpt.py]
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py # We make it work with pytorch amp and with bfloat16. import torch import torch.nn as nn import torch.nn.functional as F from torch.cuda.amp import custom_bwd, custom_fwd # import fused_dense_cuda # from apex import fused_dense...
[repo_name: flash-attention-main | file_path: flash_attn/ops/fused_dense.py]
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py import torch from torch.nn import init # from apex._autocast_utils import _cast_if_autocast_enabled import dropout_layer_norm def _dropout_add_layer_norm_forward(x0, x1, gamma, beta, rowscale, dropout_p, epsilon, ...
[repo_name: flash-attention-main | file_path: flash_attn/ops/layer_norm.py]
# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py import math import torch from torch import nn # 1/sqrt(2*pi)-> 0.3989423 # 1/sqrt(2) -> 0.70710678 # sqrt(2/pi) -> 0.79788456 # this function is tanh approximation...
[repo_name: flash-attention-main | file_path: flash_attn/ops/gelu_activation.py]
# Adapted on https://github.com/ELS-RD/kernl/blob/main/src/kernl/implementations/linear_layer.py # and https://github.com/openai/triton/blob/master/python/triton/ops/matmul.py from typing import Optional import torch import triton import triton.language as tl from torch.autograd.function import FunctionCtx from torch....
[repo_name: flash-attention-main | file_path: flash_attn/ops/triton/linear.py]
# Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. import math from e...
[repo_name: flash-attention-main | file_path: flash_attn/ops/triton/k_activations.py]
# The triton fused matmul + sqrelu is faster for fp16 but slower for bf16, compared # to naive implementation. import torch import torch.nn as nn import torch.nn.functional as F from torch.cuda.amp import custom_bwd, custom_fwd import fused_dense_lib as fused_dense_cuda from flash_attn.ops.triton.linear import triton...
[repo_name: flash-attention-main | file_path: flash_attn/ops/triton/mlp.py]
# Copyright (c) 2022, Tri Dao. import torch import torch.nn as nn from einops import repeat class GPT2Embeddings(nn.Module): def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None): """ If max_position_embeddings <= 0, there's no position embeddings """ ...
[repo_name: flash-attention-main | file_path: flash_attn/modules/embedding.py]
# Copyright (c) 2022, Tri Dao. import torch import torch.nn as nn import torch.nn.functional as F try: from flash_attn.ops.fused_dense import fused_dense_gelu_dense_function_td from flash_attn.ops.fused_dense import fused_dense_res_gelu_dense_function_td except ImportError: fused_dense_gelu_dense_function...
[repo_name: flash-attention-main | file_path: flash_attn/modules/mlp.py]
# Copyright (c) 2022, Tri Dao. from typing import Optional from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torchvision.ops import StochasticDepth from flash_attn.modules.mha import MHA from flash_attn.modules.mlp import Mlp try: fro...
[repo_name: flash-attention-main | file_path: flash_attn/modules/block.py]
# Copyright (c) 2022, Tri Dao. import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange try: from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func from flash_attn.flash_attn_interface import flash_attn_...
[repo_name: flash-attention-main | file_path: flash_attn/modules/mha.py]
from setuptools import setup, find_packages setup( name = 'feedback-transformer-pytorch', packages = find_packages(), version = '0.0.11', license='MIT', description = 'Implementation of Feedback Transformer in Pytorch', author = 'Phil Wang', author_email = 'lucidrains@gmail.com', url = ...
[repo_name: feedback-transformer-pytorch-main | file_path: setup.py]
from feedback_transformer_pytorch.feedback_transformer_pytorch import FeedbackTransformer
[repo_name: feedback-transformer-pytorch-main | file_path: feedback_transformer_pytorch/__init__.py]
import math from collections import namedtuple import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange # constants Memory = namedtuple('Memory', ['keys', 'values']) # helpers def exists(val): return val is not None def default(val, d): return val if exists(va...
[repo_name: feedback-transformer-pytorch-main | file_path: feedback_transformer_pytorch/feedback_transformer_pytorch.py]
from setuptools import setup, find_packages setup( name = 'memory-transformer-xl', packages = find_packages(exclude=['examples']), version = '0.1.0', license='MIT', description = 'Memory Transformer-XL, a variant of Transformer-XL that uses linear attention update long term memory', author = 'Phil Wang', ...
[repo_name: memory-transformer-xl-master | file_path: setup.py]