| python_code | repo_name | file_path |
|---|---|---|
from keopscore.formulas.maths.Sqrt import Sqrt
from keopscore.formulas.maths.Scalprod import Scalprod

class Norm2:
    def __new__(cls, arg):
        return Sqrt(Scalprod(arg, arg))

    enable_test = False
| keops-main | keopscore/keopscore/formulas/maths/Norm2.py |
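Norm2 is a thin alias: it lowers to `Sqrt(Scalprod(arg, arg))`, i.e. the Euclidean norm. A minimal NumPy sketch of the identity it encodes (NumPy stands in for the generated KeOps kernel; the helper name is illustrative, not part of keopscore):

```python
import numpy as np

def norm2(arg):
    # Norm2(F) rewrites to Sqrt(Scalprod(F, F)): the Euclidean norm of the vector
    return np.sqrt(np.dot(arg, arg))

x = np.array([3.0, 4.0])
assert np.isclose(norm2(x), 5.0)                 # sqrt(3^2 + 4^2)
assert np.isclose(norm2(x), np.linalg.norm(x))   # agrees with NumPy's norm
```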
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.maths.Log import Log
from keopscore.utils.math_functions import keops_xlogx

class XLogX(VectorizedScalarOp):
    """the x*log(x) vectorized operation"""
    string_id = "XLogX"
    ScalarOpFun = keops_xlogx
    @staticmeth... | keops-main | keopscore/keopscore/formulas/maths/XLogX.py |
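The truncated `@staticmethod` is presumably the `Derivative` rule of this `VectorizedScalarOp`; for x·log(x) that derivative is log(x) + 1. A quick numerical check of the calculus fact (plain NumPy, independent of keopscore):

```python
import numpy as np

x, h = 2.0, 1e-6
xlogx = lambda t: t * np.log(t)
numeric = (xlogx(x + h) - xlogx(x - h)) / (2 * h)   # central difference
analytic = np.log(x) + 1                           # d/dx [x log x] = log x + 1
assert np.isclose(numeric, analytic, atol=1e-6)
```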
from keopscore.formulas.maths.Inv import Inv
from keopscore.formulas.variables.IntCst import IntCst

class IntInv:
    def __new__(cls, arg):
        return Inv(IntCst(arg))

    enable_test = False
| keops-main | keopscore/keopscore/formulas/maths/IntInv.py |
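IntInv(n) is another alias: it wraps the integer n in an IntCst and takes its pointwise inverse, so IntInv(4) denotes the constant 1/4. A one-line illustration of the intended semantics (hypothetical helper, not keopscore API):

```python
def int_inv(n):
    # IntInv(n) lowers to Inv(IntCst(n)), i.e. the scalar constant 1/n
    return 1.0 / n

assert int_inv(4) == 0.25
```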
from keopscore.formulas.Chunkable_Op import Chunkable_Op
from keopscore.formulas.variables.Zero import Zero
from keopscore.utils.code_gen_utils import c_zero_float, VectApply

##########################
###### Sum #####
##########################

class Sum_Impl(Chunkable_Op):
    # the summation operation
    ... | keops-main | keopscore/keopscore/formulas/maths/Sum.py |
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.variables.Zero import Zero

##########################
###### Square #####
##########################

class Square_Impl(VectorizedScalarOp):
    """the square vectorized operation"""
    string_id = "Square"
    pri... | keops-main | keopscore/keopscore/formulas/maths/Square.py |
from keopscore.formulas.maths.ElemT import ElemT
from keopscore.formulas.maths.Concat import Concat
from keopscore.formulas.variables.IntCst import IntCst
from keopscore.formulas.variables.Var import Var
from keopscore.utils.code_gen_utils import GetInds
# /////////////////////////////////////////////////////////////... | keops-main | keopscore/keopscore/formulas/maths/GradMatrix.py |
from keopscore.formulas.Chunkable_Op import Chunkable_Op
from keopscore.formulas.maths.Sum import Sum
from keopscore.utils.code_gen_utils import (
    c_zero_float,
    VectApply,
)
from keopscore.utils.math_functions import keops_fma
from keopscore.utils.misc_utils import KeOps_Error
##########################
##### ... | keops-main | keopscore/keopscore/formulas/maths/Scalprod.py |
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.variables.Zero import Zero
from keopscore.utils.math_functions import keops_sign
##########################
###### Sign #####
##########################
class Sign(VectorizedScalarOp):
    """the sign vectorized o... | keops-main | keopscore/keopscore/formulas/maths/Sign.py |
from keopscore.formulas.Operation import Operation
from keopscore.utils.code_gen_utils import (
    c_variable,
    c_for_loop,
)

####################################
###### Tensor product #####
####################################

class TensorProd(Operation):
    string_id = "TensorProd"
    def __init__... | keops-main | keopscore/keopscore/formulas/maths/TensorProd.py |
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_mod
class Mod(VectorizedScalarOp):
    """the Modulo vectorized operation
    Mod(x,n,d) = x - n * Floor((x - d)/n)
    """
    string_id = "Mod"
    ScalarOpFun = keops_mod
    def DiffT(self, v, ... | keops-main | keopscore/keopscore/formulas/maths/Mod.py |
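The Mod docstring defines a generalized modulo with offset d: Mod(x, n, d) = x − n·Floor((x − d)/n), which maps x into [d, d + n). A hedged numeric check of that formula (plain Python math, not the keops_mod kernel):

```python
import math

def mod(x, n, d=0.0):
    # Mod(x, n, d) = x - n * floor((x - d) / n); the result lies in [d, d + n)
    return x - n * math.floor((x - d) / n)

assert mod(7.0, 3.0) == 1.0          # ordinary modulo when d = 0
assert mod(-1.0, 3.0) == 2.0         # wraps negatives into [0, 3)
assert mod(7.0, 3.0, d=1.0) == 1.0   # 7 - 3 * floor(6/3) = 1, inside [1, 4)
```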
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_exp
##########################
###### Exp #####
##########################
class Exp(VectorizedScalarOp):
    """the exponential vectorized operation"""
    string_id = "Exp"
    ScalarO... | keops-main | keopscore/keopscore/formulas/maths/Exp.py |
from keopscore.formulas.Operation import Operation
from keopscore.utils.code_gen_utils import c_array, VectCopy
from keopscore.utils.misc_utils import KeOps_Error
# //////////////////////////////////////////////////////////////
# //// VECTOR EXTRACTION : Extract<F,START,DIM> ////
# ////////////////////////... | keops-main | keopscore/keopscore/formulas/maths/Extract.py |
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.maths.Cos import Cos
from keopscore.formulas.maths.Sin import Sin
from keopscore.utils.math_functions import keops_sinxdivx
class SinXDivX(VectorizedScalarOp):
    """
    the sin(x)/x vectorized operation
    """
    string... | keops-main | keopscore/keopscore/formulas/maths/SinXDivX.py |
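sin(x)/x is singular at x = 0 only formally; its limit there is 1, which is why a dedicated keops_sinxdivx primitive exists instead of composing Sin with a division. A hedged NumPy sketch of the safe evaluation (names illustrative):

```python
import numpy as np

def sinxdivx(x):
    # sin(x)/x with the removable singularity patched: value 1 at x = 0;
    # the inner where() keeps the division warning-free at that point
    return np.where(x == 0, 1.0, np.sin(x) / np.where(x == 0, 1.0, x))

x = np.array([-1.0, 0.0, 1e-8, 2.0])
out = sinxdivx(x)
assert out[1] == 1.0
assert np.isclose(out[3], np.sin(2.0) / 2.0)
```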
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_atan
class Atan(VectorizedScalarOp):
    """the arc-tangent vectorized operation"""
    string_id = "Atan"
    ScalarOpFun = keops_atan
    @staticmethod
    def Derivative(f):
        return 1 / (... | keops-main | keopscore/keopscore/formulas/maths/Atan.py |
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.maths.Rsqrt import Rsqrt
from keopscore.utils.math_functions import keops_acos
class Acos(VectorizedScalarOp):
    """the arc-cosine vectorized operation"""
    string_id = "Acos"
    ScalarOpFun = keops_acos
    @staticme... | keops-main | keopscore/keopscore/formulas/maths/Acos.py |
from keopscore.formulas.maths.SqNormDiag import SqNormDiag
from keopscore.formulas.maths.SqNormIso import SqNormIso
from keopscore.formulas.maths.SymSqNorm import SymSqNorm
class WeightedSqNorm:
    """
    WeightedSqNorm(A, X) : redirects to SqNormIso, SqNormDiag or SymSqNorm, depending on the dimension of A.
    """
    ... | keops-main | keopscore/keopscore/formulas/maths/WeightedSqNorm.py |
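The dispatch rule named in the docstring: a scalar A gives an isotropic norm a·|x|², an A matching the dimension of X acts as a diagonal metric, and anything else is read as a full symmetric matrix. A hedged NumPy sketch of the three cases (illustrative only; keopscore performs this dispatch symbolically):

```python
import numpy as np

def weighted_sqnorm(a, x):
    a, x = np.atleast_1d(a), np.asarray(x)
    if a.size == 1:                 # SqNormIso: a * <x, x>
        return a[0] * x @ x
    if a.size == x.size:            # SqNormDiag: sum_i a_i * x_i^2
        return (a * x * x).sum()
    A = a.reshape(x.size, x.size)   # SymSqNorm: x^T A x
    return x @ A @ x

x = np.array([1.0, 2.0])
assert weighted_sqnorm(3.0, x) == 15.0                    # 3 * (1 + 4)
assert weighted_sqnorm(np.array([1.0, 0.5]), x) == 3.0    # 1*1 + 0.5*4
assert weighted_sqnorm(np.eye(2).ravel(), x) == 5.0       # identity metric
```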
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_sin
class Sin(VectorizedScalarOp):
    """the Sine vectorized operation"""
    string_id = "Sin"
    ScalarOpFun = keops_sin
    @staticmethod
    def Derivative(f):
        from .Cos import Cos
        ... | keops-main | keopscore/keopscore/formulas/maths/Sin.py |
from keopscore.formulas.Operation import Operation
from keopscore.formulas.variables.Zero import Zero
from keopscore.utils.code_gen_utils import (
    c_zero_float,
    c_for_loop,
    c_if,
    value,
    c_variable,
)
from keopscore.utils.misc_utils import KeOps_Error
############################
###### ArgMin ... | keops-main | keopscore/keopscore/formulas/maths/ArgMin.py |
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Min_ArgMin_Reduction_Base import (
    Min_ArgMin_Reduction_Base,
)
from keopscore.formulas.reductions.Zero_Reduction import Zero_Reduction

class ArgMin_Reduction(Min_ArgMin_Reduction_Base):
    """Implements the argmin reduction o... | keops-main | keopscore/keopscore/formulas/reductions/ArgMin_Reduction.py |
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Max_ArgMax_Reduction_Base import (
    Max_ArgMax_Reduction_Base,
)
from keopscore.formulas.reductions.Zero_Reduction import Zero_Reduction

class ArgMax_Reduction(Max_ArgMax_Reduction_Base):
    """Implements the argmax reduction o... | keops-main | keopscore/keopscore/formulas/reductions/ArgMax_Reduction.py |
from keopscore.formulas.maths import Concat, Exp, Extract
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.formulas.reductions.Sum_Reduction import Sum_Reduction
from keopscore.formulas.variables.IntCst import IntCst
from keopscore.utils.code_gen_utils import (
    neg_infinity,
    c_zero_f... | keops-main | keopscore/keopscore/formulas/reductions/Max_SumShiftExpWeight_Reduction.py |
from keopscore.formulas.reductions.KMin_ArgKMin_Reduction import KMin_ArgKMin_Reduction
from keopscore.utils.code_gen_utils import (
    infinity,
    cast_to,
    c_zero_float,
    c_for_loop,
    c_variable,
    new_c_varname,
    c_if,
)

class KMin_Reduction(KMin_ArgKMin_Reduction):
    """Implements the k-min red... | keops-main | keopscore/keopscore/formulas/reductions/KMin_Reduction.py |
from keopscore.utils.code_gen_utils import VectApply, VectCopy
from keopscore.utils.Tree import Tree
class Reduction(Tree):
    """Base class for all KeOps final reductions over a formula"""
    def __init__(self, formula, tagI):
        """- formula is an object of type Operation, it is the formula on which we appl... | keops-main | keopscore/keopscore/formulas/reductions/Reduction.py |
from keopscore.utils.code_gen_utils import (
    neg_infinity,
    c_zero_float,
    VectApply,
    c_if,
)
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error

class Max_ArgMax_Reduction_Base(Reduction):
    """max+argmax reduction : base class"""
    def ... | keops-main | keopscore/keopscore/formulas/reductions/Max_ArgMax_Reduction_Base.py |
from keopscore.utils.code_gen_utils import (
    infinity,
    c_zero_float,
    VectApply,
    c_if,
)
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error

class Min_ArgMin_Reduction_Base(Reduction):
    """min+argmin reduction : base class"""
    def __in... | keops-main | keopscore/keopscore/formulas/reductions/Min_ArgMin_Reduction_Base.py |
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Max_ArgMax_Reduction_Base import (
    Max_ArgMax_Reduction_Base,
)

class Max_ArgMax_Reduction(Max_ArgMax_Reduction_Base):
    """Implements the max+argmax reduction operation : for each i or each j, find the maximal value of Fij a... | keops-main | keopscore/keopscore/formulas/reductions/Max_ArgMax_Reduction.py |
from keopscore.formulas.reductions.KMin_ArgKMin_Reduction import KMin_ArgKMin_Reduction
from keopscore.formulas.reductions.Zero_Reduction import Zero_Reduction
from keopscore.utils.code_gen_utils import (
    c_for_loop,
    new_c_varname,
    c_variable,
)

class ArgKMin_Reduction(KMin_ArgKMin_Reduction):
    """Impl... | keops-main | keopscore/keopscore/formulas/reductions/ArgKMin_Reduction.py |
from keopscore.utils.code_gen_utils import (
    c_array,
    c_zero_float,
    c_if,
    c_variable,
)

class Sum_Scheme:
    def __init__(self, red_formula, dtype, dimred=None):
        self.red_formula = red_formula
        if dimred is None:
            self.dimred = red_formula.dimred
        else:
            se... | keops-main | keopscore/keopscore/formulas/reductions/sum_schemes.py |
from .ArgKMin_Reduction import ArgKMin_Reduction
from .ArgMax_Reduction import ArgMax_Reduction
from .ArgMin_Reduction import ArgMin_Reduction
from .KMin_ArgKMin_Reduction import KMin_ArgKMin_Reduction
from .KMin_Reduction import KMin_Reduction
from .Max_ArgMax_Reduction import Max_ArgMax_Reduction
from .Max_Reduction ... | keops-main | keopscore/keopscore/formulas/reductions/__init__.py |
from keopscore.formulas.variables import Zero
from keopscore.formulas.reductions.Reduction import Reduction
class Zero_Reduction(Reduction):
    """Implements the zero reduction operation (fills the output with zeros).
    N.B. The actual code for filling zeros is not here; when a Zero_Reduction is detected,
    the map... | keops-main | keopscore/keopscore/formulas/reductions/Zero_Reduction.py |
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Min_ArgMin_Reduction_Base import (
    Min_ArgMin_Reduction_Base,
)

class Min_ArgMin_Reduction(Min_ArgMin_Reduction_Base):
    """Implements the min+argmin reduction operation : for each i or each j, find the minimal value of Fij
    ... | keops-main | keopscore/keopscore/formulas/reductions/Min_ArgMin_Reduction.py |
from keopscore.utils.code_gen_utils import (
    c_zero_float,
    c_for_loop,
    c_variable,
    new_c_varname,
)
from keopscore.formulas.reductions.Reduction import Reduction

class Sum_Reduction(Reduction):
    """Sum reduction class"""
    string_id = "Sum_Reduction"
    def __init__(self, formula, tagIJ):
        ... | keops-main | keopscore/keopscore/formulas/reductions/Sum_Reduction.py |
from keopscore.utils.code_gen_utils import infinity, c_if
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error
class Min_Reduction(Reduction):
    """Implements the min reduction operation : for each i or each j, find the minimal value of Fij
    operation i... | keops-main | keopscore/keopscore/formulas/reductions/Min_Reduction.py |
from keopscore.utils.code_gen_utils import neg_infinity, c_if
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error
class Max_Reduction(Reduction):
    """Implements the max reduction operation : for each i or each j, find the
    maximal value of Fij operati... | keops-main | keopscore/keopscore/formulas/reductions/Max_Reduction.py |
from keopscore.utils.code_gen_utils import (
    infinity,
    cast_to,
    c_zero_float,
    c_for_loop,
    c_variable,
    new_c_varname,
    c_if,
    c_array,
    use_pragma_unroll,
)
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error

class KMin_ArgKM... | keops-main | keopscore/keopscore/formulas/reductions/KMin_ArgKMin_Reduction.py |
"""
PyTorch, on the GPU
===========================
"""
####################################
# Blabla
#
import torch
import numpy as np
from time import time
nits = 100
Ns, D = [10000, 100000, 1000000], 3
def KP(x, y, b):
    D_xx = (x * x).sum(-1).unsqueeze(1)  # (N,1)
    D_xy = torch.matmul(x, y.permute(1, 0)... | keops-main | benchmarks/PyTorch_GPU.py |
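The truncated KP builds pairwise squared distances via the standard expansion ‖x − y‖² = ‖x‖² − 2⟨x, y⟩ + ‖y‖². A hedged reconstruction of the full Gaussian kernel product in plain PyTorch, assuming the kernel exp(−‖x − y‖²) that matches the KeOps formula used elsewhere in these benchmarks:

```python
import torch

def KP(x, y, b):
    # Gaussian kernel product: out_i = sum_j exp(-|x_i - y_j|^2) * b_j
    D_xx = (x * x).sum(-1).unsqueeze(1)        # (N, 1)
    D_xy = torch.matmul(x, y.permute(1, 0))    # (N, M)
    D_yy = (y * y).sum(-1).unsqueeze(0)        # (1, M)
    D_ij = D_xx - 2 * D_xy + D_yy              # |x_i - y_j|^2
    K_ij = (-D_ij).exp()                       # dense (N, M) kernel matrix
    return K_ij @ b                            # (N, E)

x, y, b = torch.randn(5, 3), torch.randn(7, 3), torch.randn(7, 2)
print(KP(x, y, b).shape)  # torch.Size([5, 2])
```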
"""
PyTorch, on a TPU
============================
"""
################################################################
# This code should be run on a Google Colab session, with TPU acceleration.
#
import os
assert os.environ[
    "COLAB_TPU_ADDR"
], "Make sure to select TPU from Edit > Notebook settings > Hardware... | keops-main | benchmarks/PyTorch_TPU.py |
"""
KeOps specific
========================================
"""
#########################
# (N.B.: with data on device would be slightly better!)
#
import torch
import numpy as np
from time import time
######################################################################
# Benchmark specifications:
#
nits = 1
Ns,... | keops-main | benchmarks/KeOps_specific.py |
"""
KeOps
=====
"""
import torch
import numpy as np
from time import time
nits = 10
Ns, D = [10000, 100000, 1000000], 3
from pykeops.torch import generic_sum
KP = generic_sum(
    "Exp(-SqDist(X,Y)) * B",  # Formula
    "A = Vi(1)",  # Output
    "X = Vi({})".format(D),  # 1st argument
    "Y = Vj({})".format(D), ... | keops-main | benchmarks/KeOps.py |
"""
TVM
===============
"""
##########################
# If you're running this script on Google Colab, use the following lines to install TVM on your session:
#%matplotlib inline
#
# try:
# import google.colab
# IN_COLAB = True
# except:
# IN_COLAB = False
#
# if IN_COLAB:
# ! gsutil cp "gs://tvm-fcrc-binarie... | keops-main | benchmarks/TVM.py |
"""
TensorFlow, with an XLA backend
====================================
"""
import tensorflow as tf
# tf.config.optimizer.set_jit(True)
from time import time
# Make sure that we're using the v2.0.0
print(tf.__version__)
# Our function, that XLA is going to compile
def KP(x, y, p):
    D_ij = tf.math.reduce_sum((... | keops-main | benchmarks/TF_XLA.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# KeOps documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 13 14:50:06 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# auto... | keops-main | doc/conf.py |
#! /usr/bin/env python
"""
Convert empty IPython notebook to a sphinx doc page.
"""
import sys
from subprocess import check_call as sh
def convert_nb(nbname):
    # Execute the notebook
    sh(["jupyter", "nbconvert", "--to", "notebook", "--execute", "--inplace", nbname])
    # Convert to .rst for Sphinx
    sh(
        ... | keops-main | doc/tools/nb_to_doc.py |
from setuptools import setup, find_packages
setup(
    name = 'res-mlp-pytorch',
    packages = find_packages(exclude=[]),
    version = '0.0.6',
    license='MIT',
    description = 'ResMLP - Pytorch',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    url = 'https://github.com/lucidrains/res-mlp-pytorch',
    ke... | res-mlp-pytorch-main | setup.py |
from res_mlp_pytorch.res_mlp_pytorch import ResMLP
| res-mlp-pytorch-main | res_mlp_pytorch/__init__.py |
import torch
from torch import nn, einsum
from einops.layers.torch import Rearrange, Reduce
# helpers
def pair(val):
    return (val, val) if not isinstance(val, tuple) else val

# classes

class Affine(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, 1, ... | res-mlp-pytorch-main | res_mlp_pytorch/res_mlp_pytorch.py |
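The Affine block in ResMLP replaces normalization with a learned per-channel scale and shift, Aff(x) = g ⊙ x + b. A hedged completion of the module based on that definition from the paper (the original file is truncated here):

```python
import torch
from torch import nn

class Affine(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, 1, dim))   # per-channel scale
        self.b = nn.Parameter(torch.zeros(1, 1, dim))  # per-channel shift

    def forward(self, x):
        return x * self.g + self.b                     # Aff(x) = g * x + b

x = torch.randn(2, 16, 64)                # (batch, tokens, dim)
assert Affine(64)(x).shape == x.shape
```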
from setuptools import setup, find_packages
setup(
    name = 'BS-RoFormer',
    packages = find_packages(exclude=[]),
    version = '0.0.2',
    license='MIT',
    description = 'BS-RoFormer - Band-Split Rotary Transformer for SOTA Music Source Separation',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    lon... | BS-RoFormer-main | setup.py |
from bs_roformer.bs_roformer import BSRoformer
| BS-RoFormer-main | bs_roformer/__init__.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# constants
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_me... | BS-RoFormer-main | bs_roformer/attend.py |
import torch
from torch import nn, einsum, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F
from bs_roformer.attend import Attend
from beartype.typing import Tuple, Optional, List
from beartype import beartype
from rotary_embedding_torch import RotaryEmbedding
from einops import rearra... | BS-RoFormer-main | bs_roformer/bs_roformer.py |
from setuptools import setup, find_packages
setup(
    name = 'invariant-point-attention',
    packages = find_packages(),
    version = '0.2.2',
    license='MIT',
    description = 'Invariant Point Attention',
    long_description_content_type = 'text/markdown',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    ... | invariant-point-attention-main | setup.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from invariant_point_attention import IPATransformer
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
def cycle(loader, len_thres = 200):
    while True:... | invariant-point-attention-main | denoise.py |
import torch
from torch import nn
from einops import repeat
from invariant_point_attention import InvariantPointAttention, IPABlock
from invariant_point_attention.utils import rot
def test_ipa_invariance():
    attn = InvariantPointAttention(
        dim = 64,
        heads = 8,
        scalar_key_dim = 16,
        sc... | invariant-point-attention-main | tests/invariance.py |
import torch
import torch.nn.functional as F
from torch.cuda.amp import autocast
from contextlib import contextmanager
from torch import nn, einsum
from einops.layers.torch import Rearrange
from einops import rearrange, repeat
# helpers
def exists(val):
    return val is not None

def default(val, d):
    return val... | invariant-point-attention-main | invariant_point_attention/invariant_point_attention.py |
from invariant_point_attention.invariant_point_attention import InvariantPointAttention, IPABlock, IPATransformer
| invariant-point-attention-main | invariant_point_attention/__init__.py |
import torch
from torch import sin, cos, atan2, acos
from functools import wraps
def cast_torch_tensor(fn):
    @wraps(fn)
    def inner(t):
        if not torch.is_tensor(t):
            t = torch.tensor(t, dtype = torch.get_default_dtype())
        return fn(t)
    return inner

@cast_torch_tensor
def rot_z(gamma):
    ... | invariant-point-attention-main | invariant_point_attention/utils.py |
from setuptools import setup, find_packages
setup(
    name = 'toolformer-pytorch',
    packages = find_packages(exclude=[]),
    version = '0.0.27',
    license='MIT',
    description = 'Toolformer - Pytorch',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    long_description_content_type = 'text/markdown',
    ... | toolformer-pytorch-main | setup.py |
import torch
from torch import nn, einsum
from einops import rearrange
from x_clip.tokenizer import tokenizer
# helpers
def exists(val):
    return val is not None

# normalization

class RMSNorm(nn.Module):
    def __init__(self, dim, eps = 1e-8):
        super().__init__()
        self.scale = dim ** -0.5
        ... | toolformer-pytorch-main | toolformer_pytorch/palm.py |
import os
try:
    from dotenv import load_dotenv
    load_dotenv()
    import requests
    import calendar
    import wolframalpha
    import datetime
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    from operator import pow, truediv, mul, add, sub
    # Optional imports
    from googleapiclien... | toolformer-pytorch-main | toolformer_pytorch/tools.py |
from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.toolformer_pytorch import (
    Toolformer,
    filter_tokens_with_api_response,
    sample,
    sample_with_api_call,
    has_api_calls,
    invoke_tools,
    replace_all_but_first
)
| toolformer-pytorch-main | toolformer_pytorch/__init__.py |
DEFAULT_PROMPT_INPUT_TAG = '[input]'
calculator_prompt = f"""
Your task is to add calls to a Calculator API to a piece of text.
The calls should help you get information required to complete the text.
You can call the API by writing "[Calculator(expression)]" where "expression" is the expression to be computed.
Here... | toolformer-pytorch-main | toolformer_pytorch/prompts.py |
import re
from functools import partial, wraps
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce
from toolformer_pytorch.palm import P... | toolformer-pytorch-main | toolformer_pytorch/toolformer_pytorch.py |
from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
    wd_params, no_wd_params = [], []
    for param in params:
        param_list = no_wd_params if param.ndim < 2 else wd_params
        param_list.append(param)
    return wd_params, no_wd_params

def get_optimizer(
    params,
    lr =... | toolformer-pytorch-main | toolformer_pytorch/optimizer.py |
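separate_weight_decayable_params routes biases and other sub-2D tensors away from weight decay; the truncated get_optimizer presumably feeds the two lists to Adam/AdamW as parameter groups. A hedged usage sketch of that pattern (group hyperparameters illustrative):

```python
import torch
from torch import nn
from torch.optim import AdamW

def separate_weight_decayable_params(params):
    wd_params, no_wd_params = [], []
    for param in params:
        # 1-D tensors (biases, norm gains) are exempt from weight decay
        param_list = no_wd_params if param.ndim < 2 else wd_params
        param_list.append(param)
    return wd_params, no_wd_params

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
wd, no_wd = separate_weight_decayable_params(model.parameters())
optimizer = AdamW(
    [{'params': wd, 'weight_decay': 1e-2},
     {'params': no_wd, 'weight_decay': 0.0}],
    lr=3e-4,
)
```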
from setuptools import setup, find_packages
setup(
    name = 'electra-pytorch',
    packages = find_packages(),
    version = '0.1.2',
    license='MIT',
    description = 'Electra - Pytorch',
    author = 'Erik Nijkamp, Phil Wang',
    author_email = 'erik.nijkamp@gmail.com, lucidrains@gmail.com',
    url = 'https://github.com/luci... | electra-pytorch-master | setup.py |
import torch
from torch import nn
from reformer_pytorch import ReformerLM
from electra_pytorch import Electra
def test_electra():
    generator = ReformerLM(
        num_tokens = 20000,
        dim = 512,
        depth = 1,
        max_seq_len = 1024
    )
    discriminator = ReformerLM(
        num_tokens = 20000,
        ... | electra-pytorch-master | tests/test_electra_pytorch.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | electra-pytorch-master | examples/glue/metrics.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | electra-pytorch-master | examples/glue/run.py |
''' Script for downloading all GLUE data.
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParap... | electra-pytorch-master | examples/glue/download.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | electra-pytorch-master | examples/glue/utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | electra-pytorch-master | examples/glue/processors.py |
from electra_pytorch.electra_pytorch import Electra
| electra-pytorch-master | electra_pytorch/__init__.py |
import math
from functools import reduce
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
# constants
Results = namedtuple('Results', [
    'loss',
    'mlm_loss',
    'disc_loss',
    'gen_acc',
    'disc_acc',
    'disc_labels',
    'disc_predictions'
])
# helpe... | electra-pytorch-master | electra_pytorch/electra_pytorch.py |
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
import random
import logging
from time import time
from dataclasses import dataclass
import numpy as np
import torch
from torch.optim.l... | electra-pytorch-master | pretraining/openwebtext/pretrain.py |
import logging
import math
import multiprocessing
import os
import random
import tarfile
from dataclasses import dataclass
from itertools import chain
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torch.utils.data
from pretraining.openwebtext import arg
... | electra-pytorch-master | pretraining/openwebtext/preprocess.py |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab... | electra-pytorch-master | pretraining/openwebtext/tokenization.py |
import math
import os
import random
from dataclasses import dataclass
from itertools import chain
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torch.utils.data
from openwebtext import tokenization
class ExampleBuilder:
    """Given a stream of input text, creates pr... | electra-pytorch-master | pretraining/openwebtext/dataset.py |
import argparse
import dataclasses
__all__ = ('Arg', 'Int', 'Float', 'Bool', 'Str', 'Choice', 'parse_to')
class Arg:
    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs = kwargs

class Int(Arg):
    def __init__(self, **kwargs):
        super().__init__(type=int, **kwargs)

class Float(... | electra-pytorch-master | pretraining/openwebtext/arg.py |
from setuptools import setup, find_packages
setup(
    name = 'flash-genomics-model',
    packages = find_packages(exclude=[]),
    version = '0.0.1',
    license='MIT',
    description = 'Flash Genomics Model (FGM)',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    long_description_content_type = 'text/markdo... | flash-genomics-model-main | setup.py |
from flash_genomics_model.flash_genomics_model import FlashGenomicsModel
| flash-genomics-model-main | flash_genomics_model/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_me... | flash-genomics-model-main | flash_genomics_model/attend.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from einops import rearrange, reduce
from flash_genomics_model.attend import Attend
# functions
# attention
class Attention(nn.Module):
    def __init__(
        self,
        dim,
        dim_head = 64,
        heads = 8,
        fl... | flash-genomics-model-main | flash_genomics_model/flash_genomics_model.py |
from setuptools import setup, find_packages
setup(
    name = 'bioseq-clasp',
    packages = find_packages(),
    version = '0.0.1',
    license='MIT',
    description = 'CLASP - CLIP for biosequences and their annotation data',
    author = 'MicPie',
    author_email = '',
    url = 'https://github.com/MicPie/clasp',
    keywords = [
        ... | clasp-main | setup.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
    routed_args = [(dict(), dic... | clasp-main | clasp/reversible.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
class CLASP(nn.Module):
    def __init__(
        self,
        *,
        text_encoder,
        bioseq_encoder
    ):
        super().__init__()
        self.text_encoder = text_encoder
        self.bioseq_encoder = bioseq_encoder
        self.... | clasp-main | clasp/clasp.py |
from clasp.clasp import CLASP
from clasp.transformer import Transformer
from clasp.simple_tokenizer import tokenize, VOCAB_SIZE
from clasp.utils import basic_rand_sampler, basic_aa_tokenizer, CLASPDataset
| clasp-main | clasp/__init__.py |
import torch
from torch.utils.data import Dataset, DataLoader
import random
import time
def basic_rand_sampler(seq, sample_len):
    """
    Basic random text sampler.
    If sample_len is greater than the length of the seq, the seq is returned.
    """
    seq_len = len(seq)
    if seq_len > sample_len:
        st... | clasp-main | clasp/utils.py |
from functools import partial
from itertools import islice, cycle
from inspect import isfunction
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from clasp.positional import SinuEmb, apply_rotary_pos_emb
from clasp.reversible import... | clasp-main | clasp/transformer.py |
# take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
import torch
import html
import os
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
VOCAB_SIZE = 49408
@lru_cache()
def default_bpe():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)... | clasp-main | clasp/simple_tokenizer.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat
# rotary positional embedding helpers
def rotate_every_two(x):
    x = rearrange(x, '... (d j) -> ... d j', j = 2)
    x1, x2 = x.unbind(dim = -1)
    x = torch.stack((-x2, x1), dim = -1)
    return rearrange(x, '... d j -> ... (d j)')

de... | clasp-main | clasp/positional.py |
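rotate_every_two pairs adjacent feature channels and maps (x1, x2) to (−x2, x1), the 90° rotation used by rotary embeddings; the position-dependent rotation is then applied as x·cos(θ) + rotate_every_two(x)·sin(θ). A hedged sketch of that application step (the standard RoPE recipe; apply_rotary here is illustrative, not necessarily clasp's exact signature):

```python
import torch
from einops import rearrange

def rotate_every_two(x):
    x = rearrange(x, '... (d j) -> ... d j', j = 2)
    x1, x2 = x.unbind(dim = -1)
    x = torch.stack((-x2, x1), dim = -1)
    return rearrange(x, '... d j -> ... (d j)')

def apply_rotary(x, sin, cos):
    # rotates each (x1, x2) channel pair by the per-position angle theta,
    # where sin/cos are precomputed sin(theta), cos(theta) broadcastable to x
    return x * cos + rotate_every_two(x) * sin

x = torch.randn(1, 4, 8)                         # (batch, seq, dim)
theta = torch.randn(1, 4, 8)
out = apply_rotary(x, theta.sin(), theta.cos())
assert out.shape == x.shape
```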
from datetime import datetime
import os
from pathlib import Path
import subprocess
import copy
# Note: Run preprocess_data.py file in the main repository directory or the preproc directory of the repository.
urls_download = ["https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/unipro... | clasp-main | preproc/preprocess_data.py |
from datetime import datetime
import os
from pathlib import Path
import subprocess
import copy
import re
# Note: Run preprocess_data.py file in the main repository directory or the preproc directory of the repository.
urls_download = ["https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/compl... | clasp-main | preproc/preprocess_data_reduced.py |
from setuptools import setup, find_packages
setup(
    name = 'memorizing-transformers-pytorch',
    packages = find_packages(exclude=[]),
    version = '0.4.1',
    license='MIT',
    description = 'Memorizing Transformer - Pytorch',
    long_description_content_type = 'text/markdown',
    author = 'Phil Wang',
    author_email = 'l... | memorizing-transformers-pytorch-main | setup.py |
from memorizing_transformers_pytorch import MemorizingTransformer
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 16
SEQ_LEN = 512
SE... | memorizing-transformers-pytorch-main | train.py |
from memorizing_transformers_pytorch.memorizing_transformers_pytorch import MemorizingTransformer, KNNAttention
from memorizing_transformers_pytorch.knn_memory import KNNMemory
| memorizing-transformers-pytorch-main | memorizing_transformers_pytorch/__init__.py |
import math
from functools import partial
from contextlib import contextmanager
from pathlib import Path
from filelock import FileLock
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops_exts import repeat_many
from einops.layers.torch import Rear... | memorizing-transformers-pytorch-main | memorizing_transformers_pytorch/memorizing_transformers_pytorch.py |
import os
import math
import torch
import faiss
import numpy as np
from pathlib import Path
from functools import wraps
from contextlib import ExitStack, contextmanager
from einops import rearrange, pack, unpack
# multiprocessing
from joblib import Parallel, delayed, cpu_count
# constants
FAISS_INDEX_GPU_ID = int... | memorizing-transformers-pytorch-main | memorizing_transformers_pytorch/knn_memory.py |
from setuptools import setup, find_packages
setup(
    name = 'segformer-pytorch',
    packages = find_packages(),
    version = '0.0.6',
    license='MIT',
    description = 'Segformer - Pytorch',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    url = 'https://github.com/lucidrains/segformer-pytorch',
    keywo... | segformer-pytorch-main | setup.py |
from segformer_pytorch.segformer_pytorch import Segformer
| segformer-pytorch-main | segformer_pytorch/__init__.py |
from math import sqrt
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
# helpers
def exists(val):
    return val is not None

def cast_tuple(val, depth):
    return val if isinstance(... | segformer-pytorch-main | segformer_pytorch/segformer_pytorch.py |
from setuptools import setup, find_packages
exec(open('equiformer_pytorch/version.py').read())
setup(
    name = 'equiformer-pytorch',
    packages = find_packages(exclude=[]),
    version = __version__,
    license='MIT',
    description = 'Equiformer - SE3/E3 Graph Attention Transformer for Molecules and Proteins',
    author ... | equiformer-pytorch-main | setup.py |