| python_code (string, length 0–1.02M) | repo_name (string, length 9–48) | file_path (string, length 5–114) |
|---|---|---|
import torch
import torch.nn.functional as F
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from equiformer_pytorch import Equiformer
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
MAX_SEQ_LEN = 512
DEFAULT_TYPE = torch.float64
torch.set_default_dtype(DEFAULT_TYPE)
def ... | equiformer-pytorch-main | denoise.py |
import pytest
import torch
from equiformer_pytorch.equiformer_pytorch import Equiformer
from equiformer_pytorch.irr_repr import rot
from equiformer_pytorch.utils import (
torch_default_dtype,
cast_tuple,
to_order,
exists
)
# test output shape
@pytest.mark.parametrize('dim', [32])
def test_transforme... | equiformer-pytorch-main | tests/test_equivariance.py |
import pytest
import torch
from equiformer_pytorch.equiformer_pytorch import Equiformer
from equiformer_pytorch.irr_repr import rot
from equiformer_pytorch.utils import torch_default_dtype
# test equivariance with edges
@pytest.mark.parametrize('l2_dist_attention', [True, False])
@pytest.mark.parametrize('reversible... | equiformer-pytorch-main | tests/test_edges.py |
import os
from itertools import product
from collections import namedtuple
import torch
from einops import rearrange, repeat, reduce, einsum
from equiformer_pytorch.irr_repr import (
irr_repr,
rot_to_euler_angles
)
from equiformer_pytorch.utils import (
torch_default_dtype,
cache_dir,
exists,
... | equiformer-pytorch-main | equiformer_pytorch/basis.py |
__version__ = '0.3.10'
| equiformer-pytorch-main | equiformer_pytorch/version.py |
import torch
from torch.nn import Module
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from beartype import beartype
from beartype.typing import List, Tuple
from einops import rearrange, reduce
from equiformer_pytorch.utils ... | equiformer-pytorch-main | equiformer_pytorch/reversible.py |
from equiformer_pytorch.equiformer_pytorch import Equiformer
| equiformer-pytorch-main | equiformer_pytorch/__init__.py |
from math import sqrt
from functools import partial
from itertools import product
from collections import namedtuple
from beartype.typing import Optional, Union, Tuple, Dict
from beartype import beartype
import torch
from torch import nn, is_tensor, Tensor
import torch.nn.functional as F
from opt_einsum import contr... | equiformer-pytorch-main | equiformer_pytorch/equiformer_pytorch.py |
from pathlib import Path
import time
import pickle
import gzip
import torch
import torch.nn.functional as F
import contextlib
from functools import wraps, lru_cache
from filelock import FileLock
from equiformer_pytorch.version import __version__
from einops import rearrange
# helper functions
def exists(val):
... | equiformer-pytorch-main | equiformer_pytorch/utils.py |
from pathlib import Path
from functools import partial
import torch
import torch.nn.functional as F
from torch import sin, cos, atan2, acos
from einops import rearrange, pack, unpack
from equiformer_pytorch.utils import (
exists,
default,
cast_torch_tensor,
to_order,
identity,
l2norm
)
DATA_... | equiformer-pytorch-main | equiformer_pytorch/irr_repr.py |
from setuptools import setup, find_packages
setup(
name = 'glom-pytorch',
packages = find_packages(),
version = '0.0.14',
license='MIT',
description = 'Glom - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/glom-pytorch',
keywords = [
'a... | glom-pytorch-main | setup.py |
from glom_pytorch.glom_pytorch import Glom
| glom-pytorch-main | glom_pytorch/__init__.py |
from math import sqrt
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# constants
TOKEN_ATTEND_SELF_VALUE = -5e-4
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if ex... | glom-pytorch-main | glom_pytorch/glom_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'holodeck-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Holodeck - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url =... | holodeck-pytorch-main | setup.py |
from holodeck_pytorch.holodeck_pytorch import Holodeck
| holodeck-pytorch-main | holodeck_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
dim_he... | holodeck-pytorch-main | holodeck_pytorch/holodeck_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'triangle-multiplicative-module',
packages = find_packages(),
version = '0.0.3',
license='MIT',
description = 'Triangle Multiplicative Module',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/tri... | triangle-multiplicative-module-main | setup.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# classes
class TriangleMultiplicativeModule(nn.Module):
def __init__(
self,
*,
... | triangle-multiplicative-module-main | triangle_multiplicative_module/triangle_multiplicative_module.py |
from triangle_multiplicative_module.triangle_multiplicative_module import TriangleMultiplicativeModule
| triangle-multiplicative-module-main | triangle_multiplicative_module/__init__.py |
from setuptools import setup, find_packages
exec(open('naturalspeech2_pytorch/version.py').read())
setup(
name = 'naturalspeech2-pytorch',
packages = find_packages(exclude=[]),
version = __version__,
license='MIT',
description = 'Natural Speech 2 - Pytorch',
author = 'Phil Wang',
author_email = 'lucidra... | naturalspeech2-pytorch-main | setup.py |
from typing import Tuple
import numpy as np
import torch
from torch import nn, Tensor
from torch.nn import Module
import torch.nn.functional as F
from einops import rearrange, repeat
from beartype import beartype
from beartype.typing import Optional
def exists(val):
return val is not None
class AlignerNet(Modu... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/aligner.py |
__version__ = '0.1.5'
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/version.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from naturalspeech2_pytorch.naturalspeech2_pytorch import (
NaturalSpeech2,
Transformer,
Wavenet,
... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# ... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/attend.py |
import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/naturalspeech2_pytorch.py |
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/__init__.py |
import re
from pathlib import Path
from naturalspeech2_pytorch.utils.expand.abbreviations import AbbreviationExpander
from naturalspeech2_pytorch.utils.expand.number_norm import NumberNormalizer
from naturalspeech2_pytorch.utils.expand.time_norm import TimeExpander
CURRENT_DIR = Path(__file__).resolve().parent
class ... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/cleaner.py |
import torch
from torch import Tensor
from typing import Callable, List, Optional, Tuple
from torch.nn.utils.rnn import pad_sequence
from naturalspeech2_pytorch.utils.cleaner import TextProcessor
from naturalspeech2_pytorch.utils.phonemizers.espeak_wrapper import ESpeak
# default phoneme set
_vowels = "iyɨʉɯuɪʏʊeøɘ... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/tokenizer.py |
import torch
from einops import repeat, rearrange
def average_over_durations(values, durs):
"""
- in:
- values: B, 1, T_de
- durs: B, T_en
- out:
- avg: B, 1, T_en
"""
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = torch.nn.funct... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/utils.py |
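The docstring above pins down the contract: each encoder step's output is the mean of the decoder-frame values its duration covers. A minimal self-contained sketch of that behavior via cumulative sums (the repo's own implementation may differ in details such as masking; `average_over_durations_sketch` is a hypothetical name):

```python
import torch
import torch.nn.functional as F

def average_over_durations_sketch(values, durs):
    # values: (B, 1, T_de) frame-level values, durs: (B, T_en) integer durations
    # returns (B, 1, T_en): mean of values over each token's frame span
    ends = torch.cumsum(durs, dim=1).long()              # exclusive segment ends
    starts = F.pad(ends, (1, 0))[:, :-1]                 # inclusive segment starts
    csum = F.pad(values.cumsum(dim=-1), (1, 0))          # (B, 1, T_de + 1), leading zero
    seg_sums = csum.gather(-1, ends.unsqueeze(1)) - csum.gather(-1, starts.unsqueeze(1))
    return seg_sums / durs.unsqueeze(1).clamp(min=1)     # clamp guards zero durations
```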
import csv
import re
class AbbreviationExpander:
def __init__(self, abbreviations_file):
self.abbreviations = {}
self.patterns = {}
self.load_abbreviations(abbreviations_file)
def load_abbreviations(self, abbreviations_file):
with open(abbreviations_file, 'r') as file:
... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/abbreviations.py |
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/__init__.py |
import re
import inflect
from num2words import num2words
from num_to_words import num_to_word
class NumberNormalizer:
def __init__(self):
self._inflect = inflect.engine()
self._number_re = re.compile(r"-?[0-9]+")
self._currency_re = re.compile(r"([$€£¥₹])([0-9\,\.]*[0-9]+)")
self._cu... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/number_norm.py |
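As a quick illustration (not from the source), the currency pattern above captures the symbol and the amount as separate groups:

```python
import re

currency_re = re.compile(r"([$€£¥₹])([0-9\,\.]*[0-9]+)")
match = currency_re.match("$1,234.50")
assert match.groups() == ("$", "1,234.50")  # symbol and amount captured separately
```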
import re
import inflect
from num2words import num2words
from num_to_words import num_to_word
class TimeExpander:
def __init__(self):
self._inflect = inflect.engine()
self._time_re = self._get_time_regex()
def _get_time_regex(self):
return re.compile(
r"""\b
((0... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/time_norm.py |
""" from https://github.com/coqui-ai/TTS/"""
import logging
import re
import subprocess
from typing import Dict, List
from packaging.version import Version
from naturalspeech2_pytorch.utils.phonemizers.base import BasePhonemizer
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
def is_to... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/espeak_wrapper.py |
""" from https://github.com/coqui-ai/TTS/"""
import collections
import re
from enum import Enum
import six
_DEF_PUNCS = ';:,.!?¡¿—…"«»“”'
_PUNC_IDX = collections.namedtuple("_punc_index", ["punc", "position"])
class PuncPosition(Enum):
"""Enum for the punctuations positions"""
BEGIN = 0
END = 1
MI... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/punctuation.py |
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/__init__.py |
""" from https://github.com/coqui-ai/TTS/"""
import abc
from typing import List, Tuple
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
class BasePhonemizer(abc.ABC):
"""Base phonemizer class
Phonemization follows the following steps:
1. Preprocessing:
- remov... | naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/base.py |
from setuptools import setup, find_packages
setup(
name = 'CoLT5-attention',
packages = find_packages(),
version = '0.10.15',
license='MIT',
description = 'Conditionally Routed Attention',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
ur... | CoLT5-attention-main | setup.py |
import math
from functools import partial
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn, einsum
from typing import Tuple, Optional
from local_attention import LocalMHA
from einops import rearrange, repeat, pack, unpack
from colt5_attention.attend import... | CoLT5-attention-main | colt5_attention/transformer_block.py |
from math import log
import torch
from torch import Tensor
from torch import autograd
import torch.nn.functional as F
from colt5_attention.coor_descent import coor_descent
from einops import pack, unpack, repeat
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not... | CoLT5-attention-main | colt5_attention/triton_coor_descent.py |
from colt5_attention.transformer_block import (
ConditionalRoutedFeedForward,
ConditionalRoutedAttention,
ConditionalRoutedImageAttention,
ConditionalRoutedAutoregressiveAttention,
ConditionalRoutedCrossAttention,
ConditionalRoutedTransformerBlock,
CoordinateDescentRouter
)
from colt5_atten... | CoLT5-attention-main | colt5_attention/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# ... | CoLT5-attention-main | colt5_attention/attend.py |
import torch
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def coor_descent(
s,
*,
n_iters,
k,
eps = 1e-1,
e... | CoLT5-attention-main | colt5_attention/coor_descent.py |
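For context, CoLT5 relaxes hard top-k routing into an entropy-regularized problem solved by alternating dual updates. Below is a from-memory sketch of the ε-scaled iteration; the real `coor_descent` above additionally handles masking, `eps_init`/`eps_decay` scheduling, and numerical details, so treat this as illustrative only:

```python
import math
import torch
import torch.nn.functional as F

def coor_descent_sketch(s, *, n_iters, k, eps=1e-1):
    # s: (..., n) scores; returns soft selection weights summing (approximately) to k
    a = 0.
    b = -s
    for _ in range(n_iters):
        sb = (s + b) / eps
        a = eps * (math.log(k) - sb.logsumexp(dim=-1, keepdim=True))  # dual for sum == k
        b = -F.relu(s + a)                                            # dual for weights <= 1
    return ((s + a + b) / eps).exp()
```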
import torch
from torch import nn
from einops import rearrange, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from colt5_attention.transformer_block import (
ConditionalRoutedImageAttention,
ConditionalRoutedFeedForward
)
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, ... | CoLT5-attention-main | colt5_attention/vit.py |
import torch
from collections import namedtuple
from colt5_attention.coor_descent import coor_descent
TopkReturn = namedtuple('TopkReturn', ['values', 'indices', 'coor_descent_values', 'gates'])
def topk(
x,
k,
coor_descent_k_ratio = 9 / 8,
n_iters = 20,
eps = 1e-1,
eps_init = None,
eps_de... | CoLT5-attention-main | colt5_attention/topk.py |
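A hedged usage sketch of the differentiable `topk` defined above (assuming the `colt5_attention` package is installed); the field names come from the `TopkReturn` namedtuple:

```python
import torch
from colt5_attention.topk import topk

scores = torch.randn(2, 1024, requires_grad=True)
out = topk(scores, k=256)
# out.values / out.indices behave like torch.topk's hard results, while
# out.coor_descent_values and out.gates carry the differentiable relaxation
```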
from setuptools import setup, find_packages
setup(
name = 'FLASH-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.8',
license='MIT',
description = 'FLASH - Transformer Quality in Linear Time - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_... | FLASH-pytorch-main | setup.py |
from flash_pytorch import FLASHTransformer
from flash_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BAT... | FLASH-pytorch-main | train.py |
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def padding_to_multiple_of(n... | FLASH-pytorch-main | flash_pytorch/flash_pytorch.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwar... | FLASH-pytorch-main | flash_pytorch/autoregressive_wrapper.py |
from flash_pytorch.flash_pytorch import GAU, FLASH, FLASHTransformer
| FLASH-pytorch-main | flash_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'En-transformer',
packages = find_packages(),
version = '1.2.0',
license='MIT',
description = 'E(n)-Equivariant Transformer',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/En-transformer',
ke... | En-transformer-main | setup.py |
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from en_transformer.en_transformer import EnTransformer
torch.set_default_dtype(torch.float64)
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
def cycle(loader... | En-transformer-main | denoise.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.utils.checkpoint import checkpoint_sequential
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def max_neg_value(t):
return -t... | En-transformer-main | en_transformer/en_transformer.py |
from en_transformer.en_transformer import EquivariantAttention, EnTransformer
| En-transformer-main | en_transformer/__init__.py |
import torch
from torch import sin, cos, atan2, acos
def rot_z(gamma):
return torch.tensor([
[cos(gamma), -sin(gamma), 0],
[sin(gamma), cos(gamma), 0],
[0, 0, 1]
], dtype = gamma.dtype)
def rot_y(beta):
return torch.tensor([
[cos(beta), 0, sin(beta)],
[0, 1, 0],
... | En-transformer-main | en_transformer/utils.py |
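The elided remainder of this module presumably composes these planar rotations into the `rot` helper that `tests/test_equivariance.py` imports below; a sketch assuming the conventional ZYZ Euler composition:

```python
# continues en_transformer/utils.py above, where rot_z and rot_y are defined
def rot(alpha, beta, gamma):
    # ZYZ Euler angles: rotate about z, then y, then z again
    return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
```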
import torch
from en_transformer.utils import rot
from en_transformer import EnTransformer
torch.set_default_dtype(torch.float64)
def test_readme():
model = EnTransformer(
dim = 512,
depth = 1,
dim_head = 64,
heads = 8,
edge_dim = 4,
neighbors = 6
)
feats =... | En-transformer-main | tests/test_equivariance.py |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from opt_einsum import contract as einsum
import copy
import dgl
from util import base_indices, RTs_by_torsion, xyzs_in_base_frame, rigid_from_3_points
def init_lecun_normal(module, scale=1.0):
def truncated_normal(uniform, mu=0.... | RFdiffusion-main | util_module.py |
import torch
import torch.nn as nn
from Embeddings import MSA_emb, Extra_emb, Templ_emb, Recycling, Timestep_emb
from Track_module import IterativeSimulator
from AuxiliaryPredictor import DistanceNetwork, MaskedTokenNetwork, ExpResolvedNetwork, LDDTNetwork
from util import INIT_CRDS
from opt_einsum import contract as e... | RFdiffusion-main | RoseTTAFoldModel.py |
# script for diffusion protocols
import torch
import pickle
import numpy as np
import os
import logging
from typing import List
from scipy.spatial.transform import Rotation as scipy_R
from util import rigid_from_3_points
from util import torsion_indices as TOR_INDICES
from util import torsion_can_flip as TOR_CAN_FLI... | RFdiffusion-main | diffusion.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from opt_einsum import contract as einsum
import torch.utils.checkpoint as checkpoint
from util import cross_product_matrix
from util_module import *
from Attention_module import *
from SE3_network import SE3TransformerWrapper
# Components for three-tr... | RFdiffusion-main | Track_module.py |
import numpy as np
import torch
from chemical import INIT_CRDS
PARAMS = {
"DMIN" : 2.0,
"DMAX" : 20.0,
"DBINS" : 36,
"ABINS" : 36,
}
# ============================================================
def get_pair_dist(a, b):
"""calculate pair distances between two sets of points
Par... | RFdiffusion-main | kinematics.py |
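To make the snippet concrete: `PARAMS` spans a 2–20 Å distance range over 36 bins, and `get_pair_dist` computes pairwise distances between two point sets. A minimal sketch of how such pieces typically fit together (hypothetical helper names; the repo's own binning may differ):

```python
import torch

def pair_dist_sketch(a, b):
    # a: (..., L, 3), b: (..., M, 3) -> (..., L, M) pairwise Euclidean distances
    return torch.cdist(a, b, p=2)

def distance_to_bins_sketch(dist, dmin=2.0, dmax=20.0, dbins=36):
    # discretize distances into dbins equal-width bins over [dmin, dmax]
    boundaries = torch.linspace(dmin, dmax, dbins)
    return torch.bucketize(dist, boundaries)
```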
import torch
import torch.nn as nn
#from equivariant_attention.modules import get_basis_and_r, GSE3Res, GNormBias
#from equivariant_attention.modules import GConvSE3, GNormSE3
#from equivariant_attention.fibers import Fiber
from util_module import init_lecun_normal_param
from se3_transformer.model import SE3Transform... | RFdiffusion-main | SE3_network.py |
import sys
import numpy as np
import torch
import scipy.sparse
from chemical import *
from scoring import *
def generate_Cbeta(N, Ca, C):
# recreate Cb given N,Ca,C
b = Ca - N
c = C - Ca
a = torch.cross(b, c, dim=-1)
# Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
# fd: below matches s... | RFdiffusion-main | util.py |
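The commented formula contains everything needed to finish the function; a sketch using the commented coefficients (the truncated `fd:` note hints the shipped code may use slightly different constants):

```python
import torch

def generate_Cbeta_sketch(N, Ca, C):
    # place an idealized virtual C-beta from backbone N, CA, C coordinates
    b = Ca - N
    c = C - Ca
    a = torch.cross(b, c, dim=-1)
    return -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + Ca
```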
##
## lk and lk term
#(LJ_RADIUS LJ_WDEPTH LK_DGFREE LK_LAMBDA LK_VOLUME)
type2ljlk = {
"CNH2":(1.968297,0.094638,3.077030,3.5000,13.500000),
"COO":(1.916661,0.141799,-3.332648,3.5000,14.653000),
"CH0":(2.011760,0.062642,1.409284,3.5000,8.998000),
"CH1":(2.011760,0.062642,-3.538387,3.5000,10.686000),
... | RFdiffusion-main | scoring.py |
import torch
import numpy as np
import random
from chemical import INIT_CRDS
from icecream import ic
def th_min_angle(start, end, radians=False):
"""
Finds the angle you would add to <start> in order to get to <end>
on the shortest path.
"""
a,b,c = (np.pi, 2*np.pi, 3*np.pi) if radians else (... | RFdiffusion-main | diff_util.py |
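The docstring describes the classic wrap-around problem for signed angle differences; a minimal sketch of the standard modular solution, consistent with the (180, 360, 540) constants hinted at above (hypothetical name, not the repo's exact code):

```python
import numpy as np

def min_angle_sketch(start, end, radians=False):
    # signed difference you would add to start to reach end along the shortest arc
    a, b = (np.pi, 2 * np.pi) if radians else (180.0, 360.0)
    return (np.asarray(end) - np.asarray(start) + a) % b - a
```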
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from opt_einsum import contract as einsum
from util_module import init_lecun_normal
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, r_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.norm = n... | RFdiffusion-main | Attention_module.py |
import torch
import numpy as np
num2aa=[
'ALA','ARG','ASN','ASP','CYS',
'GLN','GLU','GLY','HIS','ILE',
'LEU','LYS','MET','PHE','PRO',
'SER','THR','TRP','TYR','VAL',
'UNK','MAS',
]
# Mapping 3 letter AA to 1 letter AA (e.g. ALA to A)
one_letter = ["A", "R", "N", "D", "C", \
"Q", "E... | RFdiffusion-main | chemical.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from opt_einsum import contract as einsum
import torch.utils.checkpoint as checkpoint
from util import get_tips
from util_module import Dropout, create_custom_forward, rbf, init_lecun_normal
from Attention_module import Attention, FeedForwardLayer, Atte... | RFdiffusion-main | Embeddings.py |
#!/usr/bin/env python
"""
Inference script.
To run with base.yaml as the config,
> python run_inference.py
To specify a different config,
> python run_inference.py --config-name symmetry
where symmetry can be the filename of any other config (without .yaml extension)
See https://hydra.cc/docs/advanced/hydra-comman... | RFdiffusion-main | run_inference.py |
"""SO(3) diffusion methods."""
import numpy as np
import os
from functools import cached_property
import torch
from scipy.spatial.transform import Rotation
import scipy.linalg
### First define geometric operations on the SO3 manifold
# hat map from vector space R^3 to Lie algebra so(3)
def hat(v):
hat_v = torch.... | RFdiffusion-main | igso3.py |
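For reference, the hat map sends v ∈ R³ to the skew-symmetric matrix satisfying hat(v) u = v × u; a minimal completion sketch consistent with that definition (hypothetical name):

```python
import torch

def hat_sketch(v):
    # v: (..., 3) -> (..., 3, 3) skew-symmetric matrix with hat(v) @ u == cross(v, u)
    hat_v = torch.zeros(*v.shape[:-1], 3, 3, dtype=v.dtype, device=v.device)
    hat_v[..., 0, 1], hat_v[..., 0, 2] = -v[..., 2], v[..., 1]
    hat_v[..., 1, 0], hat_v[..., 1, 2] = v[..., 2], -v[..., 0]
    hat_v[..., 2, 0], hat_v[..., 2, 1] = -v[..., 1], v[..., 0]
    return hat_v
```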
import torch
import torch.nn as nn
class DistanceNetwork(nn.Module):
def __init__(self, n_feat, p_drop=0.1):
super(DistanceNetwork, self).__init__()
#
self.proj_symm = nn.Linear(n_feat, 37*2)
self.proj_asymm = nn.Linear(n_feat, 37+19)
self.reset_parameter()
def... | RFdiffusion-main | AuxiliaryPredictor.py |
import traceback
import os
from inspect import signature
import pickle
import datetime
def pickle_function_call_wrapper(func, output_dir='pickled_inputs'):
i = 0
os.makedirs(output_dir)
# pickle.dump({'args': args, 'kwargs': kwargs}, fh)
def wrapper(*args, **kwargs):
"""
Wrap th... | RFdiffusion-main | model_input_logger.py |
import sys
import numpy as np
import random
from icecream import ic
class ContigMap:
"""
Class for doing mapping.
Inherited from Inpainting. To update at some point.
Supports multichain or multiple crops from a single receptor chain.
Also supports indexing jump (+200) or not, based on contig input... | RFdiffusion-main | contigs.py |
import numpy as np
import scipy
import scipy.spatial
# calculate dihedral angles defined by 4 sets of points
def get_dihedrals(a, b, c, d):
b0 = -1.0*(b - a)
b1 = c - b
b2 = d - c
b1 /= np.linalg.norm(b1, axis=-1)[:,None]
v = b0 - np.sum(b0*b1, axis=-1)[:,None]*b1
w = b2 - np.sum(b2*b1, axis... | RFdiffusion-main | coords6d.py |
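The row is cut off just before the final step of the standard dihedral formula: project `b0` and `b2` onto the plane normal to `b1`, then take an atan2. A self-contained sketch of that conventional ending (not necessarily the repo's exact code):

```python
import numpy as np

def get_dihedrals_sketch(a, b, c, d):
    # a, b, c, d: (N, 3) arrays of points defining N dihedral angles
    b0 = -1.0 * (b - a)
    b1 = c - b
    b2 = d - c
    b1 /= np.linalg.norm(b1, axis=-1)[:, None]
    v = b0 - np.sum(b0 * b1, axis=-1)[:, None] * b1   # b0 minus its b1 component
    w = b2 - np.sum(b2 * b1, axis=-1)[:, None] * b1   # b2 minus its b1 component
    x = np.sum(v * w, axis=-1)                        # cosine term
    y = np.sum(np.cross(b1, v) * w, axis=-1)          # sine term
    return np.arctan2(y, x)
```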
import torch
import numpy as np
from util import generate_Cbeta
class Potential:
'''
Interface class that defines the functions a potential must implement
'''
def compute(self, xyz):
'''
Given the current structure of the model prediction, return the current
potent... | RFdiffusion-main | potentials/potentials.py |
import torch
from icecream import ic
import potentials.potentials as potentials
import numpy as np
def make_contact_matrix(nchain, intra_all=False, inter_all=False, contact_string=None):
"""
Calculate a matrix of inter/intra chain contact indicators
Parameters:
nchain (int, required): How ma... | RFdiffusion-main | potentials/manager.py |
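Per the docstring, the function builds an `nchain × nchain` indicator of which chain pairs a guiding potential should push into contact; a minimal sketch of the two boolean shortcuts (the `contact_string` parsing of the real function is omitted; hypothetical name):

```python
import numpy as np

def make_contact_matrix_sketch(nchain, intra_all=False, inter_all=False):
    # 1 marks chain pairs whose contacts a potential should encourage
    m = np.zeros((nchain, nchain))
    if intra_all:
        m += np.eye(nchain)        # within-chain contacts
    if inter_all:
        m += 1 - np.eye(nchain)    # between-chain contacts
    return m
```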
from setuptools import setup, find_packages
setup(
name='se3-transformer',
packages=find_packages(),
include_package_data=True,
version='1.0.0',
description='PyTorch + DGL implementation of SE(3)-Transformers',
author='Alexandre Milesi',
author_email='alexandrem@nvidia.com',
)
| RFdiffusion-main | env/SE3Transformer/setup.py |
| RFdiffusion-main | env/SE3Transformer/se3_transformer/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/data_loading/qm9.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/data_loading/data_module.py |
from .qm9 import QM9DataModule
| RFdiffusion-main | env/SE3Transformer/se3_transformer/data_loading/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/metrics.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/gpu_affinity.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/loggers.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/arguments.py |
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/callbacks.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/inference.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/training.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/basis.py |
from .transformer import SE3Transformer, SE3TransformerPooled
from .fiber import Fiber
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/transformer.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/fiber.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/attention.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/convolution.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/linear.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/norm.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to... | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/pooling.py |
from .linear import LinearSE3
from .norm import NormSE3
from .pooling import GPooling
from .convolution import ConvSE3
from .attention import AttentionBlockSE3 | RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/__init__.py |