repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/imagedreamfusion.py | threestudio/systems/imagedreamfusion.py | import os
import random
import shutil
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from torchmetrics import PearsonCorrCoef
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("image-condition-dreamfusion-system")
class ImageConditionDreamFusion(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
freq: dict = field(default_factory=dict)
refinement: bool = False
ambient_ratio_min: float = 0.5
cfg: Config
def configure(self):
# create geometry, material, background, renderer
super().configure()
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
def on_fit_start(self) -> None:
super().on_fit_start()
# only used in training
self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
self.cfg.prompt_processor
)
self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
# visualize all training images
all_images = self.trainer.datamodule.train_dataloader().dataset.get_all_images()
self.save_image_grid(
"all_training_images.png",
[
{"type": "rgb", "img": image, "kwargs": {"data_format": "HWC"}}
for image in all_images
],
name="on_fit_start",
step=self.true_global_step,
)
self.pearson = PearsonCorrCoef().to(self.device)
def training_substep(self, batch, batch_idx, guidance: str):
"""
Args:
guidance: one of "ref" (reference image supervision), "guidance"
"""
if guidance == "ref":
# bg_color = torch.rand_like(batch['rays_o'])
ambient_ratio = 1.0
shading = "diffuse"
batch["shading"] = shading
elif guidance == "guidance":
batch = batch["random_camera"]
ambient_ratio = (
self.cfg.ambient_ratio_min
+ (1 - self.cfg.ambient_ratio_min) * random.random()
)
batch["bg_color"] = None
batch["ambient_ratio"] = ambient_ratio
out = self(batch)
loss_prefix = f"loss_{guidance}_"
loss_terms = {}
def set_loss(name, value):
loss_terms[f"{loss_prefix}{name}"] = value
guidance_eval = (
guidance == "guidance"
and self.cfg.freq.guidance_eval > 0
and self.true_global_step % self.cfg.freq.guidance_eval == 0
)
if guidance == "ref":
gt_mask = batch["mask"]
gt_rgb = batch["rgb"]
# color loss
gt_rgb = gt_rgb * gt_mask.float() + out["comp_rgb_bg"] * (
1 - gt_mask.float()
)
set_loss("rgb", F.mse_loss(gt_rgb, out["comp_rgb"]))
# mask loss
set_loss("mask", F.mse_loss(gt_mask.float(), out["opacity"]))
# depth loss
if self.C(self.cfg.loss.lambda_depth) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)].unsqueeze(1)
valid_pred_depth = out["depth"][gt_mask].unsqueeze(1)
with torch.no_grad():
A = torch.cat(
[valid_gt_depth, torch.ones_like(valid_gt_depth)], dim=-1
) # [B, 2]
X = torch.linalg.lstsq(A, valid_pred_depth).solution # [2, 1]
valid_gt_depth = A @ X # [B, 1]
set_loss("depth", F.mse_loss(valid_gt_depth, valid_pred_depth))
# relative depth loss
if self.C(self.cfg.loss.lambda_depth_rel) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)] # [B,]
valid_pred_depth = out["depth"][gt_mask] # [B,]
set_loss(
"depth_rel", 1 - self.pearson(valid_pred_depth, valid_gt_depth)
)
# normal loss
if self.C(self.cfg.loss.lambda_normal) > 0:
valid_gt_normal = (
1 - 2 * batch["ref_normal"][gt_mask.squeeze(-1)]
) # [B, 3]
valid_pred_normal = (
2 * out["comp_normal"][gt_mask.squeeze(-1)] - 1
) # [B, 3]
set_loss(
"normal",
1 - F.cosine_similarity(valid_pred_normal, valid_gt_normal).mean(),
)
elif guidance == "guidance":
self.guidance.set_min_max_steps(
self.C(self.guidance.cfg.min_step_percent),
self.C(self.guidance.cfg.max_step_percent),
)
prompt_utils = self.prompt_processor()
guidance_out = self.guidance(
out["comp_rgb"],
prompt_utils,
**batch,
rgb_as_latents=False,
guidance_eval=guidance_eval,
)
set_loss("sds", guidance_out["loss_sds"])
if self.C(self.cfg.loss.lambda_normal_smooth) > 0:
if "comp_normal" not in out:
raise ValueError(
"comp_normal is required for 2D normal smooth loss, no comp_normal is found in the output."
)
normal = out["comp_normal"]
set_loss(
"normal_smooth",
(normal[:, 1:, :, :] - normal[:, :-1, :, :]).square().mean()
+ (normal[:, :, 1:, :] - normal[:, :, :-1, :]).square().mean(),
)
if self.C(self.cfg.loss.lambda_3d_normal_smooth) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for normal smooth loss, no normal is found in the output."
)
if "normal_perturb" not in out:
raise ValueError(
"normal_perturb is required for normal smooth loss, no normal_perturb is found in the output."
)
normals = out["normal"]
normals_perturb = out["normal_perturb"]
set_loss("3d_normal_smooth", (normals - normals_perturb).abs().mean())
if not self.cfg.refinement:
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
set_loss(
"orient",
(
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum()
/ (out["opacity"] > 0).sum(),
)
if guidance != "ref" and self.C(self.cfg.loss.lambda_sparsity) > 0:
set_loss("sparsity", (out["opacity"] ** 2 + 0.01).sqrt().mean())
if self.C(self.cfg.loss.lambda_opaque) > 0:
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
set_loss(
"opaque", binary_cross_entropy(opacity_clamped, opacity_clamped)
)
else:
if self.C(self.cfg.loss.lambda_normal_consistency) > 0:
set_loss("normal_consistency", out["mesh"].normal_consistency())
if self.C(self.cfg.loss.lambda_laplacian_smoothness) > 0:
set_loss("laplacian_smoothness", out["mesh"].laplacian())
loss = 0.0
for name, value in loss_terms.items():
self.log(f"train/{name}", value)
if name.startswith(loss_prefix):
loss_weighted = value * self.C(
self.cfg.loss[name.replace(loss_prefix, "lambda_")]
)
self.log(f"train/{name}_w", loss_weighted)
loss += loss_weighted
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
self.log(f"train/loss_{guidance}", loss)
if guidance_eval:
self.guidance_evaluation_save(
out["comp_rgb"].detach()[: guidance_out["eval"]["bs"]],
guidance_out["eval"],
)
return {"loss": loss}
def training_step(self, batch, batch_idx):
total_loss = 0.0
# guidance
if self.true_global_step > self.cfg.freq.ref_only_steps:
out = self.training_substep(batch, batch_idx, guidance="guidance")
total_loss += out["loss"]
# ref
out = self.training_substep(batch, batch_idx, guidance="ref")
total_loss += out["loss"]
self.log("train/loss", total_loss, prog_bar=True)
# sch = self.lr_schedulers()
# sch.step()
return {"loss": total_loss}
def validation_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-val/{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": batch["rgb"][0],
"kwargs": {"data_format": "HWC"},
}
]
if "rgb" in batch
else []
)
+ [
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ (
[
{
"type": "grayscale",
"img": out["depth"][0],
"kwargs": {},
}
]
if "depth" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name=f"validation_step_batchidx_{batch_idx}"
if batch_idx in [0, 7, 15, 23, 29]
else None,
step=self.true_global_step,
)
def on_validation_epoch_end(self):
filestem = f"it{self.true_global_step}-val"
self.save_img_sequence(
filestem,
filestem,
"(\d+)\.png",
save_format="mp4",
fps=30,
name="validation_epoch_end",
step=self.true_global_step,
)
shutil.rmtree(
os.path.join(self.get_save_dir(), f"it{self.true_global_step}-val")
)
def test_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-test/{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": batch["rgb"][0],
"kwargs": {"data_format": "HWC"},
}
]
if "rgb" in batch
else []
)
+ [
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ (
[
{
"type": "grayscale",
"img": out["depth"][0],
"kwargs": {},
}
]
if "depth" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="test_step",
step=self.true_global_step,
)
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
shutil.rmtree(
os.path.join(self.get_save_dir(), f"it{self.true_global_step}-test")
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/prolificdreamer.py | threestudio/systems/prolificdreamer.py | import os
from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.misc import cleanup, get_device
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("prolificdreamer-system")
class ProlificDreamer(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
# in ['coarse', 'geometry', 'texture']
stage: str = "coarse"
visualize_samples: bool = False
cfg: Config
def configure(self) -> None:
# set up geometry, material, background, renderer
super().configure()
self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
self.cfg.prompt_processor
)
self.prompt_utils = self.prompt_processor()
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
if self.cfg.stage == "geometry":
render_out = self.renderer(**batch, render_rgb=False)
else:
render_out = self.renderer(**batch)
return {
**render_out,
}
def on_fit_start(self) -> None:
super().on_fit_start()
def training_step(self, batch, batch_idx):
out = self(batch)
if self.cfg.stage == "geometry":
guidance_inp = out["comp_normal"]
guidance_out = self.guidance(
guidance_inp, self.prompt_utils, **batch, rgb_as_latents=False
)
else:
guidance_inp = out["comp_rgb"]
guidance_out = self.guidance(
guidance_inp, self.prompt_utils, **batch, rgb_as_latents=False
)
loss = 0.0
for name, value in guidance_out.items():
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
if self.cfg.stage == "coarse":
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
loss_orient = (
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum() / (out["opacity"] > 0).sum()
self.log("train/loss_orient", loss_orient)
loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
self.log("train/loss_sparsity", loss_sparsity)
loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
self.log("train/loss_opaque", loss_opaque)
loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
# z variance loss proposed in HiFA: http://arxiv.org/abs/2305.18766
# helps reduce floaters and produce solid geometry
if "z_variance" in out:
loss_z_variance = out["z_variance"][out["opacity"] > 0.5].mean()
self.log("train/loss_z_variance", loss_z_variance)
loss += loss_z_variance * self.C(self.cfg.loss.lambda_z_variance)
# sdf loss
if "sdf_grad" in out:
loss_eikonal = (
(torch.linalg.norm(out["sdf_grad"], ord=2, dim=-1) - 1.0) ** 2
).mean()
self.log("train/loss_eikonal", loss_eikonal)
loss += loss_eikonal * self.C(self.cfg.loss.lambda_eikonal)
self.log("train/inv_std", out["inv_std"], prog_bar=True)
elif self.cfg.stage == "geometry":
loss_normal_consistency = out["mesh"].normal_consistency()
self.log("train/loss_normal_consistency", loss_normal_consistency)
loss += loss_normal_consistency * self.C(
self.cfg.loss.lambda_normal_consistency
)
if self.C(self.cfg.loss.lambda_laplacian_smoothness) > 0:
loss_laplacian_smoothness = out["mesh"].laplacian()
self.log("train/loss_laplacian_smoothness", loss_laplacian_smoothness)
loss += loss_laplacian_smoothness * self.C(
self.cfg.loss.lambda_laplacian_smoothness
)
elif self.cfg.stage == "texture":
pass
else:
raise ValueError(f"Unknown stage {self.cfg.stage}")
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
return {"loss": loss}
def validation_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
if "comp_rgb" in out
else []
)
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="validation_step",
step=self.true_global_step,
)
if self.cfg.visualize_samples:
self.save_image_grid(
f"it{self.true_global_step}-{batch['index'][0]}-sample.png",
[
{
"type": "rgb",
"img": self.guidance.sample(
self.prompt_utils, **batch, seed=self.global_step
)[0],
"kwargs": {"data_format": "HWC"},
},
{
"type": "rgb",
"img": self.guidance.sample_lora(self.prompt_utils, **batch)[0],
"kwargs": {"data_format": "HWC"},
},
],
name="validation_step_samples",
step=self.true_global_step,
)
def on_validation_epoch_end(self):
pass
def test_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-test/{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
if "comp_rgb" in out
else []
)
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="test_step",
step=self.true_global_step,
)
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/optimizers.py | threestudio/systems/optimizers.py | # Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
class Adan(Optimizer):
"""
Implements a pytorch variant of Adan
Adan was proposed in
Adan: Adaptive Nesterov Momentum Algorithm for
Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022.
https://arxiv.org/abs/2208.06677
Arguments:
params (iterable): iterable of parameters to optimize or
dicts defining parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float, flot], optional): coefficients used for
first- and second-order moments. (default: (0.98, 0.92, 0.99))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): decoupled weight decay
(L2 penalty) (default: 0)
max_grad_norm (float, optional): value used to clip
global grad norm (default: 0.0 no clip)
no_prox (bool): how to perform the decoupled weight decay
(default: False)
foreach (bool): if True would use torch._foreach implementation.
It's faster but uses slightly more memory. (default: True)
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.98, 0.92, 0.99),
eps=1e-8,
weight_decay=0.0,
max_grad_norm=0.0,
no_prox=False,
foreach: bool = True,
):
if not 0.0 <= max_grad_norm:
raise ValueError("Invalid Max grad norm: {}".format(max_grad_norm))
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= betas[2] < 1.0:
raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
no_prox=no_prox,
foreach=foreach,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super(Adan, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("no_prox", False)
@torch.no_grad()
def restart_opt(self):
for group in self.param_groups:
group["step"] = 0
for p in group["params"]:
if p.requires_grad:
state = self.state[p]
# State initialization
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p)
# Exponential moving average of gradient difference
state["exp_avg_diff"] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step."""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
if self.defaults["max_grad_norm"] > 0:
device = self.param_groups[0]["params"][0].device
global_grad_norm = torch.zeros(1, device=device)
max_grad_norm = torch.tensor(self.defaults["max_grad_norm"], device=device)
for group in self.param_groups:
for p in group["params"]:
if p.grad is not None:
grad = p.grad
global_grad_norm.add_(grad.pow(2).sum())
global_grad_norm = torch.sqrt(global_grad_norm)
clip_global_grad_norm = torch.clamp(
max_grad_norm / (global_grad_norm + group["eps"]), max=1.0
).item()
else:
clip_global_grad_norm = 1.0
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
exp_avg_diffs = []
neg_pre_grads = []
beta1, beta2, beta3 = group["betas"]
# assume same step across group now to simplify things
# per parameter step can be easily support
# by making it tensor, or pass list into kernel
if "step" in group:
group["step"] += 1
else:
group["step"] = 1
bias_correction1 = 1.0 - beta1 ** group["step"]
bias_correction2 = 1.0 - beta2 ** group["step"]
bias_correction3 = 1.0 - beta3 ** group["step"]
for p in group["params"]:
if p.grad is None:
continue
params_with_grad.append(p)
grads.append(p.grad)
state = self.state[p]
if len(state) == 0:
state["exp_avg"] = torch.zeros_like(p)
state["exp_avg_sq"] = torch.zeros_like(p)
state["exp_avg_diff"] = torch.zeros_like(p)
if "neg_pre_grad" not in state or group["step"] == 1:
state["neg_pre_grad"] = p.grad.clone().mul_(-clip_global_grad_norm)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
exp_avg_diffs.append(state["exp_avg_diff"])
neg_pre_grads.append(state["neg_pre_grad"])
kwargs = dict(
params=params_with_grad,
grads=grads,
exp_avgs=exp_avgs,
exp_avg_sqs=exp_avg_sqs,
exp_avg_diffs=exp_avg_diffs,
neg_pre_grads=neg_pre_grads,
beta1=beta1,
beta2=beta2,
beta3=beta3,
bias_correction1=bias_correction1,
bias_correction2=bias_correction2,
bias_correction3_sqrt=math.sqrt(bias_correction3),
lr=group["lr"],
weight_decay=group["weight_decay"],
eps=group["eps"],
no_prox=group["no_prox"],
clip_global_grad_norm=clip_global_grad_norm,
)
if group["foreach"]:
_multi_tensor_adan(**kwargs)
else:
_single_tensor_adan(**kwargs)
return loss
def _single_tensor_adan(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
exp_avg_diffs: List[Tensor],
neg_pre_grads: List[Tensor],
*,
beta1: float,
beta2: float,
beta3: float,
bias_correction1: float,
bias_correction2: float,
bias_correction3_sqrt: float,
lr: float,
weight_decay: float,
eps: float,
no_prox: bool,
clip_global_grad_norm: Tensor,
):
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
exp_avg_diff = exp_avg_diffs[i]
neg_grad_or_diff = neg_pre_grads[i]
grad.mul_(clip_global_grad_norm)
# for memory saving, we use `neg_grad_or_diff`
# to get some temp variable in a inplace way
neg_grad_or_diff.add_(grad)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t
exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, alpha=1 - beta2) # diff_t
neg_grad_or_diff.mul_(beta2).add_(grad)
exp_avg_sq.mul_(beta3).addcmul_(
neg_grad_or_diff, neg_grad_or_diff, value=1 - beta3
) # n_t
denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps)
step_size_diff = lr * beta2 / bias_correction2
step_size = lr / bias_correction1
if no_prox:
param.mul_(1 - lr * weight_decay)
param.addcdiv_(exp_avg, denom, value=-step_size)
param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff)
else:
param.addcdiv_(exp_avg, denom, value=-step_size)
param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff)
param.div_(1 + lr * weight_decay)
neg_grad_or_diff.zero_().add_(grad, alpha=-1.0)
def _multi_tensor_adan(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
exp_avg_diffs: List[Tensor],
neg_pre_grads: List[Tensor],
*,
beta1: float,
beta2: float,
beta3: float,
bias_correction1: float,
bias_correction2: float,
bias_correction3_sqrt: float,
lr: float,
weight_decay: float,
eps: float,
no_prox: bool,
clip_global_grad_norm: Tensor,
):
if len(params) == 0:
return
torch._foreach_mul_(grads, clip_global_grad_norm)
# for memory saving, we use `neg_pre_grads`
# to get some temp variable in a inplace way
torch._foreach_add_(neg_pre_grads, grads)
torch._foreach_mul_(exp_avgs, beta1)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t
torch._foreach_mul_(exp_avg_diffs, beta2)
torch._foreach_add_(exp_avg_diffs, neg_pre_grads, alpha=1 - beta2) # diff_t
torch._foreach_mul_(neg_pre_grads, beta2)
torch._foreach_add_(neg_pre_grads, grads)
torch._foreach_mul_(exp_avg_sqs, beta3)
torch._foreach_addcmul_(
exp_avg_sqs, neg_pre_grads, neg_pre_grads, value=1 - beta3
) # n_t
denom = torch._foreach_sqrt(exp_avg_sqs)
torch._foreach_div_(denom, bias_correction3_sqrt)
torch._foreach_add_(denom, eps)
step_size_diff = lr * beta2 / bias_correction2
step_size = lr / bias_correction1
if no_prox:
torch._foreach_mul_(params, 1 - lr * weight_decay)
torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size)
torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff)
else:
torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size)
torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff)
torch._foreach_div_(params, 1 + lr * weight_decay)
torch._foreach_zero_(neg_pre_grads)
torch._foreach_add_(neg_pre_grads, grads, alpha=-1.0)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/control4d_multiview.py | threestudio/systems/control4d_multiview.py | import os
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.systems.utils import parse_optimizer
from threestudio.utils.GAN.loss import discriminator_loss, generator_loss
from threestudio.utils.misc import cleanup, get_device
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.perceptual import PerceptualLoss
from threestudio.utils.typing import *
@threestudio.register("control4d-multiview-system")
class Control4D(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
per_editing_step: int = 20
start_editing_step: int = 2000
cfg: Config
def configure(self) -> None:
# override the default configure function
self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
self.background = threestudio.find(self.cfg.background_type)(
self.cfg.background
)
self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
self.renderer = threestudio.find(self.cfg.renderer_type)(
self.cfg.renderer,
geometry=self.geometry,
material=self.material,
background=self.background,
)
self.perceptual_loss = PerceptualLoss().eval().to(get_device())
self.edit_frames = {}
self.per_editing_step = self.cfg.per_editing_step
self.start_editing_step = self.cfg.start_editing_step
self.automatic_optimization = False
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
def on_fit_start(self) -> None:
super().on_fit_start()
# only used in training
self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
self.cfg.prompt_processor
)
self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
def training_step(self, batch, batch_idx):
optimizer_g, optimizer_d = self.optimizers()
self.toggle_optimizer(optimizer_g)
if torch.is_tensor(batch["index"]):
batch_index = batch["index"].item()
else:
batch_index = batch["index"]
batch["multi_level_guidance"] = True
origin_gt_rgb = batch["gt_rgb"]
B, H, W, C = origin_gt_rgb.shape
if batch_index in self.edit_frames:
gt_rgb = self.edit_frames[batch_index].to(batch["gt_rgb"].device)
gt_rgb = torch.nn.functional.interpolate(
gt_rgb.permute(0, 3, 1, 2), (H, W), mode="bilinear", align_corners=False
).permute(0, 2, 3, 1)
batch["gt_rgb"] = gt_rgb
else:
gt_rgb = origin_gt_rgb
out = self(batch)
if self.per_editing_step > 0 and self.global_step > self.start_editing_step:
prompt_utils = self.prompt_processor()
if (
not batch_index in self.edit_frames
or self.global_step % self.per_editing_step == 0
):
result = self.guidance(out["comp_gan_rgb"], origin_gt_rgb, prompt_utils)
self.edit_frames[batch_index] = result["edit_images"].detach().cpu()
loss = 0.0
# loss of generator level 0
loss_l1 = F.l1_loss(out["comp_int_rgb"], out["comp_gt_rgb"])
loss_p = 0.0
loss_kl = out["posterior"].kl().mean()
loss_G = generator_loss(
self.renderer.discriminator,
gt_rgb.permute(0, 3, 1, 2),
out["comp_gan_rgb"].permute(0, 3, 1, 2),
)
generator_level = out["generator_level"]
level_ratio = 1.0 if generator_level == 2 else 0.1
loss_l1 += F.l1_loss(out["comp_gan_rgb"], gt_rgb) * level_ratio
lr_gan_rgb = F.interpolate(
out["comp_gan_rgb"].permute(0, 3, 1, 2), (H // 4, W // 4), mode="area"
)
lr_rgb = F.interpolate(
out["comp_rgb"].permute(0, 3, 1, 2), (H // 4, W // 4), mode="area"
).detach()
loss_l1 += F.l1_loss(lr_gan_rgb, lr_rgb).sum() * level_ratio * 0.25
level_ratio = 1.0 if generator_level >= 1 else 0.1
loss_p += (
self.perceptual_loss(
out["comp_gan_rgb"].permute(0, 3, 1, 2).contiguous(),
gt_rgb.permute(0, 3, 1, 2).contiguous(),
).sum()
* level_ratio
)
guidance_out = {
"loss_l1": loss_l1,
"loss_p": loss_p,
"loss_G": loss_G,
"loss_kl": loss_kl,
}
for name, value in guidance_out.items():
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
loss_orient = (
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum() / (out["opacity"] > 0).sum()
self.log("train/loss_orient", loss_orient)
loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
self.log("train/loss_sparsity", loss_sparsity)
loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
self.log("train/loss_opaque", loss_opaque)
loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
self.manual_backward(loss)
optimizer_g.step()
optimizer_g.zero_grad()
self.untoggle_optimizer(optimizer_g)
self.toggle_optimizer(optimizer_d)
loss_D = discriminator_loss(
self.renderer.discriminator,
gt_rgb.permute(0, 3, 1, 2),
out["comp_gan_rgb"].permute(0, 3, 1, 2),
)
loss_D *= self.C(self.cfg.loss["lambda_D"])
self.log("train/loss_D", loss_D)
self.manual_backward(loss_D)
optimizer_d.step()
optimizer_d.zero_grad()
self.untoggle_optimizer(optimizer_d)
def validation_step(self, batch, batch_idx):
out = self(batch)
if torch.is_tensor(batch["index"]):
batch_index = batch["index"].item()
else:
batch_index = batch["index"]
if batch_index in self.edit_frames:
B, H, W, C = batch["gt_rgb"].shape
rgb = torch.nn.functional.interpolate(
self.edit_frames[batch_index].permute(0, 3, 1, 2), (H, W)
).permute(0, 2, 3, 1)[0]
else:
rgb = batch["gt_rgb"][0]
self.save_image_grid(
f"it{self.true_global_step}-{batch['index'][0]}.jpg",
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ [
{
"type": "rgb",
"img": out["comp_gan_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
]
+ [
{
"type": "rgb",
"img": rgb,
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
},
],
name="validation_step",
step=self.true_global_step,
)
def on_validation_epoch_end(self):
pass
def test_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-test/{batch['index'][0]}.png",
[
{
"type": "rgb",
"img": out["comp_gan_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="test_step",
step=self.true_global_step,
)
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
def configure_optimizers(self):
optimizer_g = parse_optimizer(self.cfg.optimizer, self)
optimizer_d = parse_optimizer(self.cfg.optimizer.optimizer_dis, self)
return [optimizer_g, optimizer_d], []
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/instructnerf2nerf.py | threestudio/systems/instructnerf2nerf.py | import os
from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.misc import cleanup, get_device
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.perceptual import PerceptualLoss
from threestudio.utils.typing import *
@threestudio.register("instructnerf2nerf-system")
class Instructnerf2nerf(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
per_editing_step: int = 10
start_editing_step: int = 1000
cfg: Config
def configure(self):
# create geometry, material, background, renderer
super().configure()
self.edit_frames = {}
self.perceptual_loss = PerceptualLoss().eval().to(get_device())
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
def on_fit_start(self) -> None:
super().on_fit_start()
# only used in training
self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
self.cfg.prompt_processor
)
self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
def training_step(self, batch, batch_idx):
if torch.is_tensor(batch["index"]):
batch_index = batch["index"].item()
else:
batch_index = batch["index"]
origin_gt_rgb = batch["gt_rgb"]
B, H, W, C = origin_gt_rgb.shape
if batch_index in self.edit_frames:
gt_rgb = self.edit_frames[batch_index].to(batch["gt_rgb"].device)
gt_rgb = torch.nn.functional.interpolate(
gt_rgb.permute(0, 3, 1, 2), (H, W), mode="bilinear", align_corners=False
).permute(0, 2, 3, 1)
batch["gt_rgb"] = gt_rgb
else:
gt_rgb = origin_gt_rgb
out = self(batch)
if (
self.cfg.per_editing_step > 0
and self.global_step > self.cfg.start_editing_step
):
prompt_utils = self.prompt_processor()
if (
not batch_index in self.edit_frames
or self.global_step % self.cfg.per_editing_step == 0
):
self.renderer.eval()
full_out = self(batch)
self.renderer.train()
result = self.guidance(
full_out["comp_rgb"], origin_gt_rgb, prompt_utils
)
self.edit_frames[batch_index] = result["edit_images"].detach().cpu()
loss = 0.0
guidance_out = {
"loss_l1": torch.nn.functional.l1_loss(out["comp_rgb"], gt_rgb),
"loss_p": self.perceptual_loss(
out["comp_rgb"].permute(0, 3, 1, 2).contiguous(),
gt_rgb.permute(0, 3, 1, 2).contiguous(),
).sum(),
}
for name, value in guidance_out.items():
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
loss_orient = (
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum() / (out["opacity"] > 0).sum()
self.log("train/loss_orient", loss_orient)
loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
self.log("train/loss_sparsity", loss_sparsity)
loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
self.log("train/loss_opaque", loss_opaque)
loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
return {"loss": loss}
def validation_step(self, batch, batch_idx):
out = self(batch)
if torch.is_tensor(batch["index"]):
batch_index = batch["index"].item()
else:
batch_index = batch["index"]
if batch_index in self.edit_frames:
B, H, W, C = batch["gt_rgb"].shape
rgb = torch.nn.functional.interpolate(
self.edit_frames[batch_index].permute(0, 3, 1, 2), (H, W)
).permute(0, 2, 3, 1)[0]
else:
rgb = batch["gt_rgb"][0]
self.save_image_grid(
f"it{self.true_global_step}-{batch['index'][0]}.png",
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
]
+ [
{
"type": "rgb",
"img": rgb,
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
},
],
name="validation_step",
step=self.true_global_step,
)
def on_validation_epoch_end(self):
pass
def test_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-test/{batch['index'][0]}.png",
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="test_step",
step=self.true_global_step,
)
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/zero123.py | threestudio/systems/zero123.py | import os
import random
import shutil
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from PIL import Image, ImageDraw
from torchmetrics import PearsonCorrCoef
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("zero123-system")
class Zero123(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
freq: dict = field(default_factory=dict)
refinement: bool = False
ambient_ratio_min: float = 0.5
cfg: Config
def configure(self):
# create geometry, material, background, renderer
super().configure()
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
def on_fit_start(self) -> None:
super().on_fit_start()
# no prompt processor
self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
# visualize all training images
all_images = self.trainer.datamodule.train_dataloader().dataset.get_all_images()
self.save_image_grid(
"all_training_images.png",
[
{"type": "rgb", "img": image, "kwargs": {"data_format": "HWC"}}
for image in all_images
],
name="on_fit_start",
step=self.true_global_step,
)
self.pearson = PearsonCorrCoef().to(self.device)
def training_substep(self, batch, batch_idx, guidance: str):
"""
Args:
guidance: one of "ref" (reference image supervision), "zero123"
"""
if guidance == "ref":
# bg_color = torch.rand_like(batch['rays_o'])
ambient_ratio = 1.0
shading = "diffuse"
batch["shading"] = shading
elif guidance == "zero123":
batch = batch["random_camera"]
ambient_ratio = (
self.cfg.ambient_ratio_min
+ (1 - self.cfg.ambient_ratio_min) * random.random()
)
batch["bg_color"] = None
batch["ambient_ratio"] = ambient_ratio
out = self(batch)
loss_prefix = f"loss_{guidance}_"
loss_terms = {}
def set_loss(name, value):
loss_terms[f"{loss_prefix}{name}"] = value
guidance_eval = (
guidance == "zero123"
and self.cfg.freq.guidance_eval > 0
and self.true_global_step % self.cfg.freq.guidance_eval == 0
)
if guidance == "ref":
gt_mask = batch["mask"]
gt_rgb = batch["rgb"]
# color loss
gt_rgb = gt_rgb * gt_mask.float() + out["comp_rgb_bg"] * (
1 - gt_mask.float()
)
set_loss("rgb", F.mse_loss(gt_rgb, out["comp_rgb"]))
# mask loss
set_loss("mask", F.mse_loss(gt_mask.float(), out["opacity"]))
# depth loss
if self.C(self.cfg.loss.lambda_depth) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)].unsqueeze(1)
valid_pred_depth = out["depth"][gt_mask].unsqueeze(1)
with torch.no_grad():
A = torch.cat(
[valid_gt_depth, torch.ones_like(valid_gt_depth)], dim=-1
) # [B, 2]
X = torch.linalg.lstsq(A, valid_pred_depth).solution # [2, 1]
valid_gt_depth = A @ X # [B, 1]
set_loss("depth", F.mse_loss(valid_gt_depth, valid_pred_depth))
# relative depth loss
if self.C(self.cfg.loss.lambda_depth_rel) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)] # [B,]
valid_pred_depth = out["depth"][gt_mask] # [B,]
set_loss(
"depth_rel", 1 - self.pearson(valid_pred_depth, valid_gt_depth)
)
# normal loss
if self.C(self.cfg.loss.lambda_normal) > 0:
valid_gt_normal = (
1 - 2 * batch["ref_normal"][gt_mask.squeeze(-1)]
) # [B, 3]
valid_pred_normal = (
2 * out["comp_normal"][gt_mask.squeeze(-1)] - 1
) # [B, 3]
set_loss(
"normal",
1 - F.cosine_similarity(valid_pred_normal, valid_gt_normal).mean(),
)
elif guidance == "zero123":
# zero123
guidance_out = self.guidance(
out["comp_rgb"],
**batch,
rgb_as_latents=False,
guidance_eval=guidance_eval,
)
# claforte: TODO: rename the loss_terms keys
set_loss("sds", guidance_out["loss_sds"])
if self.C(self.cfg.loss.lambda_normal_smooth) > 0:
if "comp_normal" not in out:
raise ValueError(
"comp_normal is required for 2D normal smooth loss, no comp_normal is found in the output."
)
normal = out["comp_normal"]
set_loss(
"normal_smooth",
(normal[:, 1:, :, :] - normal[:, :-1, :, :]).square().mean()
+ (normal[:, :, 1:, :] - normal[:, :, :-1, :]).square().mean(),
)
if self.C(self.cfg.loss.lambda_3d_normal_smooth) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for normal smooth loss, no normal is found in the output."
)
if "normal_perturb" not in out:
raise ValueError(
"normal_perturb is required for normal smooth loss, no normal_perturb is found in the output."
)
normals = out["normal"]
normals_perturb = out["normal_perturb"]
set_loss("3d_normal_smooth", (normals - normals_perturb).abs().mean())
if not self.cfg.refinement:
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
set_loss(
"orient",
(
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum()
/ (out["opacity"] > 0).sum(),
)
if guidance != "ref" and self.C(self.cfg.loss.lambda_sparsity) > 0:
set_loss("sparsity", (out["opacity"] ** 2 + 0.01).sqrt().mean())
if self.C(self.cfg.loss.lambda_opaque) > 0:
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
set_loss(
"opaque", binary_cross_entropy(opacity_clamped, opacity_clamped)
)
else:
if self.C(self.cfg.loss.lambda_normal_consistency) > 0:
set_loss("normal_consistency", out["mesh"].normal_consistency())
if self.C(self.cfg.loss.lambda_laplacian_smoothness) > 0:
set_loss("laplacian_smoothness", out["mesh"].laplacian())
loss = 0.0
for name, value in loss_terms.items():
self.log(f"train/{name}", value)
if name.startswith(loss_prefix):
loss_weighted = value * self.C(
self.cfg.loss[name.replace(loss_prefix, "lambda_")]
)
self.log(f"train/{name}_w", loss_weighted)
loss += loss_weighted
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
self.log(f"train/loss_{guidance}", loss)
if guidance_eval:
self.guidance_evaluation_save(
out["comp_rgb"].detach()[: guidance_out["eval"]["bs"]],
guidance_out["eval"],
)
return {"loss": loss}
def training_step(self, batch, batch_idx):
if self.cfg.freq.get("ref_or_zero123", "accumulate") == "accumulate":
do_ref = True
do_zero123 = True
elif self.cfg.freq.get("ref_or_zero123", "accumulate") == "alternate":
do_ref = (
self.true_global_step < self.cfg.freq.ref_only_steps
or self.true_global_step % self.cfg.freq.n_ref == 0
)
do_zero123 = not do_ref
total_loss = 0.0
if do_zero123:
out = self.training_substep(batch, batch_idx, guidance="zero123")
total_loss += out["loss"]
if do_ref:
out = self.training_substep(batch, batch_idx, guidance="ref")
total_loss += out["loss"]
self.log("train/loss", total_loss, prog_bar=True)
# sch = self.lr_schedulers()
# sch.step()
return {"loss": total_loss}
def validation_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-val/{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": batch["rgb"][0],
"kwargs": {"data_format": "HWC"},
}
]
if "rgb" in batch
else []
)
+ [
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ (
[
{
"type": "grayscale",
"img": out["depth"][0],
"kwargs": {},
}
]
if "depth" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
# claforte: TODO: don't hardcode the frame numbers to record... read them from cfg instead.
name=f"validation_step_batchidx_{batch_idx}"
if batch_idx in [0, 7, 15, 23, 29]
else None,
step=self.true_global_step,
)
def on_validation_epoch_end(self):
filestem = f"it{self.true_global_step}-val"
self.save_img_sequence(
filestem,
filestem,
"(\d+)\.png",
save_format="mp4",
fps=30,
name="validation_epoch_end",
step=self.true_global_step,
)
shutil.rmtree(
os.path.join(self.get_save_dir(), f"it{self.true_global_step}-val")
)
def test_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-test/{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": batch["rgb"][0],
"kwargs": {"data_format": "HWC"},
}
]
if "rgb" in batch
else []
)
+ [
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ (
[
{
"type": "grayscale",
"img": out["depth"][0],
"kwargs": {},
}
]
if "depth" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="test_step",
step=self.true_global_step,
)
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
shutil.rmtree(
os.path.join(self.get_save_dir(), f"it{self.true_global_step}-test")
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/utils.py | threestudio/systems/utils.py | import sys
import warnings
from bisect import bisect_right
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import threestudio
def get_scheduler(name):
if hasattr(lr_scheduler, name):
return getattr(lr_scheduler, name)
else:
raise NotImplementedError
def getattr_recursive(m, attr):
for name in attr.split("."):
m = getattr(m, name)
return m
def get_parameters(model, name):
module = getattr_recursive(model, name)
if isinstance(module, nn.Module):
return module.parameters()
elif isinstance(module, nn.Parameter):
return module
return []
def parse_optimizer(config, model):
if hasattr(config, "params"):
params = [
{"params": get_parameters(model, name), "name": name, **args}
for name, args in config.params.items()
]
threestudio.debug(f"Specify optimizer params: {config.params}")
else:
params = model.parameters()
if config.name in ["FusedAdam"]:
import apex
optim = getattr(apex.optimizers, config.name)(params, **config.args)
elif config.name in ["Adan"]:
from threestudio.systems import optimizers
optim = getattr(optimizers, config.name)(params, **config.args)
else:
optim = getattr(torch.optim, config.name)(params, **config.args)
return optim
def parse_scheduler_to_instance(config, optimizer):
if config.name == "ChainedScheduler":
schedulers = [
parse_scheduler_to_instance(conf, optimizer) for conf in config.schedulers
]
scheduler = lr_scheduler.ChainedScheduler(schedulers)
elif config.name == "Sequential":
schedulers = [
parse_scheduler_to_instance(conf, optimizer) for conf in config.schedulers
]
scheduler = lr_scheduler.SequentialLR(
optimizer, schedulers, milestones=config.milestones
)
else:
scheduler = getattr(lr_scheduler, config.name)(optimizer, **config.args)
return scheduler
def parse_scheduler(config, optimizer):
interval = config.get("interval", "epoch")
assert interval in ["epoch", "step"]
if config.name == "SequentialLR":
scheduler = {
"scheduler": lr_scheduler.SequentialLR(
optimizer,
[
parse_scheduler(conf, optimizer)["scheduler"]
for conf in config.schedulers
],
milestones=config.milestones,
),
"interval": interval,
}
elif config.name == "ChainedScheduler":
scheduler = {
"scheduler": lr_scheduler.ChainedScheduler(
[
parse_scheduler(conf, optimizer)["scheduler"]
for conf in config.schedulers
]
),
"interval": interval,
}
else:
scheduler = {
"scheduler": get_scheduler(config.name)(optimizer, **config.args),
"interval": interval,
}
return scheduler
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/dreamfusion.py | threestudio/systems/dreamfusion.py | from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("dreamfusion-system")
class DreamFusion(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
pass
cfg: Config
def configure(self):
# create geometry, material, background, renderer
super().configure()
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
def on_fit_start(self) -> None:
super().on_fit_start()
# only used in training
self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
self.cfg.prompt_processor
)
self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
def training_step(self, batch, batch_idx):
out = self(batch)
prompt_utils = self.prompt_processor()
guidance_out = self.guidance(
out["comp_rgb"], prompt_utils, **batch, rgb_as_latents=False
)
loss = 0.0
for name, value in guidance_out.items():
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
loss_orient = (
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum() / (out["opacity"] > 0).sum()
self.log("train/loss_orient", loss_orient)
loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
self.log("train/loss_sparsity", loss_sparsity)
loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
self.log("train/loss_opaque", loss_opaque)
loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
return {"loss": loss}
def validation_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-{batch['index'][0]}.png",
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="validation_step",
step=self.true_global_step,
)
def on_validation_epoch_end(self):
pass
def test_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-test/{batch['index'][0]}.png",
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if "comp_normal" in out
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
],
name="test_step",
step=self.true_global_step,
)
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/__init__.py | threestudio/systems/__init__.py | from . import (
control4d_multiview,
dreamfusion,
fantasia3d,
imagedreamfusion,
instructnerf2nerf,
latentnerf,
magic3d,
magic123,
mvdream,
prolificdreamer,
sjc,
textmesh,
zero123,
zero123_simple,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/fantasia3d.py | threestudio/systems/fantasia3d.py | from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("fantasia3d-system")
class Fantasia3D(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
latent_steps: int = 1000
texture: bool = False
cfg: Config
def configure(self):
# create geometry, material, background, renderer
super().configure()
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch, render_rgb=self.cfg.texture)
return {
**render_out,
}
def on_fit_start(self) -> None:
super().on_fit_start()
# only used in training
self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
self.cfg.prompt_processor
)
self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
if not self.cfg.texture:
# initialize SDF
# FIXME: what if using other geometry types?
self.geometry.initialize_shape()
def training_step(self, batch, batch_idx):
loss = 0.0
out = self(batch)
prompt_utils = self.prompt_processor()
if not self.cfg.texture: # geometry training
if self.true_global_step < self.cfg.latent_steps:
guidance_inp = torch.cat(
[out["comp_normal"] * 2.0 - 1.0, out["opacity"]], dim=-1
)
guidance_out = self.guidance(
guidance_inp, prompt_utils, **batch, rgb_as_latents=True
)
else:
guidance_inp = out["comp_normal"]
guidance_out = self.guidance(
guidance_inp, prompt_utils, **batch, rgb_as_latents=False
)
loss_normal_consistency = out["mesh"].normal_consistency()
self.log("train/loss_normal_consistency", loss_normal_consistency)
loss += loss_normal_consistency * self.C(
self.cfg.loss.lambda_normal_consistency
)
else: # texture training
guidance_inp = out["comp_rgb"]
if isinstance(
self.guidance,
threestudio.models.guidance.controlnet_guidance.ControlNetGuidance,
):
cond_inp = out["comp_normal"]
guidance_out = self.guidance(
guidance_inp, cond_inp, prompt_utils, **batch, rgb_as_latents=False
)
else:
guidance_out = self.guidance(
guidance_inp, prompt_utils, **batch, rgb_as_latents=False
)
for name, value in guidance_out.items():
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
return {"loss": loss}
def validation_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if self.cfg.texture
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
},
],
name="validation_step",
step=self.true_global_step,
)
def on_validation_epoch_end(self):
pass
def test_step(self, batch, batch_idx):
out = self(batch)
self.save_image_grid(
f"it{self.true_global_step}-test/{batch['index'][0]}.png",
(
[
{
"type": "rgb",
"img": out["comp_rgb"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
}
]
if self.cfg.texture
else []
)
+ [
{
"type": "grayscale",
"img": out["opacity"][0, :, :, 0],
"kwargs": {"cmap": None, "data_range": (0, 1)},
},
{
"type": "rgb",
"img": out["comp_normal"][0],
"kwargs": {"data_format": "HWC", "data_range": (0, 1)},
},
],
name="test_step",
step=self.true_global_step,
)
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/base.py | threestudio/systems/base.py | import os
from dataclasses import dataclass, field
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import (
Updateable,
update_end_if_possible,
update_if_possible,
)
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
state_dict, epoch, global_step = load_module_weights(
weights, ignore_modules=ignore_modules, map_location="cpu"
)
self.load_state_dict(state_dict, strict=False)
# restore step-dependent states
self.do_update_step(epoch, global_step, on_load_weights=True)
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_train_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.train_dataloader.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
def on_validation_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.val_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram
cleanup()
def on_validation_epoch_end(self):
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def on_test_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.test_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_test_epoch_end(self):
pass
def predict_step(self, batch, batch_idx):
raise NotImplementedError
def on_predict_batch_end(self, outputs, batch, batch_idx):
self.dataset = self.trainer.predict_dataloaders.dataset
update_end_if_possible(
self.dataset, self.true_current_epoch, self.true_global_step
)
self.do_update_step_end(self.true_current_epoch, self.true_global_step)
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_predict_epoch_end(self):
pass
def preprocess_data(self, batch, stage):
pass
"""
Implementing on_after_batch_transfer of DataModule does the same.
But on_after_batch_transfer does not support DP.
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.preprocess_data(batch, "train")
self.dataset = self.trainer.train_dataloader.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "validation")
self.dataset = self.trainer.val_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "test")
self.dataset = self.trainer.test_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "predict")
self.dataset = self.trainer.predict_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
pass
def on_before_optimizer_step(self, optimizer):
"""
# some gradient-related debugging goes here, example:
from lightning.pytorch.utilities import grad_norm
norms = grad_norm(self.geometry, norm_type=2)
print(norms)
"""
pass
class BaseLift3DSystem(BaseSystem):
    @dataclass
    class Config(BaseSystem.Config):
        # Component registry names and their sub-configs; each *_type string is
        # resolved through threestudio.find(...) in configure().
        geometry_type: str = ""
        geometry: dict = field(default_factory=dict)
        # Optional coarse-stage checkpoint to convert the geometry from.
        geometry_convert_from: Optional[str] = None
        geometry_convert_inherit_texture: bool = False
        # used to override configurations of the previous geometry being converted from,
        # for example isosurface_threshold
        geometry_convert_override: dict = field(default_factory=dict)
        material_type: str = ""
        material: dict = field(default_factory=dict)
        background_type: str = ""
        background: dict = field(default_factory=dict)
        renderer_type: str = ""
        renderer: dict = field(default_factory=dict)
        guidance_type: str = ""
        guidance: dict = field(default_factory=dict)
        prompt_processor_type: str = ""
        prompt_processor: dict = field(default_factory=dict)
        # geometry export configurations, no need to specify in training
        exporter_type: str = "mesh-exporter"
        exporter: dict = field(default_factory=dict)

    # Populated with the concrete Config above by the base system machinery.
    cfg: Config
    def configure(self) -> None:
        """Build the geometry/material/background/renderer components.

        If ``geometry_convert_from`` points at a coarse-stage checkpoint, and we
        are neither loading explicit weights nor resuming, the geometry is
        initialized by converting the previous stage's geometry; otherwise it
        is created fresh from the config.
        """
        if (
            self.cfg.geometry_convert_from  # from_coarse must be specified
            and not self.cfg.weights  # not initialized from coarse when weights are specified
            and not self.resumed  # not initialized from coarse when resumed from checkpoints
        ):
            threestudio.info("Initializing geometry from a given checkpoint ...")
            from threestudio.utils.config import load_config, parse_structured

            # Recover the previous run's parsed config, stored next to its checkpoint.
            prev_cfg = load_config(
                os.path.join(
                    os.path.dirname(self.cfg.geometry_convert_from),
                    "../configs/parsed.yaml",
                )
            )  # TODO: hard-coded relative path
            prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
                self.Config, prev_cfg.system
            )
            prev_geometry_cfg = prev_system_cfg.geometry
            prev_geometry_cfg.update(self.cfg.geometry_convert_override)
            prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
                prev_geometry_cfg
            )
            # Load only the geometry weights from the previous checkpoint.
            state_dict, epoch, global_step = load_module_weights(
                self.cfg.geometry_convert_from,
                module_name="geometry",
                map_location="cpu",
            )
            prev_geometry.load_state_dict(state_dict, strict=False)
            # restore step-dependent states
            prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
            # convert from coarse stage geometry
            prev_geometry = prev_geometry.to(get_device())
            self.geometry = threestudio.find(self.cfg.geometry_type).create_from(
                prev_geometry,
                self.cfg.geometry,
                copy_net=self.cfg.geometry_convert_inherit_texture,
            )
            del prev_geometry
            cleanup()
        else:
            self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
        self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
        self.background = threestudio.find(self.cfg.background_type)(
            self.cfg.background
        )
        self.renderer = threestudio.find(self.cfg.renderer_type)(
            self.cfg.renderer,
            geometry=self.geometry,
            material=self.material,
            background=self.background,
        )
def on_fit_start(self) -> None:
if self._save_dir is not None:
threestudio.info(f"Validation results will be saved to {self._save_dir}")
else:
threestudio.warn(
f"Saving directory not set for the system, visualization results will not be saved"
)
    def on_test_end(self) -> None:
        """Report where test artifacts were written, if saving is enabled."""
        if self._save_dir is not None:
            threestudio.info(f"Test results saved to {self._save_dir}")
    def on_predict_start(self) -> None:
        """Instantiate the exporter used to dump assets during prediction."""
        self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)(
            self.cfg.exporter,
            geometry=self.geometry,
            material=self.material,
            background=self.background,
        )
    def predict_step(self, batch, batch_idx):
        # Reuse test rendering to produce video frames when the exporter
        # is configured to save a video.
        if self.exporter.cfg.save_video:
            self.test_step(batch, batch_idx)
    def on_predict_epoch_end(self) -> None:
        """Optionally finish the test video, then run the exporter and save
        each of its outputs via the matching SaverMixin method."""
        if self.exporter.cfg.save_video:
            self.on_test_epoch_end()
        exporter_output: List[ExporterOutput] = self.exporter()
        for out in exporter_output:
            # Dispatch on save_type, e.g. "obj" -> self.save_obj(...).
            save_func_name = f"save_{out.save_type}"
            if not hasattr(self, save_func_name):
                raise ValueError(f"{save_func_name} not supported by the SaverMixin")
            save_func = getattr(self, save_func_name)
            save_func(f"it{self.true_global_step}-export/{out.save_name}", **out.params)
    def on_predict_end(self) -> None:
        """Report where exported assets were written, if saving is enabled."""
        if self._save_dir is not None:
            threestudio.info(f"Export assets saved to {self._save_dir}")
def guidance_evaluation_save(self, comp_rgb, guidance_eval_out):
B, size = comp_rgb.shape[:2]
resize = lambda x: F.interpolate(
x.permute(0, 3, 1, 2), (size, size), mode="bilinear", align_corners=False
).permute(0, 2, 3, 1)
filename = f"it{self.true_global_step}-train.png"
def merge12(x):
return x.reshape(-1, *x.shape[2:])
self.save_image_grid(
filename,
[
{
"type": "rgb",
"img": merge12(comp_rgb),
"kwargs": {"data_format": "HWC"},
},
]
+ (
[
{
"type": "rgb",
"img": merge12(resize(guidance_eval_out["imgs_noisy"])),
"kwargs": {"data_format": "HWC"},
}
]
)
+ (
[
{
"type": "rgb",
"img": merge12(resize(guidance_eval_out["imgs_1step"])),
"kwargs": {"data_format": "HWC"},
}
]
)
+ (
[
{
"type": "rgb",
"img": merge12(resize(guidance_eval_out["imgs_1orig"])),
"kwargs": {"data_format": "HWC"},
}
]
)
+ (
[
{
"type": "rgb",
"img": merge12(resize(guidance_eval_out["imgs_final"])),
"kwargs": {"data_format": "HWC"},
}
]
),
name="train_step",
step=self.true_global_step,
texts=guidance_eval_out["texts"],
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/magic123.py | threestudio/systems/magic123.py | from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("magic123-system")
class Magic123(BaseLift3DSystem):
    @dataclass
    class Config(BaseLift3DSystem.Config):
        # refinement toggles mesh-stage regularizers instead of volumetric ones.
        refinement: bool = False
        # Secondary (3D-aware, e.g. Zero123) guidance alongside the 2D guidance.
        guidance_3d_type: str = ""
        guidance_3d: dict = field(default_factory=dict)

    cfg: Config
    def configure(self):
        # create geometry, material, background, renderer
        super().configure()
        # Magic123 uses both a 2D (text) and a 3D (image) guidance model.
        self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
        self.guidance_3d = threestudio.find(self.cfg.guidance_3d_type)(
            self.cfg.guidance_3d
        )
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
    def on_fit_start(self) -> None:
        """Create the prompt processor (training-only; guidance is built in configure)."""
        super().on_fit_start()
        self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
            self.cfg.prompt_processor
        )
def training_step(self, batch, batch_idx):
out_input = self(batch)
out = self(batch["random_camera"])
prompt_utils = self.prompt_processor()
guidance_out = self.guidance(
out["comp_rgb"],
prompt_utils,
**batch["random_camera"],
rgb_as_latents=False,
)
guidance_3d_out = self.guidance_3d(
out["comp_rgb"],
**batch["random_camera"],
rgb_as_latents=False,
)
loss = 0.0
loss_rgb = F.mse_loss(
out_input["comp_rgb"],
batch["rgb"] * batch["mask"].float()
+ out_input["comp_rgb_bg"] * (1.0 - batch["mask"].float()),
)
self.log("train/loss_rgb", loss_rgb)
loss += loss_rgb * self.C(self.cfg.loss.lambda_rgb)
loss_mask = F.binary_cross_entropy(
out_input["opacity"].clamp(1.0e-5, 1.0 - 1.0e-5),
batch["mask"].float(),
)
self.log("train/loss_mask", loss_mask)
loss += loss_mask * self.C(self.cfg.loss.lambda_mask)
for name, value in guidance_out.items():
if not (isinstance(value, torch.Tensor) and len(value.shape) > 0):
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
for name, value in guidance_3d_out.items():
if not (isinstance(value, torch.Tensor) and len(value.shape) > 0):
self.log(f"train/{name}_3d", value)
if name.startswith("loss_"):
loss += value * self.C(
self.cfg.loss[name.replace("loss_", "lambda_3d_")]
)
if not self.cfg.refinement:
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
loss_orient = (
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum() / (out["opacity"] > 0).sum()
self.log("train/loss_orient", loss_orient)
loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
if self.C(self.cfg.loss.lambda_normal_smoothness_2d) > 0:
if "comp_normal" not in out:
raise ValueError(
"comp_normal is required for 2D normal smoothness loss, no comp_normal is found in the output."
)
normal = out["comp_normal"]
loss_normal_smoothness_2d = (
normal[:, 1:, :, :] - normal[:, :-1, :, :]
).square().mean() + (
normal[:, :, 1:, :] - normal[:, :, :-1, :]
).square().mean()
self.log("trian/loss_normal_smoothness_2d", loss_normal_smoothness_2d)
loss += loss_normal_smoothness_2d * self.C(
self.cfg.loss.lambda_normal_smoothness_2d
)
loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
self.log("train/loss_sparsity", loss_sparsity)
loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
self.log("train/loss_opaque", loss_opaque)
loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
else:
loss_normal_consistency = out["mesh"].normal_consistency()
self.log("train/loss_normal_consistency", loss_normal_consistency)
loss += loss_normal_consistency * self.C(
self.cfg.loss.lambda_normal_consistency
)
if self.C(self.cfg.loss.lambda_laplacian_smoothness) > 0:
loss_laplacian_smoothness = out["mesh"].laplacian()
self.log("train/loss_laplacian_smoothness", loss_laplacian_smoothness)
loss += loss_laplacian_smoothness * self.C(
self.cfg.loss.lambda_laplacian_smoothness
)
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
return {"loss": loss}
    def validation_step(self, batch, batch_idx):
        """Save a qualitative grid (RGB, optional normal map, opacity) for one
        validation view."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="validation_step",
            step=self.true_global_step,
        )
    def on_validation_epoch_end(self):
        # Nothing aggregated across validation batches.
        pass
    def test_step(self, batch, batch_idx):
        """Save one test-view frame; frames are later stitched into a video in
        on_test_epoch_end."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-test/{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="test_step",
            step=self.true_global_step,
        )
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/magic3d.py | threestudio/systems/magic3d.py | import os
from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.misc import cleanup, get_device
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("magic3d-system")
class Magic3D(BaseLift3DSystem):
    @dataclass
    class Config(BaseLift3DSystem.Config):
        # refinement toggles mesh-stage regularizers instead of volumetric ones.
        refinement: bool = False

    cfg: Config
    def configure(self):
        # create geometry, material, background, renderer
        super().configure()
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
    def on_fit_start(self) -> None:
        super().on_fit_start()
        # only used in training
        self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
            self.cfg.prompt_processor
        )
        self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
def training_step(self, batch, batch_idx):
out = self(batch)
prompt_utils = self.prompt_processor()
guidance_out = self.guidance(
out["comp_rgb"], prompt_utils, **batch, rgb_as_latents=False
)
loss = 0.0
for name, value in guidance_out.items():
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
if not self.cfg.refinement:
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
loss_orient = (
out["weights"].detach()
* dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
).sum() / (out["opacity"] > 0).sum()
self.log("train/loss_orient", loss_orient)
loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
self.log("train/loss_sparsity", loss_sparsity)
loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
self.log("train/loss_opaque", loss_opaque)
loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
else:
loss_normal_consistency = out["mesh"].normal_consistency()
self.log("train/loss_normal_consistency", loss_normal_consistency)
loss += loss_normal_consistency * self.C(
self.cfg.loss.lambda_normal_consistency
)
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
return {"loss": loss}
    def validation_step(self, batch, batch_idx):
        """Save a qualitative grid (RGB, optional normal map, opacity) for one
        validation view."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="validation_step",
            step=self.true_global_step,
        )
    def on_validation_epoch_end(self):
        # Nothing aggregated across validation batches.
        pass
    def test_step(self, batch, batch_idx):
        """Save one test-view frame; frames are stitched into a video in
        on_test_epoch_end."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-test/{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="test_step",
            step=self.true_global_step,
        )
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/sjc.py | threestudio/systems/sjc.py | from dataclasses import dataclass, field
import numpy as np
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.typing import *
@threestudio.register("sjc-system")
class ScoreJacobianChaining(BaseLift3DSystem):
    @dataclass
    class Config(BaseLift3DSystem.Config):
        # Decode latents at 2x resolution (128 vs 64) for sharper outputs.
        subpixel_rendering: bool = True

    cfg: Config
    def configure(self):
        # create geometry, material, background, renderer
        super().configure()
        # Guidance is created here (not in on_fit_start) because forward()
        # needs guidance.decode_latents even outside of training.
        self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
    def forward(self, batch: Dict[str, Any], decode: bool = False) -> Dict[str, Any]:
        """Render the batch; with ``decode=True`` also decode rendered latents
        to RGB via the guidance VAE.

        SJC renders in latent space (training uses ``rgb_as_latents=True``),
        so ``comp_rgb`` holds latents and ``decoded_rgb`` holds images.
        """
        render_out = self.renderer(**batch)
        out = {
            **render_out,
        }
        if decode:
            if self.cfg.subpixel_rendering:
                latent_height, latent_width = 128, 128
            else:
                latent_height, latent_width = 64, 64
            out["decoded_rgb"] = self.guidance.decode_latents(
                out["comp_rgb"].permute(0, 3, 1, 2),
                latent_height=latent_height,
                latent_width=latent_width,
            ).permute(0, 2, 3, 1)
        return out
    def on_fit_start(self) -> None:
        super().on_fit_start()
        # only used in training
        self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
            self.cfg.prompt_processor
        )
    def on_test_start(self) -> None:
        # check if guidance is initialized, such as when loading from checkpoint
        if not hasattr(self, "guidance"):
            self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
def training_step(self, batch, batch_idx):
out = self(batch)
prompt_utils = self.prompt_processor()
guidance_out = self.guidance(
out["comp_rgb"], prompt_utils, **batch, rgb_as_latents=True
)
loss = 0.0
for name, value in guidance_out.items():
self.log(f"train/{name}", value)
if name.startswith("loss_"):
loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
loss_emptiness = (
self.C(self.cfg.loss.lambda_emptiness)
* torch.log(1 + self.cfg.loss.emptiness_scale * out["weights"]).mean()
)
self.log("train/loss_emptiness", loss_emptiness)
loss += loss_emptiness
# About the depth loss, see https://github.com/pals-ttic/sjc/issues/21
if self.C(self.cfg.loss.lambda_depth) > 0:
_, h, w, _ = out["comp_rgb"].shape
comp_depth = (out["depth"] + 10 * (1 - out["opacity"])).squeeze(-1)
center_h = int(self.cfg.loss.center_ratio * h)
center_w = int(self.cfg.loss.center_ratio * w)
border_h = (h - center_h) // 2
border_w = (h - center_w) // 2
center_depth = comp_depth[
..., border_h : border_h + center_h, border_w : border_w + center_w
]
center_depth_mean = center_depth.mean()
border_depth_mean = (comp_depth.sum() - center_depth.sum()) / (
h * w - center_h * center_w
)
log_input = center_depth_mean - border_depth_mean + 1e-12
loss_depth = (
torch.sign(log_input)
* torch.log(log_input)
* self.C(self.cfg.loss.lambda_depth)
)
self.log("train/loss_depth", loss_depth)
loss += loss_depth
for name, value in self.cfg.loss.items():
self.log(f"train_params/{name}", self.C(value))
return {"loss": loss}
def vis_depth(self, pred_depth):
depth = pred_depth.detach().cpu().numpy()
depth = np.log(1.0 + depth + 1e-12) / np.log(1 + 10.0)
return depth
    def validation_step(self, batch, batch_idx):
        """Save decoded RGB, optional normals, opacity, and a depth
        visualization for one validation view."""
        out = self(batch, decode=True)
        comp_depth = out["depth"] + 10 * (1 - out["opacity"])  # 10 for background
        vis_depth = self.vis_depth(comp_depth.squeeze(-1))
        self.save_image_grid(
            f"it{self.true_global_step}-{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["decoded_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ]
            + [
                {
                    "type": "grayscale",
                    "img": vis_depth[0],
                    "kwargs": {"cmap": "spectral", "data_range": (0, 1)},
                },
            ],
            align=512,
            name="validation_step",
            step=self.true_global_step,
        )
    def on_validation_epoch_end(self):
        # Nothing aggregated across validation batches.
        pass
    def test_step(self, batch, batch_idx):
        """Save one decoded test-view frame; frames are stitched into a video
        in on_test_epoch_end."""
        out = self(batch, decode=True)
        self.save_image_grid(
            f"it{self.true_global_step}-test/{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["decoded_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            align=512,
            name="test_step",
            step=self.true_global_step,
        )
def on_test_epoch_end(self):
self.save_img_sequence(
f"it{self.true_global_step}-test",
f"it{self.true_global_step}-test",
"(\d+)\.png",
save_format="mp4",
fps=30,
name="test",
step=self.true_global_step,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/data/image.py | threestudio/data/image.py | import bisect
import math
import os
from dataclasses import dataclass, field
import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, IterableDataset
import threestudio
from threestudio import register
from threestudio.data.uncond import (
RandomCameraDataModuleConfig,
RandomCameraDataset,
RandomCameraIterableDataset,
)
from threestudio.utils.base import Updateable
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import get_rank
from threestudio.utils.ops import (
get_mvp_matrix,
get_projection_matrix,
get_ray_directions,
get_rays,
)
from threestudio.utils.typing import *
@dataclass
class SingleImageDataModuleConfig:
    # height and width should be Union[int, List[int]]
    # but OmegaConf does not support Union of containers
    height: Any = 96
    width: Any = 96
    # Global steps at which to switch to the next (height, width) pair.
    resolution_milestones: List[int] = field(default_factory=lambda: [])
    # Fixed reference-camera pose used to re-render the input image.
    default_elevation_deg: float = 0.0
    default_azimuth_deg: float = -180.0
    default_camera_distance: float = 1.2
    default_fovy_deg: float = 60.0
    image_path: str = ""
    # Whether to also sample random novel-view cameras each batch.
    use_random_camera: bool = True
    random_camera: dict = field(default_factory=dict)
    rays_noise_scale: float = 2e-3
    batch_size: int = 1
    # Expect sibling *_depth.png / *_normal.png files next to the image.
    requires_depth: bool = False
    requires_normal: bool = False
class SingleImageDataBase:
def setup(self, cfg, split):
self.split = split
self.rank = get_rank()
self.cfg: SingleImageDataModuleConfig = cfg
if self.cfg.use_random_camera:
random_camera_cfg = parse_structured(
RandomCameraDataModuleConfig, self.cfg.get("random_camera", {})
)
if split == "train":
self.random_pose_generator = RandomCameraIterableDataset(
random_camera_cfg
)
else:
self.random_pose_generator = RandomCameraDataset(
random_camera_cfg, split
)
elevation_deg = torch.FloatTensor([self.cfg.default_elevation_deg])
azimuth_deg = torch.FloatTensor([self.cfg.default_azimuth_deg])
camera_distance = torch.FloatTensor([self.cfg.default_camera_distance])
elevation = elevation_deg * math.pi / 180
azimuth = azimuth_deg * math.pi / 180
camera_position: Float[Tensor, "1 3"] = torch.stack(
[
camera_distance * torch.cos(elevation) * torch.cos(azimuth),
camera_distance * torch.cos(elevation) * torch.sin(azimuth),
camera_distance * torch.sin(elevation),
],
dim=-1,
)
center: Float[Tensor, "1 3"] = torch.zeros_like(camera_position)
up: Float[Tensor, "1 3"] = torch.as_tensor([0, 0, 1], dtype=torch.float32)[None]
light_position: Float[Tensor, "1 3"] = camera_position
lookat: Float[Tensor, "1 3"] = F.normalize(center - camera_position, dim=-1)
right: Float[Tensor, "1 3"] = F.normalize(torch.cross(lookat, up), dim=-1)
up = F.normalize(torch.cross(right, lookat), dim=-1)
self.c2w: Float[Tensor, "1 3 4"] = torch.cat(
[torch.stack([right, up, -lookat], dim=-1), camera_position[:, :, None]],
dim=-1,
)
self.camera_position = camera_position
self.light_position = light_position
self.elevation_deg, self.azimuth_deg = elevation_deg, azimuth_deg
self.camera_distance = camera_distance
self.fovy = torch.deg2rad(torch.FloatTensor([self.cfg.default_fovy_deg]))
self.heights: List[int] = (
[self.cfg.height] if isinstance(self.cfg.height, int) else self.cfg.height
)
self.widths: List[int] = (
[self.cfg.width] if isinstance(self.cfg.width, int) else self.cfg.width
)
assert len(self.heights) == len(self.widths)
self.resolution_milestones: List[int]
if len(self.heights) == 1 and len(self.widths) == 1:
if len(self.cfg.resolution_milestones) > 0:
threestudio.warn(
"Ignoring resolution_milestones since height and width are not changing"
)
self.resolution_milestones = [-1]
else:
assert len(self.heights) == len(self.cfg.resolution_milestones) + 1
self.resolution_milestones = [-1] + self.cfg.resolution_milestones
self.directions_unit_focals = [
get_ray_directions(H=height, W=width, focal=1.0)
for (height, width) in zip(self.heights, self.widths)
]
self.focal_lengths = [
0.5 * height / torch.tan(0.5 * self.fovy) for height in self.heights
]
self.height: int = self.heights[0]
self.width: int = self.widths[0]
self.directions_unit_focal = self.directions_unit_focals[0]
self.focal_length = self.focal_lengths[0]
self.set_rays()
self.load_images()
self.prev_height = self.height
def set_rays(self):
# get directions by dividing directions_unit_focal by focal length
directions: Float[Tensor, "1 H W 3"] = self.directions_unit_focal[None]
directions[:, :, :, :2] = directions[:, :, :, :2] / self.focal_length
rays_o, rays_d = get_rays(
directions, self.c2w, keepdim=True, noise_scale=self.cfg.rays_noise_scale
)
proj_mtx: Float[Tensor, "4 4"] = get_projection_matrix(
self.fovy, self.width / self.height, 0.1, 100.0
) # FIXME: hard-coded near and far
mvp_mtx: Float[Tensor, "4 4"] = get_mvp_matrix(self.c2w, proj_mtx)
self.rays_o, self.rays_d = rays_o, rays_d
self.mvp_mtx = mvp_mtx
    def load_images(self):
        """Load the RGBA reference image (plus optional depth/normal maps) at
        the current (height, width), moving the tensors to this rank's device.

        Depth/normal files are expected next to the image with the
        ``_rgba.png`` suffix replaced by ``_depth.png`` / ``_normal.png``.
        """
        # load image
        assert os.path.exists(
            self.cfg.image_path
        ), f"Could not find image {self.cfg.image_path}!"
        rgba = cv2.cvtColor(
            cv2.imread(self.cfg.image_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGRA2RGBA
        )
        rgba = (
            cv2.resize(
                rgba, (self.width, self.height), interpolation=cv2.INTER_AREA
            ).astype(np.float32)
            / 255.0
        )
        rgb = rgba[..., :3]
        self.rgb: Float[Tensor, "1 H W 3"] = (
            torch.from_numpy(rgb).unsqueeze(0).contiguous().to(self.rank)
        )
        # Foreground mask thresholded from the alpha channel.
        self.mask: Float[Tensor, "1 H W 1"] = (
            torch.from_numpy(rgba[..., 3:] > 0.5).unsqueeze(0).to(self.rank)
        )
        print(
            f"[INFO] single image dataset: load image {self.cfg.image_path} {self.rgb.shape}"
        )

        # load depth
        if self.cfg.requires_depth:
            depth_path = self.cfg.image_path.replace("_rgba.png", "_depth.png")
            assert os.path.exists(depth_path)
            depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
            depth = cv2.resize(
                depth, (self.width, self.height), interpolation=cv2.INTER_AREA
            )
            self.depth: Float[Tensor, "1 H W 1"] = (
                torch.from_numpy(depth.astype(np.float32) / 255.0)
                .unsqueeze(0)
                .to(self.rank)
            )
            print(
                f"[INFO] single image dataset: load depth {depth_path} {self.depth.shape}"
            )
        else:
            self.depth = None

        # load normal
        if self.cfg.requires_normal:
            normal_path = self.cfg.image_path.replace("_rgba.png", "_normal.png")
            assert os.path.exists(normal_path)
            normal = cv2.imread(normal_path, cv2.IMREAD_UNCHANGED)
            normal = cv2.resize(
                normal, (self.width, self.height), interpolation=cv2.INTER_AREA
            )
            self.normal: Float[Tensor, "1 H W 3"] = (
                torch.from_numpy(normal.astype(np.float32) / 255.0)
                .unsqueeze(0)
                .to(self.rank)
            )
            print(
                f"[INFO] single image dataset: load normal {normal_path} {self.normal.shape}"
            )
        else:
            self.normal = None
    def get_all_images(self):
        # There is only one training image; return its (1, H, W, 3) tensor.
        return self.rgb
def update_step_(self, epoch: int, global_step: int, on_load_weights: bool = False):
size_ind = bisect.bisect_right(self.resolution_milestones, global_step) - 1
self.height = self.heights[size_ind]
if self.height == self.prev_height:
return
self.prev_height = self.height
self.width = self.widths[size_ind]
self.directions_unit_focal = self.directions_unit_focals[size_ind]
self.focal_length = self.focal_lengths[size_ind]
threestudio.debug(f"Training height: {self.height}, width: {self.width}")
self.set_rays()
self.load_images()
class SingleImageIterableDataset(IterableDataset, SingleImageDataBase, Updateable):
    """Infinite training dataset for a single reference image.

    __iter__ yields empty dicts; the real batch is assembled by collate() from
    the precomputed reference camera/rays plus, optionally, a random-camera
    sub-batch for novel-view guidance.
    """

    def __init__(self, cfg: Any, split: str) -> None:
        super().__init__()
        self.setup(cfg, split)

    def collate(self, batch) -> Dict[str, Any]:
        # `batch` (a list of empty dicts) is ignored; everything comes from
        # state precomputed in setup()/set_rays()/load_images().
        batch = {
            "rays_o": self.rays_o,
            "rays_d": self.rays_d,
            "mvp_mtx": self.mvp_mtx,
            "camera_positions": self.camera_position,
            "light_positions": self.light_position,
            "elevation": self.elevation_deg,
            "azimuth": self.azimuth_deg,
            "camera_distances": self.camera_distance,
            "rgb": self.rgb,
            "ref_depth": self.depth,
            "ref_normal": self.normal,
            "mask": self.mask,
            "height": self.cfg.height,
            "width": self.cfg.width,
        }
        if self.cfg.use_random_camera:
            batch["random_camera"] = self.random_pose_generator.collate(None)
        return batch

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        # Keep both this dataset's resolution and the random-camera sampler in sync.
        self.update_step_(epoch, global_step, on_load_weights)
        self.random_pose_generator.update_step(epoch, global_step, on_load_weights)

    def __iter__(self):
        while True:
            yield {}
class SingleImageDataset(Dataset, SingleImageDataBase):
    """Validation/test dataset: delegates entirely to the random pose
    generator (reference-view items are currently disabled, see below)."""

    def __init__(self, cfg: Any, split: str) -> None:
        super().__init__()
        self.setup(cfg, split)

    def __len__(self):
        return len(self.random_pose_generator)

    def __getitem__(self, index):
        return self.random_pose_generator[index]

        # if index == 0:
        #     return {
        #         'rays_o': self.rays_o[0],
        #         'rays_d': self.rays_d[0],
        #         'mvp_mtx': self.mvp_mtx[0],
        #         'camera_positions': self.camera_position[0],
        #         'light_positions': self.light_position[0],
        #         'elevation': self.elevation_deg[0],
        #         'azimuth': self.azimuth_deg[0],
        #         'camera_distances': self.camera_distance[0],
        #         'rgb': self.rgb[0],
        #         'depth': self.depth[0],
        #         'mask': self.mask[0]
        #     }
        # else:
        #     return self.random_pose_generator[index - 1]
@register("single-image-datamodule")
class SingleImageDataModule(pl.LightningDataModule):
    """Lightning datamodule wrapping the single-image datasets.

    Note: predict shares the test dataset/loader.
    """

    cfg: SingleImageDataModuleConfig

    def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
        super().__init__()
        self.cfg = parse_structured(SingleImageDataModuleConfig, cfg)

    def setup(self, stage=None) -> None:
        if stage in [None, "fit"]:
            self.train_dataset = SingleImageIterableDataset(self.cfg, "train")
        if stage in [None, "fit", "validate"]:
            self.val_dataset = SingleImageDataset(self.cfg, "val")
        if stage in [None, "test", "predict"]:
            self.test_dataset = SingleImageDataset(self.cfg, "test")

    def prepare_data(self):
        pass

    def general_loader(self, dataset, batch_size, collate_fn=None) -> DataLoader:
        # num_workers=0: datasets hold GPU tensors, so no worker processes.
        return DataLoader(
            dataset, num_workers=0, batch_size=batch_size, collate_fn=collate_fn
        )

    def train_dataloader(self) -> DataLoader:
        return self.general_loader(
            self.train_dataset,
            batch_size=self.cfg.batch_size,
            collate_fn=self.train_dataset.collate,
        )

    def val_dataloader(self) -> DataLoader:
        return self.general_loader(self.val_dataset, batch_size=1)

    def test_dataloader(self) -> DataLoader:
        return self.general_loader(self.test_dataset, batch_size=1)

    def predict_dataloader(self) -> DataLoader:
        return self.general_loader(self.test_dataset, batch_size=1)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/data/uncond.py | threestudio/data/uncond.py | import bisect
import math
import random
from dataclasses import dataclass, field
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, IterableDataset
import threestudio
from threestudio import register
from threestudio.utils.base import Updateable
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import get_device
from threestudio.utils.ops import (
get_mvp_matrix,
get_projection_matrix,
get_ray_directions,
get_rays,
)
from threestudio.utils.typing import *
@dataclass
class RandomCameraDataModuleConfig:
    # height, width, and batch_size should be Union[int, List[int]]
    # but OmegaConf does not support Union of containers
    height: Any = 64
    width: Any = 64
    batch_size: Any = 1
    # Global steps at which to switch to the next (height, width, batch) triple.
    resolution_milestones: List[int] = field(default_factory=lambda: [])
    eval_height: int = 512
    eval_width: int = 512
    eval_batch_size: int = 1
    n_val_views: int = 1
    n_test_views: int = 120
    # Sampling ranges for the random training cameras.
    elevation_range: Tuple[float, float] = (-10, 90)
    azimuth_range: Tuple[float, float] = (-180, 180)
    camera_distance_range: Tuple[float, float] = (1, 1.5)
    fovy_range: Tuple[float, float] = (
        40,
        70,
    )  # in degrees, in vertical direction (along height)
    # Magnitudes of random jitter applied to camera/center/up vectors.
    camera_perturb: float = 0.1
    center_perturb: float = 0.2
    up_perturb: float = 0.02
    light_position_perturb: float = 1.0
    light_distance_range: Tuple[float, float] = (0.8, 1.5)
    # Fixed camera used for validation/test renders.
    eval_elevation_deg: float = 15.0
    eval_camera_distance: float = 1.5
    eval_fovy_deg: float = 70.0
    light_sample_strategy: str = "dreamfusion"
    batch_uniform_azimuth: bool = True
    progressive_until: int = 0  # progressive ranges for elevation, azimuth, r, fovy
class RandomCameraIterableDataset(IterableDataset, Updateable):
    """Infinite dataset that synthesizes random training cameras on the fly.

    Each batch samples elevation / azimuth / camera distance / fov (optionally
    widened progressively from the eval pose via ``progressive_view``), builds
    the camera-to-world matrices, and returns per-pixel rays plus the MVP
    matrices and light positions needed by the renderer.
    """

    def __init__(self, cfg: Any) -> None:
        super().__init__()
        self.cfg: RandomCameraDataModuleConfig = cfg
        # height / width / batch_size may be plain ints or per-stage lists
        # switched at resolution_milestones
        self.heights: List[int] = (
            [self.cfg.height] if isinstance(self.cfg.height, int) else self.cfg.height
        )
        self.widths: List[int] = (
            [self.cfg.width] if isinstance(self.cfg.width, int) else self.cfg.width
        )
        self.batch_sizes: List[int] = (
            [self.cfg.batch_size]
            if isinstance(self.cfg.batch_size, int)
            else self.cfg.batch_size
        )
        assert len(self.heights) == len(self.widths) == len(self.batch_sizes)
        self.resolution_milestones: List[int]
        if (
            len(self.heights) == 1
            and len(self.widths) == 1
            and len(self.batch_sizes) == 1
        ):
            if len(self.cfg.resolution_milestones) > 0:
                threestudio.warn(
                    "Ignoring resolution_milestones since height and width are not changing"
                )
            self.resolution_milestones = [-1]
        else:
            assert len(self.heights) == len(self.cfg.resolution_milestones) + 1
            self.resolution_milestones = [-1] + self.cfg.resolution_milestones

        # unit-focal ray directions precomputed once per resolution stage
        self.directions_unit_focals = [
            get_ray_directions(H=height, W=width, focal=1.0)
            for (height, width) in zip(self.heights, self.widths)
        ]
        self.height: int = self.heights[0]
        self.width: int = self.widths[0]
        self.batch_size: int = self.batch_sizes[0]
        self.directions_unit_focal = self.directions_unit_focals[0]
        # mutable copies of the configured sampling ranges; progressive_view
        # narrows/widens these during training
        self.elevation_range = self.cfg.elevation_range
        self.azimuth_range = self.cfg.azimuth_range
        self.camera_distance_range = self.cfg.camera_distance_range
        self.fovy_range = self.cfg.fovy_range

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Switch to the resolution stage active at ``global_step``."""
        size_ind = bisect.bisect_right(self.resolution_milestones, global_step) - 1
        self.height = self.heights[size_ind]
        self.width = self.widths[size_ind]
        self.batch_size = self.batch_sizes[size_ind]
        self.directions_unit_focal = self.directions_unit_focals[size_ind]
        threestudio.debug(
            f"Training height: {self.height}, width: {self.width}, batch_size: {self.batch_size}"
        )
        # progressive view
        self.progressive_view(global_step)

    def __iter__(self):
        # dummy infinite iterator; the actual batch content is built in collate
        while True:
            yield {}

    def progressive_view(self, global_step):
        """Linearly widen elevation/azimuth ranges from the eval pose.

        At step 0 the ranges collapse to the eval elevation / zero azimuth and
        reach the full configured ranges at ``progressive_until`` steps.
        """
        r = min(1.0, global_step / (self.cfg.progressive_until + 1))
        self.elevation_range = [
            (1 - r) * self.cfg.eval_elevation_deg + r * self.cfg.elevation_range[0],
            (1 - r) * self.cfg.eval_elevation_deg + r * self.cfg.elevation_range[1],
        ]
        self.azimuth_range = [
            (1 - r) * 0.0 + r * self.cfg.azimuth_range[0],
            (1 - r) * 0.0 + r * self.cfg.azimuth_range[1],
        ]
        # self.camera_distance_range = [
        #     (1 - r) * self.cfg.eval_camera_distance
        #     + r * self.cfg.camera_distance_range[0],
        #     (1 - r) * self.cfg.eval_camera_distance
        #     + r * self.cfg.camera_distance_range[1],
        # ]
        # self.fovy_range = [
        #     (1 - r) * self.cfg.eval_fovy_deg + r * self.cfg.fovy_range[0],
        #     (1 - r) * self.cfg.eval_fovy_deg + r * self.cfg.fovy_range[1],
        # ]

    def collate(self, batch) -> Dict[str, Any]:
        """Sample a fresh batch of random cameras.

        Returns normalized rays, MVP and c2w matrices, light positions, and
        the sampled camera parameters (elevation/azimuth in degrees).
        """
        # sample elevation angles
        elevation_deg: Float[Tensor, "B"]
        elevation: Float[Tensor, "B"]
        if random.random() < 0.5:
            # sample elevation angles uniformly with a probability 0.5 (biased towards poles)
            elevation_deg = (
                torch.rand(self.batch_size)
                * (self.elevation_range[1] - self.elevation_range[0])
                + self.elevation_range[0]
            )
            elevation = elevation_deg * math.pi / 180
        else:
            # otherwise sample uniformly on sphere
            elevation_range_percent = [
                (self.elevation_range[0] + 90.0) / 180.0,
                (self.elevation_range[1] + 90.0) / 180.0,
            ]
            # inverse transform sampling
            elevation = torch.asin(
                2
                * (
                    torch.rand(self.batch_size)
                    * (elevation_range_percent[1] - elevation_range_percent[0])
                    + elevation_range_percent[0]
                )
                - 1.0
            )
            elevation_deg = elevation / math.pi * 180.0

        # sample azimuth angles from a uniform distribution bounded by azimuth_range
        azimuth_deg: Float[Tensor, "B"]
        if self.cfg.batch_uniform_azimuth:
            # ensures sampled azimuth angles in a batch cover the whole range
            azimuth_deg = (
                torch.rand(self.batch_size) + torch.arange(self.batch_size)
            ) / self.batch_size * (
                self.azimuth_range[1] - self.azimuth_range[0]
            ) + self.azimuth_range[
                0
            ]
        else:
            # simple random sampling
            azimuth_deg = (
                torch.rand(self.batch_size)
                * (self.azimuth_range[1] - self.azimuth_range[0])
                + self.azimuth_range[0]
            )
        azimuth = azimuth_deg * math.pi / 180

        # sample distances from a uniform distribution bounded by distance_range
        camera_distances: Float[Tensor, "B"] = (
            torch.rand(self.batch_size)
            * (self.camera_distance_range[1] - self.camera_distance_range[0])
            + self.camera_distance_range[0]
        )

        # convert spherical coordinates to cartesian coordinates
        # right hand coordinate system, x back, y right, z up
        # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)
        camera_positions: Float[Tensor, "B 3"] = torch.stack(
            [
                camera_distances * torch.cos(elevation) * torch.cos(azimuth),
                camera_distances * torch.cos(elevation) * torch.sin(azimuth),
                camera_distances * torch.sin(elevation),
            ],
            dim=-1,
        )

        # default scene center at origin
        center: Float[Tensor, "B 3"] = torch.zeros_like(camera_positions)
        # default camera up direction as +z
        up: Float[Tensor, "B 3"] = torch.as_tensor([0, 0, 1], dtype=torch.float32)[
            None, :
        ].repeat(self.batch_size, 1)

        # sample camera perturbations from a uniform distribution [-camera_perturb, camera_perturb]
        camera_perturb: Float[Tensor, "B 3"] = (
            torch.rand(self.batch_size, 3) * 2 * self.cfg.camera_perturb
            - self.cfg.camera_perturb
        )
        camera_positions = camera_positions + camera_perturb
        # sample center perturbations from a normal distribution with mean 0 and std center_perturb
        center_perturb: Float[Tensor, "B 3"] = (
            torch.randn(self.batch_size, 3) * self.cfg.center_perturb
        )
        center = center + center_perturb
        # sample up perturbations from a normal distribution with mean 0 and std up_perturb
        up_perturb: Float[Tensor, "B 3"] = (
            torch.randn(self.batch_size, 3) * self.cfg.up_perturb
        )
        up = up + up_perturb

        # sample fovs from a uniform distribution bounded by fov_range
        fovy_deg: Float[Tensor, "B"] = (
            torch.rand(self.batch_size) * (self.fovy_range[1] - self.fovy_range[0])
            + self.fovy_range[0]
        )
        fovy = fovy_deg * math.pi / 180

        # sample light distance from a uniform distribution bounded by light_distance_range
        light_distances: Float[Tensor, "B"] = (
            torch.rand(self.batch_size)
            * (self.cfg.light_distance_range[1] - self.cfg.light_distance_range[0])
            + self.cfg.light_distance_range[0]
        )

        if self.cfg.light_sample_strategy == "dreamfusion":
            # sample light direction from a normal distribution with mean camera_position and std light_position_perturb
            light_direction: Float[Tensor, "B 3"] = F.normalize(
                camera_positions
                + torch.randn(self.batch_size, 3) * self.cfg.light_position_perturb,
                dim=-1,
            )
            # get light position by scaling light direction by light distance
            light_positions: Float[Tensor, "B 3"] = (
                light_direction * light_distances[:, None]
            )
        elif self.cfg.light_sample_strategy == "magic3d":
            # sample light direction within restricted angle range (pi/3)
            local_z = F.normalize(camera_positions, dim=-1)
            local_x = F.normalize(
                torch.stack(
                    [local_z[:, 1], -local_z[:, 0], torch.zeros_like(local_z[:, 0])],
                    dim=-1,
                ),
                dim=-1,
            )
            local_y = F.normalize(torch.cross(local_z, local_x, dim=-1), dim=-1)
            rot = torch.stack([local_x, local_y, local_z], dim=-1)
            light_azimuth = (
                torch.rand(self.batch_size) * math.pi * 2 - math.pi
            )  # [-pi, pi]
            light_elevation = (
                torch.rand(self.batch_size) * math.pi / 3 + math.pi / 6
            )  # [pi/6, pi/2]
            light_positions_local = torch.stack(
                [
                    light_distances
                    * torch.cos(light_elevation)
                    * torch.cos(light_azimuth),
                    light_distances
                    * torch.cos(light_elevation)
                    * torch.sin(light_azimuth),
                    light_distances * torch.sin(light_elevation),
                ],
                dim=-1,
            )
            light_positions = (rot @ light_positions_local[:, :, None])[:, :, 0]
        else:
            raise ValueError(
                f"Unknown light sample strategy: {self.cfg.light_sample_strategy}"
            )

        lookat: Float[Tensor, "B 3"] = F.normalize(center - camera_positions, dim=-1)
        # FIX: dim=-1 is required here. Without it torch.cross picks the first
        # dim of size 3, which is the *batch* dim whenever batch_size == 3.
        right: Float[Tensor, "B 3"] = F.normalize(
            torch.cross(lookat, up, dim=-1), dim=-1
        )
        up = F.normalize(torch.cross(right, lookat, dim=-1), dim=-1)
        c2w3x4: Float[Tensor, "B 3 4"] = torch.cat(
            [torch.stack([right, up, -lookat], dim=-1), camera_positions[:, :, None]],
            dim=-1,
        )
        c2w: Float[Tensor, "B 4 4"] = torch.cat(
            [c2w3x4, torch.zeros_like(c2w3x4[:, :1])], dim=1
        )
        c2w[:, 3, 3] = 1.0

        # get directions by dividing directions_unit_focal by focal length
        focal_length: Float[Tensor, "B"] = 0.5 * self.height / torch.tan(0.5 * fovy)
        directions: Float[Tensor, "B H W 3"] = self.directions_unit_focal[
            None, :, :, :
        ].repeat(self.batch_size, 1, 1, 1)
        directions[:, :, :, :2] = (
            directions[:, :, :, :2] / focal_length[:, None, None, None]
        )

        # Importance note: the returned rays_d MUST be normalized!
        rays_o, rays_d = get_rays(directions, c2w, keepdim=True)

        proj_mtx: Float[Tensor, "B 4 4"] = get_projection_matrix(
            fovy, self.width / self.height, 0.1, 1000.0
        )  # FIXME: hard-coded near and far
        mvp_mtx: Float[Tensor, "B 4 4"] = get_mvp_matrix(c2w, proj_mtx)

        return {
            "rays_o": rays_o,
            "rays_d": rays_d,
            "mvp_mtx": mvp_mtx,
            "camera_positions": camera_positions,
            "c2w": c2w,
            "light_positions": light_positions,
            "elevation": elevation_deg,
            "azimuth": azimuth_deg,
            "camera_distances": camera_distances,
            "height": self.height,
            "width": self.width,
        }
class RandomCameraDataset(Dataset):
    """Deterministic camera trajectory for validation / testing.

    Renders ``n_views`` cameras evenly spaced in azimuth at the fixed
    evaluation elevation / distance / fov; all rays and matrices are
    precomputed once in ``__init__``.
    """

    def __init__(self, cfg: Any, split: str) -> None:
        super().__init__()
        self.cfg: RandomCameraDataModuleConfig = cfg
        self.split = split

        if split == "val":
            self.n_views = self.cfg.n_val_views
        else:
            self.n_views = self.cfg.n_test_views

        azimuth_deg: Float[Tensor, "B"]
        if self.split == "val":
            # make sure the first and last view are not the same
            azimuth_deg = torch.linspace(0, 360.0, self.n_views + 1)[: self.n_views]
        else:
            azimuth_deg = torch.linspace(0, 360.0, self.n_views)
        elevation_deg: Float[Tensor, "B"] = torch.full_like(
            azimuth_deg, self.cfg.eval_elevation_deg
        )
        camera_distances: Float[Tensor, "B"] = torch.full_like(
            elevation_deg, self.cfg.eval_camera_distance
        )

        elevation = elevation_deg * math.pi / 180
        azimuth = azimuth_deg * math.pi / 180

        # convert spherical coordinates to cartesian coordinates
        # right hand coordinate system, x back, y right, z up
        # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)
        camera_positions: Float[Tensor, "B 3"] = torch.stack(
            [
                camera_distances * torch.cos(elevation) * torch.cos(azimuth),
                camera_distances * torch.cos(elevation) * torch.sin(azimuth),
                camera_distances * torch.sin(elevation),
            ],
            dim=-1,
        )

        # default scene center at origin
        center: Float[Tensor, "B 3"] = torch.zeros_like(camera_positions)
        # default camera up direction as +z
        # FIX: sized by n_views (not eval_batch_size) so it matches the other
        # per-view tensors without relying on broadcasting
        up: Float[Tensor, "B 3"] = torch.as_tensor([0, 0, 1], dtype=torch.float32)[
            None, :
        ].repeat(self.n_views, 1)

        fovy_deg: Float[Tensor, "B"] = torch.full_like(
            elevation_deg, self.cfg.eval_fovy_deg
        )
        fovy = fovy_deg * math.pi / 180
        light_positions: Float[Tensor, "B 3"] = camera_positions

        lookat: Float[Tensor, "B 3"] = F.normalize(center - camera_positions, dim=-1)
        # FIX: dim=-1 is required; without it torch.cross picks the first dim
        # of size 3, which is the batch dim whenever there are exactly 3 views
        right: Float[Tensor, "B 3"] = F.normalize(
            torch.cross(lookat, up, dim=-1), dim=-1
        )
        up = F.normalize(torch.cross(right, lookat, dim=-1), dim=-1)
        c2w3x4: Float[Tensor, "B 3 4"] = torch.cat(
            [torch.stack([right, up, -lookat], dim=-1), camera_positions[:, :, None]],
            dim=-1,
        )
        c2w: Float[Tensor, "B 4 4"] = torch.cat(
            [c2w3x4, torch.zeros_like(c2w3x4[:, :1])], dim=1
        )
        c2w[:, 3, 3] = 1.0

        # get directions by dividing directions_unit_focal by focal length
        focal_length: Float[Tensor, "B"] = (
            0.5 * self.cfg.eval_height / torch.tan(0.5 * fovy)
        )
        directions_unit_focal = get_ray_directions(
            H=self.cfg.eval_height, W=self.cfg.eval_width, focal=1.0
        )
        directions: Float[Tensor, "B H W 3"] = directions_unit_focal[
            None, :, :, :
        ].repeat(self.n_views, 1, 1, 1)
        directions[:, :, :, :2] = (
            directions[:, :, :, :2] / focal_length[:, None, None, None]
        )

        rays_o, rays_d = get_rays(directions, c2w, keepdim=True)
        proj_mtx: Float[Tensor, "B 4 4"] = get_projection_matrix(
            fovy, self.cfg.eval_width / self.cfg.eval_height, 0.1, 1000.0
        )  # FIXME: hard-coded near and far
        mvp_mtx: Float[Tensor, "B 4 4"] = get_mvp_matrix(c2w, proj_mtx)

        self.rays_o, self.rays_d = rays_o, rays_d
        self.mvp_mtx = mvp_mtx
        self.c2w = c2w
        self.camera_positions = camera_positions
        self.light_positions = light_positions
        self.elevation, self.azimuth = elevation, azimuth
        self.elevation_deg, self.azimuth_deg = elevation_deg, azimuth_deg
        self.camera_distances = camera_distances

    def __len__(self):
        return self.n_views

    def __getitem__(self, index):
        """Return the precomputed tensors for the ``index``-th view."""
        return {
            "index": index,
            "rays_o": self.rays_o[index],
            "rays_d": self.rays_d[index],
            "mvp_mtx": self.mvp_mtx[index],
            "c2w": self.c2w[index],
            "camera_positions": self.camera_positions[index],
            "light_positions": self.light_positions[index],
            "elevation": self.elevation_deg[index],
            "azimuth": self.azimuth_deg[index],
            "camera_distances": self.camera_distances[index],
            "height": self.cfg.eval_height,
            "width": self.cfg.eval_width,
        }

    def collate(self, batch):
        """Default-collate items and attach the (shared) eval resolution."""
        batch = torch.utils.data.default_collate(batch)
        batch.update({"height": self.cfg.eval_height, "width": self.cfg.eval_width})
        return batch
@register("random-camera-datamodule")
class RandomCameraDataModule(pl.LightningDataModule):
    """Lightning data module wiring the random-camera datasets to loaders."""

    cfg: RandomCameraDataModuleConfig

    def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
        super().__init__()
        self.cfg = parse_structured(RandomCameraDataModuleConfig, cfg)

    def setup(self, stage=None) -> None:
        # Build only the datasets needed for the requested stage.
        if stage in (None, "fit"):
            self.train_dataset = RandomCameraIterableDataset(self.cfg)
        if stage in (None, "fit", "validate"):
            self.val_dataset = RandomCameraDataset(self.cfg, "val")
        if stage in (None, "test", "predict"):
            self.test_dataset = RandomCameraDataset(self.cfg, "test")

    def prepare_data(self):
        pass

    def general_loader(self, dataset, batch_size, collate_fn=None) -> DataLoader:
        # num_workers must stay 0 so attribute changes made at runtime (e.g.
        # width/height updated in update_step) remain visible to the dataset.
        return DataLoader(
            dataset,
            num_workers=0,  # type: ignore
            batch_size=batch_size,
            collate_fn=collate_fn,
        )

    def train_dataloader(self) -> DataLoader:
        ds = self.train_dataset
        return self.general_loader(ds, batch_size=None, collate_fn=ds.collate)

    def val_dataloader(self) -> DataLoader:
        ds = self.val_dataset
        return self.general_loader(ds, batch_size=1, collate_fn=ds.collate)

    def test_dataloader(self) -> DataLoader:
        ds = self.test_dataset
        return self.general_loader(ds, batch_size=1, collate_fn=ds.collate)

    def predict_dataloader(self) -> DataLoader:
        ds = self.test_dataset
        return self.general_loader(ds, batch_size=1, collate_fn=ds.collate)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/data/uncond_multiview.py | threestudio/data/uncond_multiview.py | import math
import os
import random
from dataclasses import dataclass, field
import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, IterableDataset
from threestudio import register
from threestudio.data.uncond import (
RandomCameraDataModuleConfig,
RandomCameraDataset,
RandomCameraIterableDataset,
)
from threestudio.utils.base import Updateable
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import get_rank
from threestudio.utils.ops import (
get_mvp_matrix,
get_projection_matrix,
get_ray_directions,
get_rays,
)
from threestudio.utils.typing import *
@dataclass
class RandomMultiviewCameraDataModuleConfig(RandomCameraDataModuleConfig):
    """Config extending random-camera sampling with multi-view options."""

    # if True, camera distance is multiplied by 1 / tan(fovy / 2)
    relative_radius: bool = True
    # number of views sampled per object; batch_size must be divisible by it
    n_view: int = 1
    # multiplicative fov zoom range applied after the camera distance is fixed
    zoom_range: Tuple[float, float] = (1.0, 1.0)
class RandomMultiviewCameraIterableDataset(RandomCameraIterableDataset):
    """Random-camera dataset that samples groups of ``n_view`` views.

    Each group of ``n_view`` consecutive batch items shares elevation,
    distance, fov and perturbations (via ``repeat_interleave``) while the
    azimuths are evenly spread around the circle.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.zoom_range = self.cfg.zoom_range

    def collate(self, batch) -> Dict[str, Any]:
        """Sample a batch of ``batch_size // n_view`` multi-view camera groups."""
        assert (
            self.batch_size % self.cfg.n_view == 0
        ), f"batch_size ({self.batch_size}) must be dividable by n_view ({self.cfg.n_view})!"
        real_batch_size = self.batch_size // self.cfg.n_view

        # sample elevation angles
        elevation_deg: Float[Tensor, "B"]
        elevation: Float[Tensor, "B"]
        if random.random() < 0.5:
            # sample elevation angles uniformly with a probability 0.5 (biased towards poles)
            elevation_deg = (
                torch.rand(real_batch_size)
                * (self.elevation_range[1] - self.elevation_range[0])
                + self.elevation_range[0]
            ).repeat_interleave(self.cfg.n_view, dim=0)
            elevation = elevation_deg * math.pi / 180
        else:
            # otherwise sample uniformly on sphere
            elevation_range_percent = [
                (self.elevation_range[0] + 90.0) / 180.0,
                (self.elevation_range[1] + 90.0) / 180.0,
            ]
            # inverse transform sampling
            elevation = torch.asin(
                2
                * (
                    torch.rand(real_batch_size)
                    * (elevation_range_percent[1] - elevation_range_percent[0])
                    + elevation_range_percent[0]
                )
                - 1.0
            ).repeat_interleave(self.cfg.n_view, dim=0)
            elevation_deg = elevation / math.pi * 180.0

        # sample azimuth angles from a uniform distribution bounded by azimuth_range
        azimuth_deg: Float[Tensor, "B"]
        # ensures sampled azimuth angles in a batch cover the whole range
        azimuth_deg = (
            torch.rand(real_batch_size).reshape(-1, 1)
            + torch.arange(self.cfg.n_view).reshape(1, -1)
        ).reshape(-1) / self.cfg.n_view * (
            self.azimuth_range[1] - self.azimuth_range[0]
        ) + self.azimuth_range[
            0
        ]
        azimuth = azimuth_deg * math.pi / 180

        ######## Different from original ########
        # sample fovs from a uniform distribution bounded by fov_range
        fovy_deg: Float[Tensor, "B"] = (
            torch.rand(real_batch_size) * (self.fovy_range[1] - self.fovy_range[0])
            + self.fovy_range[0]
        ).repeat_interleave(self.cfg.n_view, dim=0)
        fovy = fovy_deg * math.pi / 180

        # sample distances from a uniform distribution bounded by distance_range
        camera_distances: Float[Tensor, "B"] = (
            torch.rand(real_batch_size)
            * (self.camera_distance_range[1] - self.camera_distance_range[0])
            + self.camera_distance_range[0]
        ).repeat_interleave(self.cfg.n_view, dim=0)
        if self.cfg.relative_radius:
            # scale distance with 1/tan(fov/2) so apparent object size is
            # decoupled from the sampled fov
            scale = 1 / torch.tan(0.5 * fovy)
            camera_distances = scale * camera_distances

        # zoom in by decreasing fov after camera distance is fixed
        zoom: Float[Tensor, "B"] = (
            torch.rand(real_batch_size) * (self.zoom_range[1] - self.zoom_range[0])
            + self.zoom_range[0]
        ).repeat_interleave(self.cfg.n_view, dim=0)
        fovy = fovy * zoom
        fovy_deg = fovy_deg * zoom
        ###########################################

        # convert spherical coordinates to cartesian coordinates
        # right hand coordinate system, x back, y right, z up
        # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)
        camera_positions: Float[Tensor, "B 3"] = torch.stack(
            [
                camera_distances * torch.cos(elevation) * torch.cos(azimuth),
                camera_distances * torch.cos(elevation) * torch.sin(azimuth),
                camera_distances * torch.sin(elevation),
            ],
            dim=-1,
        )

        # default scene center at origin
        center: Float[Tensor, "B 3"] = torch.zeros_like(camera_positions)
        # default camera up direction as +z
        up: Float[Tensor, "B 3"] = torch.as_tensor([0, 0, 1], dtype=torch.float32)[
            None, :
        ].repeat(self.batch_size, 1)

        # sample camera perturbations from a uniform distribution [-camera_perturb, camera_perturb]
        camera_perturb: Float[Tensor, "B 3"] = (
            torch.rand(real_batch_size, 3) * 2 * self.cfg.camera_perturb
            - self.cfg.camera_perturb
        ).repeat_interleave(self.cfg.n_view, dim=0)
        camera_positions = camera_positions + camera_perturb
        # sample center perturbations from a normal distribution with mean 0 and std center_perturb
        center_perturb: Float[Tensor, "B 3"] = (
            torch.randn(real_batch_size, 3) * self.cfg.center_perturb
        ).repeat_interleave(self.cfg.n_view, dim=0)
        center = center + center_perturb
        # sample up perturbations from a normal distribution with mean 0 and std up_perturb
        up_perturb: Float[Tensor, "B 3"] = (
            torch.randn(real_batch_size, 3) * self.cfg.up_perturb
        ).repeat_interleave(self.cfg.n_view, dim=0)
        up = up + up_perturb

        # sample light distance from a uniform distribution bounded by light_distance_range
        light_distances: Float[Tensor, "B"] = (
            torch.rand(real_batch_size)
            * (self.cfg.light_distance_range[1] - self.cfg.light_distance_range[0])
            + self.cfg.light_distance_range[0]
        ).repeat_interleave(self.cfg.n_view, dim=0)

        if self.cfg.light_sample_strategy == "dreamfusion":
            # sample light direction from a normal distribution with mean camera_position and std light_position_perturb
            light_direction: Float[Tensor, "B 3"] = F.normalize(
                camera_positions
                + torch.randn(real_batch_size, 3).repeat_interleave(
                    self.cfg.n_view, dim=0
                )
                * self.cfg.light_position_perturb,
                dim=-1,
            )
            # get light position by scaling light direction by light distance
            light_positions: Float[Tensor, "B 3"] = (
                light_direction * light_distances[:, None]
            )
        elif self.cfg.light_sample_strategy == "magic3d":
            # sample light direction within restricted angle range (pi/3)
            local_z = F.normalize(camera_positions, dim=-1)
            local_x = F.normalize(
                torch.stack(
                    [local_z[:, 1], -local_z[:, 0], torch.zeros_like(local_z[:, 0])],
                    dim=-1,
                ),
                dim=-1,
            )
            local_y = F.normalize(torch.cross(local_z, local_x, dim=-1), dim=-1)
            rot = torch.stack([local_x, local_y, local_z], dim=-1)
            # FIX: was `* math.pi - 2 * math.pi`, which sampled only
            # [-2*pi, -pi] (half the circle); use the same formula as the
            # base class to actually cover [-pi, pi]
            light_azimuth = (
                torch.rand(real_batch_size) * math.pi * 2 - math.pi
            ).repeat_interleave(
                self.cfg.n_view, dim=0
            )  # [-pi, pi]
            light_elevation = (
                torch.rand(real_batch_size) * math.pi / 3 + math.pi / 6
            ).repeat_interleave(
                self.cfg.n_view, dim=0
            )  # [pi/6, pi/2]
            light_positions_local = torch.stack(
                [
                    light_distances
                    * torch.cos(light_elevation)
                    * torch.cos(light_azimuth),
                    light_distances
                    * torch.cos(light_elevation)
                    * torch.sin(light_azimuth),
                    light_distances * torch.sin(light_elevation),
                ],
                dim=-1,
            )
            light_positions = (rot @ light_positions_local[:, :, None])[:, :, 0]
        else:
            raise ValueError(
                f"Unknown light sample strategy: {self.cfg.light_sample_strategy}"
            )

        lookat: Float[Tensor, "B 3"] = F.normalize(center - camera_positions, dim=-1)
        # FIX: dim=-1 is required; without it torch.cross picks the first dim
        # of size 3, which is the batch dim whenever batch_size == 3
        right: Float[Tensor, "B 3"] = F.normalize(
            torch.cross(lookat, up, dim=-1), dim=-1
        )
        up = F.normalize(torch.cross(right, lookat, dim=-1), dim=-1)
        c2w3x4: Float[Tensor, "B 3 4"] = torch.cat(
            [torch.stack([right, up, -lookat], dim=-1), camera_positions[:, :, None]],
            dim=-1,
        )
        c2w: Float[Tensor, "B 4 4"] = torch.cat(
            [c2w3x4, torch.zeros_like(c2w3x4[:, :1])], dim=1
        )
        c2w[:, 3, 3] = 1.0

        # get directions by dividing directions_unit_focal by focal length
        focal_length: Float[Tensor, "B"] = 0.5 * self.height / torch.tan(0.5 * fovy)
        directions: Float[Tensor, "B H W 3"] = self.directions_unit_focal[
            None, :, :, :
        ].repeat(self.batch_size, 1, 1, 1)
        directions[:, :, :, :2] = (
            directions[:, :, :, :2] / focal_length[:, None, None, None]
        )

        # Importance note: the returned rays_d MUST be normalized!
        rays_o, rays_d = get_rays(directions, c2w, keepdim=True)
        proj_mtx: Float[Tensor, "B 4 4"] = get_projection_matrix(
            fovy, self.width / self.height, 0.1, 1000.0
        )  # FIXME: hard-coded near and far
        mvp_mtx: Float[Tensor, "B 4 4"] = get_mvp_matrix(c2w, proj_mtx)

        return {
            "rays_o": rays_o,
            "rays_d": rays_d,
            "mvp_mtx": mvp_mtx,
            "camera_positions": camera_positions,
            "c2w": c2w,
            "light_positions": light_positions,
            "elevation": elevation_deg,
            "azimuth": azimuth_deg,
            "camera_distances": camera_distances,
            "height": self.height,
            "width": self.width,
            "fovy": fovy_deg,
        }
@register("random-multiview-camera-datamodule")
class RandomMultiviewCameraDataModule(pl.LightningDataModule):
    """Lightning data module for the multi-view random-camera datasets."""

    cfg: RandomMultiviewCameraDataModuleConfig

    def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
        super().__init__()
        self.cfg = parse_structured(RandomMultiviewCameraDataModuleConfig, cfg)

    def setup(self, stage=None) -> None:
        # Build only the datasets needed for the requested stage.
        if stage in (None, "fit"):
            self.train_dataset = RandomMultiviewCameraIterableDataset(self.cfg)
        if stage in (None, "fit", "validate"):
            self.val_dataset = RandomCameraDataset(self.cfg, "val")
        if stage in (None, "test", "predict"):
            self.test_dataset = RandomCameraDataset(self.cfg, "test")

    def prepare_data(self):
        pass

    def general_loader(self, dataset, batch_size, collate_fn=None) -> DataLoader:
        # num_workers must stay 0 so attribute changes made at runtime (e.g.
        # width/height updated in update_step) remain visible to the dataset.
        return DataLoader(
            dataset,
            num_workers=0,  # type: ignore
            batch_size=batch_size,
            collate_fn=collate_fn,
        )

    def train_dataloader(self) -> DataLoader:
        ds = self.train_dataset
        return self.general_loader(ds, batch_size=None, collate_fn=ds.collate)

    def val_dataloader(self) -> DataLoader:
        ds = self.val_dataset
        return self.general_loader(ds, batch_size=1, collate_fn=ds.collate)

    def test_dataloader(self) -> DataLoader:
        ds = self.test_dataset
        return self.general_loader(ds, batch_size=1, collate_fn=ds.collate)

    def predict_dataloader(self) -> DataLoader:
        ds = self.test_dataset
        return self.general_loader(ds, batch_size=1, collate_fn=ds.collate)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/data/multiview.py | threestudio/data/multiview.py | import json
import math
import os
import random
from dataclasses import dataclass
import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
from torch.utils.data import DataLoader, Dataset, IterableDataset
from tqdm import tqdm
import threestudio
from threestudio import register
from threestudio.utils.config import parse_structured
from threestudio.utils.ops import get_mvp_matrix, get_ray_directions, get_rays
from threestudio.utils.typing import *
def convert_pose(C2W):
    """Flip the Y and Z camera axes of a 4x4 pose matrix.

    Right-multiplies by diag(1, -1, -1, 1); commonly used to switch between
    OpenGL- and OpenCV-style camera conventions (assumption — verify against
    callers).
    """
    axis_flip = torch.eye(4)
    axis_flip[1, 1] = axis_flip[2, 2] = -1
    return C2W @ axis_flip
def convert_proj(K, H, W, near, far):
    """Build a 4x4 OpenGL-style projection matrix from pinhole intrinsics.

    ``K`` is an intrinsics matrix indexed as K[row, col] (fx, fy on the
    diagonal, principal point in the last column); ``H``/``W`` are the image
    size and ``near``/``far`` the clip planes. Returns a nested list.
    """
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    skew = K[0, 1]
    depth_range = far - near
    return [
        [2 * fx / W, -2 * skew / W, (W - 2 * cx) / W, 0],
        [0, -2 * fy / H, (H - 2 * cy) / H, 0],
        [0, 0, (-far - near) / depth_range, -2 * far * near / depth_range],
        [0, 0, -1, 0],
    ]
def inter_pose(pose_0, pose_1, ratio):
    """Interpolate between two 4x4 poses (torch tensors) at ``ratio``.

    Both inputs are inverted first; in that inverted frame the rotation is
    slerped and the translation linearly blended, and the inverse of the
    blend is returned as a numpy matrix.
    """
    inv_0 = np.linalg.inv(pose_0.detach().cpu().numpy())
    inv_1 = np.linalg.inv(pose_1.detach().cpu().numpy())

    # spherical interpolation of the rotation part
    rotations = Rot.from_matrix(np.stack([inv_0[:3, :3], inv_1[:3, :3]]))
    slerped = Slerp([0, 1], rotations)(ratio)

    blended = np.eye(4, dtype=np.float32)
    blended[:3, :3] = slerped.as_matrix()
    # linear interpolation of the translation part
    blended[:3, 3] = ((1.0 - ratio) * inv_0 + ratio * inv_1)[:3, 3]
    return np.linalg.inv(blended)
@dataclass
class MultiviewsDataModuleConfig:
    """Configuration for loading a captured multi-view dataset (transforms.json)."""

    dataroot: str = ""  # directory containing transforms.json and the images
    # integer factor by which image resolution and intrinsics are downsampled
    train_downsample_resolution: int = 4
    eval_downsample_resolution: int = 4
    # keep every k-th frame from transforms.json
    train_data_interval: int = 1
    eval_data_interval: int = 1
    batch_size: int = 1
    eval_batch_size: int = 1
    # "around": recenter cameras at their mean position; "front": additionally
    # push cameras back along their mean viewing direction by camera_distance
    camera_layout: str = "around"
    camera_distance: float = -1
    eval_interpolation: Optional[Tuple[int, int, int]] = None  # (0, 1, 30)
class MultiviewIterableDataset(IterableDataset):
    """Streams random frames from a captured multi-view dataset.

    Loads all frames (images, intrinsics, extrinsics) from
    ``<dataroot>/transforms.json`` up front, recenters the camera layout, and
    precomputes rays and MVP matrices; ``collate`` then picks one random
    frame per training step.
    """

    def __init__(self, cfg: Any) -> None:
        super().__init__()
        self.cfg: MultiviewsDataModuleConfig = cfg

        assert self.cfg.batch_size == 1
        scale = self.cfg.train_downsample_resolution

        camera_dict = json.load(
            open(os.path.join(self.cfg.dataroot, "transforms.json"), "r")
        )
        assert camera_dict["camera_model"] == "OPENCV"

        frames = camera_dict["frames"]
        frames = frames[:: self.cfg.train_data_interval]
        frames_proj = []
        frames_c2w = []
        frames_position = []
        frames_direction = []
        frames_img = []

        # downsampled render resolution, taken from the first frame
        self.frame_w = frames[0]["w"] // scale
        self.frame_h = frames[0]["h"] // scale
        threestudio.info("Loading frames...")
        self.n_frames = len(frames)

        c2w_list = []
        for frame in tqdm(frames):
            extrinsic: Float[Tensor, "4 4"] = torch.as_tensor(
                frame["transform_matrix"], dtype=torch.float32
            )
            c2w = extrinsic
            c2w_list.append(c2w)
        c2w_list = torch.stack(c2w_list, dim=0)

        if self.cfg.camera_layout == "around":
            # recenter the scene at the mean camera position
            c2w_list[:, :3, 3] -= torch.mean(c2w_list[:, :3, 3], dim=0).unsqueeze(0)
        elif self.cfg.camera_layout == "front":
            assert self.cfg.camera_distance > 0
            c2w_list[:, :3, 3] -= torch.mean(c2w_list[:, :3, 3], dim=0).unsqueeze(0)
            # additionally push cameras back along the mean rotated -z axis by
            # camera_distance
            z_vector = torch.zeros(c2w_list.shape[0], 3, 1)
            z_vector[:, 2, :] = -1
            rot_z_vector = c2w_list[:, :3, :3] @ z_vector
            rot_z_vector = torch.mean(rot_z_vector, dim=0).unsqueeze(0)
            c2w_list[:, :3, 3] -= rot_z_vector[:, :, 0] * self.cfg.camera_distance
        else:
            raise ValueError(
                f"Unknown camera layout {self.cfg.camera_layout}. Now support only around and front."
            )

        for idx, frame in tqdm(enumerate(frames)):
            # intrinsics scaled to the downsampled resolution
            intrinsic: Float[Tensor, "4 4"] = torch.eye(4)
            intrinsic[0, 0] = frame["fl_x"] / scale
            intrinsic[1, 1] = frame["fl_y"] / scale
            intrinsic[0, 2] = frame["cx"] / scale
            intrinsic[1, 2] = frame["cy"] / scale

            frame_path = os.path.join(self.cfg.dataroot, frame["file_path"])
            # BGR -> RGB (cv2.imread loads BGR), resized, normalized to [0, 1]
            img = cv2.imread(frame_path)[:, :, ::-1].copy()
            img = cv2.resize(img, (self.frame_w, self.frame_h))
            img: Float[Tensor, "H W 3"] = torch.FloatTensor(img) / 255
            frames_img.append(img)

            direction: Float[Tensor, "H W 3"] = get_ray_directions(
                self.frame_h,
                self.frame_w,
                (intrinsic[0, 0], intrinsic[1, 1]),
                (intrinsic[0, 2], intrinsic[1, 2]),
                use_pixel_centers=False,
            )

            c2w = c2w_list[idx]
            camera_position: Float[Tensor, "3"] = c2w[:3, 3:].reshape(-1)

            # NOTE(review): near/far clip planes are hard-coded here
            near = 0.1
            far = 1000.0
            proj = convert_proj(intrinsic, self.frame_h, self.frame_w, near, far)
            proj: Float[Tensor, "4 4"] = torch.FloatTensor(proj)
            frames_proj.append(proj)
            frames_c2w.append(c2w)
            frames_position.append(camera_position)
            frames_direction.append(direction)
        threestudio.info("Loaded frames.")

        self.frames_proj: Float[Tensor, "B 4 4"] = torch.stack(frames_proj, dim=0)
        self.frames_c2w: Float[Tensor, "B 4 4"] = torch.stack(frames_c2w, dim=0)
        self.frames_position: Float[Tensor, "B 3"] = torch.stack(frames_position, dim=0)
        self.frames_direction: Float[Tensor, "B H W 3"] = torch.stack(
            frames_direction, dim=0
        )
        self.frames_img: Float[Tensor, "B H W 3"] = torch.stack(frames_img, dim=0)

        self.rays_o, self.rays_d = get_rays(
            self.frames_direction, self.frames_c2w, keepdim=True
        )
        self.mvp_mtx: Float[Tensor, "B 4 4"] = get_mvp_matrix(
            self.frames_c2w, self.frames_proj
        )
        # no light annotations in the capture; all lights placed at the origin
        self.light_positions: Float[Tensor, "B 3"] = torch.zeros_like(
            self.frames_position
        )

    def __iter__(self):
        # dummy infinite iterator; actual batch content is built in collate
        while True:
            yield {}

    def collate(self, batch):
        """Pick one random frame and return its rays, matrices and GT image."""
        index = torch.randint(0, self.n_frames, (1,)).item()
        return {
            "index": index,
            "rays_o": self.rays_o[index : index + 1],
            "rays_d": self.rays_d[index : index + 1],
            "mvp_mtx": self.mvp_mtx[index : index + 1],
            "c2w": self.frames_c2w[index : index + 1],
            "camera_positions": self.frames_position[index : index + 1],
            "light_positions": self.light_positions[index : index + 1],
            "gt_rgb": self.frames_img[index : index + 1],
            "height": self.frame_h,
            "width": self.frame_w,
        }
class MultiviewDataset(Dataset):
    """Map-style dataset of calibrated multi-view frames (val/test).

    Reads an OPENCV-convention ``transforms.json`` under ``cfg.dataroot``,
    recenters the camera layout, and precomputes per-frame rays, MVP
    matrices and ground-truth images. If ``cfg.eval_interpolation`` is
    set, poses are interpolated between two reference frames instead of
    using the recorded ones (ground-truth images are then all zeros).
    """

    def __init__(self, cfg: Any, split: str) -> None:
        # `split` is accepted for interface symmetry but not used here.
        super().__init__()
        self.cfg: MultiviewsDataModuleConfig = cfg

        assert self.cfg.eval_batch_size == 1
        # Downsample factor applied to both resolution and intrinsics.
        scale = self.cfg.eval_downsample_resolution
        camera_dict = json.load(
            open(os.path.join(self.cfg.dataroot, "transforms.json"), "r")
        )
        assert camera_dict["camera_model"] == "OPENCV"

        frames = camera_dict["frames"]
        # Optionally subsample the evaluation views.
        frames = frames[:: self.cfg.eval_data_interval]
        frames_proj = []
        frames_c2w = []
        frames_position = []
        frames_direction = []
        frames_img = []

        # All frames are assumed to share the resolution of the first one.
        self.frame_w = frames[0]["w"] // scale
        self.frame_h = frames[0]["h"] // scale
        threestudio.info("Loading frames...")
        self.n_frames = len(frames)

        c2w_list = []
        for frame in tqdm(frames):
            extrinsic: Float[Tensor, "4 4"] = torch.as_tensor(
                frame["transform_matrix"], dtype=torch.float32
            )
            c2w = extrinsic
            c2w_list.append(c2w)
        c2w_list = torch.stack(c2w_list, dim=0)

        if self.cfg.camera_layout == "around":
            # Recenter camera positions on their centroid.
            c2w_list[:, :3, 3] -= torch.mean(c2w_list[:, :3, 3], dim=0).unsqueeze(0)
        elif self.cfg.camera_layout == "front":
            assert self.cfg.camera_distance > 0
            c2w_list[:, :3, 3] -= torch.mean(c2w_list[:, :3, 3], dim=0).unsqueeze(0)
            # Mean viewing direction of all frames (-z in camera space).
            z_vector = torch.zeros(c2w_list.shape[0], 3, 1)
            z_vector[:, 2, :] = -1
            rot_z_vector = c2w_list[:, :3, :3] @ z_vector
            rot_z_vector = torch.mean(rot_z_vector, dim=0).unsqueeze(0)
            # Pull cameras back along the mean view direction so the scene
            # sits `camera_distance` in front of them.
            c2w_list[:, :3, 3] -= rot_z_vector[:, :, 0] * self.cfg.camera_distance
        else:
            raise ValueError(
                f"Unknown camera layout {self.cfg.camera_layout}. Now support only around and front."
            )

        if not (self.cfg.eval_interpolation is None):
            # Interpolate `eval_nums` poses between frames idx0 and idx1,
            # reusing the intrinsics of frame idx0.
            idx0 = self.cfg.eval_interpolation[0]
            idx1 = self.cfg.eval_interpolation[1]
            eval_nums = self.cfg.eval_interpolation[2]
            frame = frames[idx0]
            intrinsic: Float[Tensor, "4 4"] = torch.eye(4)
            intrinsic[0, 0] = frame["fl_x"] / scale
            intrinsic[1, 1] = frame["fl_y"] / scale
            intrinsic[0, 2] = frame["cx"] / scale
            intrinsic[1, 2] = frame["cy"] / scale
            for ratio in np.linspace(0, 1, eval_nums):
                # No ground truth exists for interpolated poses; use zeros.
                img: Float[Tensor, "H W 3"] = torch.zeros(
                    (self.frame_h, self.frame_w, 3)
                )
                frames_img.append(img)
                direction: Float[Tensor, "H W 3"] = get_ray_directions(
                    self.frame_h,
                    self.frame_w,
                    (intrinsic[0, 0], intrinsic[1, 1]),
                    (intrinsic[0, 2], intrinsic[1, 2]),
                    use_pixel_centers=False,
                )
                c2w = torch.FloatTensor(
                    inter_pose(c2w_list[idx0], c2w_list[idx1], ratio)
                )
                camera_position: Float[Tensor, "3"] = c2w[:3, 3:].reshape(-1)
                near = 0.1
                far = 1000.0
                proj = convert_proj(intrinsic, self.frame_h, self.frame_w, near, far)
                proj: Float[Tensor, "4 4"] = torch.FloatTensor(proj)
                frames_proj.append(proj)
                frames_c2w.append(c2w)
                frames_position.append(camera_position)
                frames_direction.append(direction)
        else:
            # Use the recorded poses and load the captured images.
            for idx, frame in tqdm(enumerate(frames)):
                intrinsic: Float[Tensor, "4 4"] = torch.eye(4)
                intrinsic[0, 0] = frame["fl_x"] / scale
                intrinsic[1, 1] = frame["fl_y"] / scale
                intrinsic[0, 2] = frame["cx"] / scale
                intrinsic[1, 2] = frame["cy"] / scale
                frame_path = os.path.join(self.cfg.dataroot, frame["file_path"])
                # cv2 loads BGR; reverse the channel axis to get RGB.
                img = cv2.imread(frame_path)[:, :, ::-1].copy()
                img = cv2.resize(img, (self.frame_w, self.frame_h))
                img: Float[Tensor, "H W 3"] = torch.FloatTensor(img) / 255
                frames_img.append(img)
                direction: Float[Tensor, "H W 3"] = get_ray_directions(
                    self.frame_h,
                    self.frame_w,
                    (intrinsic[0, 0], intrinsic[1, 1]),
                    (intrinsic[0, 2], intrinsic[1, 2]),
                    use_pixel_centers=False,
                )
                c2w = c2w_list[idx]
                camera_position: Float[Tensor, "3"] = c2w[:3, 3:].reshape(-1)
                near = 0.1
                far = 1000.0
                K = intrinsic
                # Projection matrix built inline from the OpenCV intrinsics;
                # presumably equivalent to `convert_proj` used in the branch
                # above -- TODO confirm and deduplicate.
                proj = [
                    [
                        2 * K[0, 0] / self.frame_w,
                        -2 * K[0, 1] / self.frame_w,
                        (self.frame_w - 2 * K[0, 2]) / self.frame_w,
                        0,
                    ],
                    [
                        0,
                        -2 * K[1, 1] / self.frame_h,
                        (self.frame_h - 2 * K[1, 2]) / self.frame_h,
                        0,
                    ],
                    [
                        0,
                        0,
                        (-far - near) / (far - near),
                        -2 * far * near / (far - near),
                    ],
                    [0, 0, -1, 0],
                ]
                proj: Float[Tensor, "4 4"] = torch.FloatTensor(proj)
                frames_proj.append(proj)
                frames_c2w.append(c2w)
                frames_position.append(camera_position)
                frames_direction.append(direction)
        threestudio.info("Loaded frames.")

        # Stack per-frame data into batched tensors and precompute rays.
        self.frames_proj: Float[Tensor, "B 4 4"] = torch.stack(frames_proj, dim=0)
        self.frames_c2w: Float[Tensor, "B 4 4"] = torch.stack(frames_c2w, dim=0)
        self.frames_position: Float[Tensor, "B 3"] = torch.stack(frames_position, dim=0)
        self.frames_direction: Float[Tensor, "B H W 3"] = torch.stack(
            frames_direction, dim=0
        )
        self.frames_img: Float[Tensor, "B H W 3"] = torch.stack(frames_img, dim=0)
        self.rays_o, self.rays_d = get_rays(
            self.frames_direction, self.frames_c2w, keepdim=True
        )
        self.mvp_mtx: Float[Tensor, "B 4 4"] = get_mvp_matrix(
            self.frames_c2w, self.frames_proj
        )
        # Lights are placed at the origin for every frame.
        self.light_positions: Float[Tensor, "B 3"] = torch.zeros_like(
            self.frames_position
        )

    def __len__(self):
        """Number of evaluation views."""
        return self.frames_proj.shape[0]

    def __getitem__(self, index):
        """Return the precomputed camera/ray/GT data for view `index`."""
        return {
            "index": index,
            "rays_o": self.rays_o[index],
            "rays_d": self.rays_d[index],
            "mvp_mtx": self.mvp_mtx[index],
            "c2w": self.frames_c2w[index],
            "camera_positions": self.frames_position[index],
            "light_positions": self.light_positions[index],
            "gt_rgb": self.frames_img[index],
        }

    def __iter__(self):
        # NOTE(review): an infinite __iter__ on a map-style Dataset looks
        # like a leftover from the iterable variant; DataLoader should use
        # __getitem__/__len__ here -- confirm this is intentional.
        while True:
            yield {}

    def collate(self, batch):
        """Default-collate items and attach the shared image resolution."""
        batch = torch.utils.data.default_collate(batch)
        batch.update({"height": self.frame_h, "width": self.frame_w})
        return batch
@register("multiview-camera-datamodule")
class MultiviewDataModule(pl.LightningDataModule):
    """Lightning datamodule wiring the multiview datasets to dataloaders."""

    cfg: MultiviewsDataModuleConfig

    def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
        super().__init__()
        self.cfg = parse_structured(MultiviewsDataModuleConfig, cfg)

    def setup(self, stage=None) -> None:
        """Instantiate the datasets required for the given stage."""
        fitting = stage in [None, "fit"]
        if fitting:
            self.train_dataset = MultiviewIterableDataset(self.cfg)
        if fitting or stage == "validate":
            self.val_dataset = MultiviewDataset(self.cfg, "val")
        if stage in [None, "test", "predict"]:
            self.test_dataset = MultiviewDataset(self.cfg, "test")

    def prepare_data(self):
        # Nothing to download or preprocess up front.
        pass

    def general_loader(self, dataset, batch_size, collate_fn=None) -> DataLoader:
        """Shared DataLoader factory used by every split."""
        return DataLoader(
            dataset,
            num_workers=1,  # type: ignore
            batch_size=batch_size,
            collate_fn=collate_fn,
        )

    def train_dataloader(self) -> DataLoader:
        return self.general_loader(
            self.train_dataset, batch_size=None, collate_fn=self.train_dataset.collate
        )

    def val_dataloader(self) -> DataLoader:
        return self.general_loader(
            self.val_dataset, batch_size=1, collate_fn=self.val_dataset.collate
        )

    def test_dataloader(self) -> DataLoader:
        return self.general_loader(
            self.test_dataset, batch_size=1, collate_fn=self.test_dataset.collate
        )

    def predict_dataloader(self) -> DataLoader:
        # Prediction reuses the test split.
        return self.general_loader(
            self.test_dataset, batch_size=1, collate_fn=self.test_dataset.collate
        )
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/data/__init__.py | threestudio/data/__init__.py | from . import co3d, image, multiview, uncond, uncond_multiview
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/data/co3d.py | threestudio/data/co3d.py | import gzip
import json
import os
import warnings
from dataclasses import dataclass, field
from typing import List
import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision.transforms.functional as TF
from PIL import Image
from torch.utils.data import DataLoader, Dataset, IterableDataset
from threestudio import register
from threestudio.data.uncond import (
RandomCameraDataModuleConfig,
RandomCameraDataset,
RandomCameraIterableDataset,
)
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import get_rank
from threestudio.utils.ops import (
get_mvp_matrix,
get_projection_matrix,
get_ray_directions,
get_rays,
)
from threestudio.utils.typing import *
def _load_16big_png_depth(depth_png) -> np.ndarray:
    """Load a 16-bit depth PNG as a float32 (H, W) array.

    The file stores float16 bit patterns in a 16-bit grayscale PNG; PIL
    reads it as mode "I" (32-bit), so the values are cast to uint16 and
    reinterpreted as float16 before widening to float32.
    """
    with Image.open(depth_png) as depth_pil:
        width, height = depth_pil.size
        raw = np.array(depth_pil, dtype=np.uint16)
        depth = np.frombuffer(raw, dtype=np.float16).astype(np.float32)
        return depth.reshape((height, width))
def _load_depth(path, scale_adjustment) -> np.ndarray:
    """Load and rescale a depth PNG; returns a (1, H, W) float32 array."""
    if not path.lower().endswith(".png"):
        raise ValueError('unsupported depth file name "%s"' % path)
    depth = _load_16big_png_depth(path) * scale_adjustment
    # Zero out NaN/Inf so downstream math stays finite.
    depth[~np.isfinite(depth)] = 0.0
    return depth[None]  # fake feature channel
# Code adapted from https://github.com/eldar/snes/blob/473ff2b1f6/3rdparty/co3d/dataset/co3d_dataset.py
def _get_1d_bounds(arr):
    """Return (first, last) indices of the nonzero entries of a 1-D array."""
    nonzero_indices = np.flatnonzero(arr)
    return nonzero_indices[0], nonzero_indices[-1]
def get_bbox_from_mask(mask, thr, decrease_quant=0.05):
    """Compute an xywh bounding box of the mask pixels above a threshold.

    The threshold is lowered by `decrease_quant` until the binarized mask
    contains more than one pixel, warning once it goes non-positive.
    """
    binarized = np.zeros_like(mask)
    while binarized.sum() <= 1.0:
        binarized = (mask > thr).astype(np.float32)
        thr -= decrease_quant
        if thr <= 0.0:
            warnings.warn(f"Empty masks_for_bbox (thr={thr}) => using full image.")

    # Column/row extents of the surviving foreground pixels.
    x0, x1 = _get_1d_bounds(binarized.sum(axis=-2))
    y0, y1 = _get_1d_bounds(binarized.sum(axis=-1))

    # bbox in xywh
    return x0, y0, x1 - x0, y1 - y0
def get_clamp_bbox(bbox, box_crop_context=0.0, impath=""):
    """Expand an xywh bbox by `box_crop_context` and convert it to xyxy.

    Returns None when the (expanded) box degenerates to an extent <= 1.
    The upper bound gets +1 because it is exclusive.
    """
    if box_crop_context > 0.0:
        ctx = box_crop_context
        bbox = bbox.astype(np.float32)
        # Grow the box symmetrically around its center.
        bbox[0] -= bbox[2] * ctx / 2
        bbox[1] -= bbox[3] * ctx / 2
        bbox[2] += bbox[2] * ctx
        bbox[3] += bbox[3] * ctx

    if (bbox[2:] <= 1.0).any():
        warnings.warn(f"squashed image {impath}!!")
        return None

    bbox[2:] = np.maximum(bbox[2:], 2)  # enforce a minimal 2px extent
    bbox[2:] += bbox[0:2] + 1  # xywh -> [xmin, ymin, xmax, ymax]; exclusive max
    return bbox
def crop_around_box(tensor, bbox, impath=""):
    """Crop an (H, W, ...) array to an xyxy bbox, clamped to image bounds."""
    width = tensor.shape[-2]
    height = tensor.shape[-3]
    # x coordinates clamp against the width axis, y against the height axis.
    bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0.0, width)
    bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0.0, height)
    x_min, y_min, x_max, y_max = bbox.round().astype(np.longlong)
    return tensor[y_min:y_max, x_min:x_max, ...]
def resize_image(image, height, width, mode="bilinear"):
    """Resize an HxWxC numpy image to fit (height, width), keeping aspect.

    The image is scaled by the smaller of the two ratios and placed in the
    top-left corner of a zero-padded canvas. Returns (resized_image, scale,
    validity_mask) where the mask is 1.0 over the resized content.
    """
    if image.shape[:2] == (height, width):
        # Already the target size: nothing to do.
        return image, 1.0, np.ones_like(image[..., :1])

    chw = torch.from_numpy(image).permute(2, 0, 1)
    scale = min(height / chw.shape[-2], width / chw.shape[-1])
    resized = torch.nn.functional.interpolate(
        chw[None],
        scale_factor=scale,
        mode=mode,
        align_corners=False if mode == "bilinear" else None,
        recompute_scale_factor=True,
    )[0]
    canvas = torch.zeros(chw.shape[0], height, width)
    canvas[:, : resized.shape[1], : resized.shape[2]] = resized
    valid = torch.zeros(1, height, width)
    valid[:, : resized.shape[1], : resized.shape[2]] = 1.0
    return canvas.permute(1, 2, 0).numpy(), scale, valid.permute(1, 2, 0).numpy()
# Code adapted from https://github.com/POSTECH-CVLab/PeRFception/data_util/co3d.py
def similarity_from_cameras(c2w, fix_rot=False, radius=1.0):
    """
    Get a similarity transform to normalize dataset
    from c2w (OpenCV convention) cameras
    :param c2w: (N, 4, 4) camera-to-world matrices
    :param fix_rot: if True, skip the up-axis alignment rotation
    :param radius: target median camera distance after rescaling
    :return T (4,4) , scale (float)
    """
    t = c2w[:, :3, 3]
    R = c2w[:, :3, :3]

    # (1) Rotate the world so that z+ is the up axis
    # we estimate the up axis by averaging the camera up axes
    ups = np.sum(R * np.array([0, -1.0, 0]), axis=-1)
    world_up = np.mean(ups, axis=0)
    world_up /= np.linalg.norm(world_up)

    up_camspace = np.array([0.0, 0.0, 1.0])
    # Rodrigues-style construction of the rotation aligning world_up
    # with up_camspace: I + [v]_x + [v]_x^2 / (1 + cos).
    c = (up_camspace * world_up).sum()
    cross = np.cross(world_up, up_camspace)
    skew = np.array(
        [
            [0.0, -cross[2], cross[1]],
            [cross[2], 0.0, -cross[0]],
            [-cross[1], cross[0], 0.0],
        ]
    )
    if c > -1:
        R_align = np.eye(3) + skew + (skew @ skew) * 1 / (1 + c)
    else:
        # In the unlikely case the original data has y+ up axis,
        # rotate 180-deg about x axis
        R_align = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])

    if fix_rot:
        R_align = np.eye(3)
        R = np.eye(3)
    else:
        R = R_align @ R
    # Forward (+z) axis of each aligned camera.
    fwds = np.sum(R * np.array([0, 0.0, 1.0]), axis=-1)
    t = (R_align @ t[..., None])[..., 0]

    # (2) Recenter the scene using camera center rays
    # find the closest point to the origin for each camera's center ray
    nearest = t + (fwds * -t).sum(-1)[:, None] * fwds

    # median for more robustness
    translate = -np.median(nearest, axis=0)

    # translate = -np.mean(t, axis=0)  # DEBUG

    transform = np.eye(4)
    transform[:3, 3] = translate
    transform[:3, :3] = R_align

    # (3) Rescale the scene using camera distances
    scale = radius / np.median(np.linalg.norm(t + translate, axis=-1))
    return transform, scale
@dataclass
class Co3dDataModuleConfig:
    # Path to a single CO3D scene directory; the last path component is
    # used as the sequence name when filtering frame annotations.
    root_dir: str = ""
    batch_size: int = 1
    # Target crop/render resolution.
    height: int = 256
    width: int = 256
    # If True, look for a cached preprocessed dump (currently a no-op).
    load_preprocessed: bool = False
    # Extra multiplier applied to camera translations after normalization.
    cam_scale_factor: float = 0.95
    # Hard cap on the number of frames kept per scene.
    max_num_frames: int = 300
    # CO3D v2 stores NDC-style intrinsics; convert them when True.
    v2_mode: bool = True
    # Load foreground masks (otherwise an all-ones mask is used).
    use_mask: bool = True
    # Crop each frame around the foreground mask bounding box.
    box_crop: bool = True
    box_crop_mask_thr: float = 0.4
    box_crop_context: float = 0.3
    # Rays sampled per training step; -1 keeps the full image.
    train_num_rays: int = -1
    # Explicit training view indices; None selects every 10th view as test.
    train_views: Optional[list] = None
    train_split: str = "train"
    val_split: str = "val"
    test_split: str = "test"
    # Target median camera distance used by the pose normalization.
    scale_radius: float = 1.0
    # Additionally sample random novel cameras for guidance.
    use_random_camera: bool = True
    random_camera: dict = field(default_factory=dict)
    rays_noise_scale: float = 0.0
    # Camera path at test time: "circle" serves the random-pose trajectory.
    render_path: str = "circle"
class Co3dDatasetBase:
    """Shared loading/preprocessing logic for the CO3D dataset classes.

    `setup` reads a single CO3D scene (images, depths, masks, cameras),
    converts the Pytorch3D cameras to OpenCV/OpenGL conventions,
    normalizes the poses, optionally crops around the foreground mask,
    and splits the frames into train/val/test subsets.
    """

    def setup(self, cfg, split):
        """Load and preprocess one CO3D scene for the given split."""
        self.split = split
        self.rank = get_rank()
        self.cfg: Co3dDataModuleConfig = cfg

        if self.cfg.use_random_camera:
            # Random novel-view cameras used for guidance/evaluation.
            random_camera_cfg = parse_structured(
                RandomCameraDataModuleConfig, self.cfg.get("random_camera", {})
            )
            if split == "train":
                self.random_pose_generator = RandomCameraIterableDataset(
                    random_camera_cfg
                )
            else:
                self.random_pose_generator = RandomCameraDataset(
                    random_camera_cfg, split
                )

        self.use_mask = self.cfg.use_mask
        cam_scale_factor = self.cfg.cam_scale_factor

        assert os.path.exists(self.cfg.root_dir), f"{self.cfg.root_dir} doesn't exist!"

        # Pytorch3D (x left, y up, z in) -> OpenCV (x right, y down, z in).
        cam_trans = np.diag(np.array([-1, -1, 1, 1], dtype=np.float32))
        scene_number = self.cfg.root_dir.split("/")[-1]
        json_path = os.path.join(self.cfg.root_dir, "..", "frame_annotations.jgz")
        with gzip.open(json_path, "r") as fp:
            all_frames_data = json.load(fp)

        frame_data, images, intrinsics, extrinsics, image_sizes = [], [], [], [], []
        depths = []

        # Keep only the annotations belonging to this scene.
        for temporal_data in all_frames_data:
            if temporal_data["sequence_name"] == scene_number:
                frame_data.append(temporal_data)

        self.all_directions = []
        self.all_fg_masks = []

        for frame in frame_data:
            if "unseen" in frame["meta"]["frame_type"]:
                continue
            img = cv2.imread(
                os.path.join(self.cfg.root_dir, "..", "..", frame["image"]["path"])
            )
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0

            # TODO: use estimated depth
            depth = _load_depth(
                os.path.join(self.cfg.root_dir, "..", "..", frame["depth"]["path"]),
                frame["depth"]["scale_adjustment"],
            )[0]

            H, W = frame["image"]["size"]
            image_size = np.array([H, W])
            fxy = np.array(frame["viewpoint"]["focal_length"])
            cxy = np.array(frame["viewpoint"]["principal_point"])
            R = np.array(frame["viewpoint"]["R"])
            T = np.array(frame["viewpoint"]["T"])

            if self.cfg.v2_mode:
                # CO3D v2 stores NDC intrinsics normalized by min(H, W);
                # convert them to the half-image-size convention below.
                min_HW = min(W, H)
                image_size_half = np.array([W * 0.5, H * 0.5], dtype=np.float32)
                scale_arr = np.array([min_HW * 0.5, min_HW * 0.5], dtype=np.float32)
                fxy_x = fxy * scale_arr
                prp_x = np.array([W * 0.5, H * 0.5], dtype=np.float32) - cxy * scale_arr
                cxy = (image_size_half - prp_x) / image_size_half
                fxy = fxy_x / image_size_half

            # Convert the (normalized) intrinsics to pixel units.
            scale_arr = np.array([W * 0.5, H * 0.5], dtype=np.float32)
            focal = fxy * scale_arr
            prp = -1.0 * (cxy - 1.0) * scale_arr

            pose = np.eye(4)
            pose[:3, :3] = R
            pose[:3, 3:] = -R @ T[..., None]
            # original camera: x left, y up, z in (Pytorch3D)
            # transformed camera: x right, y down, z in (OpenCV)
            pose = pose @ cam_trans

            intrinsic = np.array(
                [
                    [focal[0], 0.0, prp[0], 0.0],
                    [0.0, focal[1], prp[1], 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            )

            # Skip exact-duplicate poses.
            if any(np.all(pose == _pose) for _pose in extrinsics):
                continue

            image_sizes.append(image_size)
            intrinsics.append(intrinsic)
            extrinsics.append(pose)
            images.append(img)
            depths.append(depth)
            # NOTE(review): get_ray_directions is called elsewhere in this
            # file as (height, width, ...); passing (W, H) here looks
            # swapped for non-square frames -- confirm against the helper.
            self.all_directions.append(get_ray_directions(W, H, focal, prp))
            # vis_utils.vis_depth_pcd([depth], [pose], intrinsic, [(img * 255).astype(np.uint8)])

            if self.use_mask:
                mask = np.array(
                    Image.open(
                        os.path.join(
                            self.cfg.root_dir, "..", "..", frame["mask"]["path"]
                        )
                    )
                )
                mask = mask.astype(np.float32) / 255.0  # (h, w)
            else:
                # Fix: `img` is a NumPy array here, so torch.ones_like would
                # raise; build the all-foreground mask with NumPy instead.
                mask = np.ones_like(img[..., 0])
            self.all_fg_masks.append(mask)

        intrinsics = np.stack(intrinsics)
        extrinsics = np.stack(extrinsics)
        image_sizes = np.stack(image_sizes)
        self.all_directions = torch.stack(self.all_directions, dim=0)
        self.all_fg_masks = np.stack(self.all_fg_masks, 0)

        # Drop outlier frames: resolutions far from the median size, or
        # camera centers far (>5x median distance) from the median center.
        H_median, W_median = np.median(image_sizes, axis=0)

        H_inlier = np.abs(image_sizes[:, 0] - H_median) / H_median < 0.1
        W_inlier = np.abs(image_sizes[:, 1] - W_median) / W_median < 0.1
        inlier = np.logical_and(H_inlier, W_inlier)
        dists = np.linalg.norm(
            extrinsics[:, :3, 3] - np.median(extrinsics[:, :3, 3], axis=0), axis=-1
        )
        med = np.median(dists)
        good_mask = dists < (med * 5.0)
        inlier = np.logical_and(inlier, good_mask)

        if inlier.sum() != 0:
            intrinsics = intrinsics[inlier]
            extrinsics = extrinsics[inlier]
            image_sizes = image_sizes[inlier]
            images = [images[i] for i in range(len(inlier)) if inlier[i]]
            depths = [depths[i] for i in range(len(inlier)) if inlier[i]]
            self.all_directions = self.all_directions[inlier]
            self.all_fg_masks = self.all_fg_masks[inlier]

        extrinsics = np.stack(extrinsics)

        # Normalize the scene: rotate z+ up, recenter, and rescale so the
        # median camera distance equals cfg.scale_radius.
        T, sscale = similarity_from_cameras(extrinsics, radius=self.cfg.scale_radius)
        extrinsics = T @ extrinsics
        extrinsics[:, :3, 3] *= sscale * cam_scale_factor
        depths = [depth * sscale * cam_scale_factor for depth in depths]

        num_frames = len(extrinsics)
        if self.cfg.max_num_frames < num_frames:
            num_frames = self.cfg.max_num_frames
            extrinsics = extrinsics[:num_frames]
            intrinsics = intrinsics[:num_frames]
            image_sizes = image_sizes[:num_frames]
            images = images[:num_frames]
            depths = depths[:num_frames]
            self.all_directions = self.all_directions[:num_frames]
            self.all_fg_masks = self.all_fg_masks[:num_frames]

        if self.cfg.box_crop:
            print("cropping...")
            crop_masks = []
            crop_imgs = []
            crop_depths = []
            crop_directions = []
            crop_xywhs = []
            max_sl = 0
            for i in range(num_frames):
                bbox_xywh = np.array(
                    get_bbox_from_mask(self.all_fg_masks[i], self.cfg.box_crop_mask_thr)
                )
                clamp_bbox_xywh = get_clamp_bbox(bbox_xywh, self.cfg.box_crop_context)
                max_sl = max(clamp_bbox_xywh[2] - clamp_bbox_xywh[0], max_sl)
                max_sl = max(clamp_bbox_xywh[3] - clamp_bbox_xywh[1], max_sl)
                mask = crop_around_box(self.all_fg_masks[i][..., None], clamp_bbox_xywh)
                img = crop_around_box(images[i], clamp_bbox_xywh)
                depth = crop_around_box(depths[i][..., None], clamp_bbox_xywh)

                # resize to the same shape
                mask, _, _ = resize_image(mask, self.cfg.height, self.cfg.width)
                depth, _, _ = resize_image(depth, self.cfg.height, self.cfg.width)
                img, scale, _ = resize_image(img, self.cfg.height, self.cfg.width)
                fx, fy, cx, cy = (
                    intrinsics[i][0, 0],
                    intrinsics[i][1, 1],
                    intrinsics[i][0, 2],
                    intrinsics[i][1, 2],
                )

                crop_masks.append(mask)
                crop_imgs.append(img)
                crop_depths.append(depth)
                crop_xywhs.append(clamp_bbox_xywh)
                # Ray directions for the cropped and rescaled intrinsics.
                crop_directions.append(
                    get_ray_directions(
                        self.cfg.height,
                        self.cfg.width,
                        (fx * scale, fy * scale),
                        (
                            (cx - clamp_bbox_xywh[0]) * scale,
                            (cy - clamp_bbox_xywh[1]) * scale,
                        ),
                    )
                )

            images = crop_imgs
            depths = crop_depths
            self.all_fg_masks = np.stack(crop_masks, 0)
            self.all_directions = torch.from_numpy(np.stack(crop_directions, 0))

        # Convert poses to OpenGL convention (x right, y up, z back).
        self.all_c2w = torch.from_numpy(
            (
                extrinsics
                @ np.diag(np.array([1, -1, -1, 1], dtype=np.float32))[None, ...]
            )[..., :3, :4]
        )
        self.all_images = torch.from_numpy(np.stack(images, axis=0))
        self.all_depths = torch.from_numpy(np.stack(depths, axis=0))

        # TODO: save data for fast loading next time
        # Fix: os.path.exists takes a single path; the original passed two
        # arguments, which raises TypeError when load_preprocessed is set.
        if self.cfg.load_preprocessed and os.path.exists(
            os.path.join(self.cfg.root_dir, "nerf_preprocessed.npy")
        ):
            pass

        i_all = np.arange(num_frames)
        if self.cfg.train_views is None:
            # Every 10th view is held out for val/test.
            i_test = i_all[::10]
            i_val = i_test
            i_train = np.array([i for i in i_all if i not in i_test])
        else:
            # use provided views
            i_train = self.cfg.train_views
            i_test = np.array([i for i in i_all if i not in i_train])
            i_val = i_test

        if self.split == "train":
            print("[INFO] num of train views: ", len(i_train))
            print("[INFO] train view ids = ", i_train)
        # Note: "test" intentionally serves all views.
        i_split = {"train": i_train, "val": i_val, "test": i_all}

        self.all_images, self.all_c2w = (
            self.all_images[i_split[self.split]],
            self.all_c2w[i_split[self.split]],
        )
        self.all_directions = self.all_directions[i_split[self.split]].to(self.rank)
        self.all_fg_masks = torch.from_numpy(self.all_fg_masks)[i_split[self.split]]
        self.all_depths = self.all_depths[i_split[self.split]]

        # Move everything to this process's device as float32.
        self.all_c2w, self.all_images, self.all_fg_masks = (
            self.all_c2w.float().to(self.rank),
            self.all_images.float().to(self.rank),
            self.all_fg_masks.float().to(self.rank),
        )
        self.all_depths = self.all_depths.float().to(self.rank)

    def get_all_images(self):
        """Return the (N, H, W, 3) image tensor for the current split."""
        return self.all_images
class Co3dDataset(Dataset, Co3dDatasetBase):
    """Map-style CO3D dataset used for validation and testing."""

    def __init__(self, cfg, split):
        self.setup(cfg, split)

    def _serves_real_frames(self):
        # Captured frames are only served at test time when the render
        # path is not the synthetic "circle" trajectory; every other case
        # falls back to the random pose generator.
        return self.split == "test" and self.cfg.render_path != "circle"

    def __len__(self):
        if self._serves_real_frames():
            return len(self.all_images)
        return len(self.random_pose_generator)

    def prepare_data(self, index):
        """Assemble a batch dict from the preloaded frame `index`."""
        cam2world = self.all_c2w[index]
        ray_dirs = self.all_directions[index]
        rays_o, rays_d = get_rays(
            ray_dirs, cam2world, keepdim=True, noise_scale=self.cfg.rays_noise_scale
        )
        # TODO: get projection matrix and mvp matrix
        return {
            "rays_o": rays_o,
            "rays_d": rays_d,
            "mvp_mtx": 0,
            "camera_positions": cam2world[..., :3, -1],
            # Light co-located with the camera.
            "light_positions": cam2world[..., :3, -1],
            "elevation": 0,
            "azimuth": 0,
            "camera_distances": 0,
            "rgb": self.all_images[index],
            "depth": self.all_depths[index],
            "mask": self.all_fg_masks[index],
        }

    def __getitem__(self, index):
        if self._serves_real_frames():
            return self.prepare_data(index)
        return self.random_pose_generator[index]
class Co3dIterableDataset(IterableDataset, Co3dDatasetBase):
    """Infinite training stream over the preloaded CO3D frames.

    Frames are visited in a random permutation that is reshuffled after
    each full pass; `collate` builds the actual batch.
    """

    def __init__(self, cfg, split):
        self.setup(cfg, split)
        self.idx = 0
        # Random visiting order over the loaded frames.
        self.image_perm = torch.randperm(len(self.all_images))

    def __iter__(self):
        # Batches are produced in `collate`; the iterator only drives the
        # infinite training loop.
        while True:
            yield {}

    def collate(self, batch) -> Dict[str, Any]:
        """Build the next training batch (one frame, optionally subsampled rays)."""
        idx = self.image_perm[self.idx]
        # prepare batch data here
        c2w = self.all_c2w[idx][None]
        light_positions = c2w[..., :3, -1]
        directions = self.all_directions[idx][None]
        rays_o, rays_d = get_rays(
            directions, c2w, keepdim=True, noise_scale=self.cfg.rays_noise_scale
        )
        rgb = self.all_images[idx][None]
        depth = self.all_depths[idx][None]
        mask = self.all_fg_masks[idx][None]

        # Optionally subsample a random set of pixels/rays for training.
        if (
            self.cfg.train_num_rays != -1
            and self.cfg.train_num_rays < self.cfg.height * self.cfg.width
        ):
            _, height, width, _ = rays_o.shape
            x = torch.randint(
                0, width, size=(self.cfg.train_num_rays,), device=rays_o.device
            )
            y = torch.randint(
                0, height, size=(self.cfg.train_num_rays,), device=rays_o.device
            )

            rays_o = rays_o[:, y, x].unsqueeze(-2)
            rays_d = rays_d[:, y, x].unsqueeze(-2)
            directions = directions[:, y, x].unsqueeze(-2)
            rgb = rgb[:, y, x].unsqueeze(-2)
            mask = mask[:, y, x].unsqueeze(-2)
            depth = depth[:, y, x].unsqueeze(-2)

        # TODO: get projection matrix and mvp matrix
        # proj_mtx = get_projection_matrix()
        batch = {
            "rays_o": rays_o,
            "rays_d": rays_d,
            "mvp_mtx": None,
            "camera_positions": c2w[..., :3, -1],
            "light_positions": light_positions,
            "elevation": None,
            "azimuth": None,
            "camera_distances": None,
            "rgb": rgb,
            "depth": depth,
            "mask": mask,
        }

        if self.cfg.use_random_camera:
            # Also provide a random novel camera for guidance losses.
            batch["random_camera"] = self.random_pose_generator.collate(None)

        # Advance the cursor; reshuffle after a full pass over all frames.
        self.idx += 1
        if self.idx == len(self.all_images):
            self.idx = 0
            self.image_perm = torch.randperm(len(self.all_images))
        return batch
@register("co3d-datamodule")
class Co3dDataModule(pl.LightningDataModule):
    """Lightning datamodule wiring the CO3D datasets to dataloaders."""

    def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
        super().__init__()
        self.cfg = parse_structured(Co3dDataModuleConfig, cfg)

    def setup(self, stage=None):
        """Instantiate the datasets required for `stage`."""
        if stage in [None, "fit"]:
            self.train_dataset = Co3dIterableDataset(self.cfg, self.cfg.train_split)
        if stage in [None, "fit", "validate"]:
            self.val_dataset = Co3dDataset(self.cfg, self.cfg.val_split)
        if stage in [None, "test", "predict"]:
            self.test_dataset = Co3dDataset(self.cfg, self.cfg.test_split)

    def prepare_data(self):
        # Data is expected to already exist on disk; nothing to download.
        pass

    def general_loader(self, dataset, batch_size, collate_fn=None) -> DataLoader:
        """Shared DataLoader factory for all splits.

        num_workers=0: the datasets are moved to a device in setup(), so
        they are served from the main process. (Removed an unused local
        `sampler` variable.)
        """
        return DataLoader(
            dataset,
            num_workers=0,
            batch_size=batch_size,
            # pin_memory=True,
            collate_fn=collate_fn,
        )

    def train_dataloader(self):
        return self.general_loader(
            self.train_dataset, batch_size=1, collate_fn=self.train_dataset.collate
        )

    def val_dataloader(self):
        return self.general_loader(self.val_dataset, batch_size=1)

    def test_dataloader(self):
        return self.general_loader(self.test_dataset, batch_size=1)

    def predict_dataloader(self):
        # Prediction reuses the test split.
        return self.general_loader(self.test_dataset, batch_size=1)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/load/make_prompt_library.py | load/make_prompt_library.py | import json
dreamfusion_gallery_video_names = [
"a_20-sided_die_made_out_of_glass.mp4",
"a_bald_eagle_carved_out_of_wood.mp4",
"a_banana_peeling_itself.mp4",
"a_beagle_in_a_detective's_outfit.mp4",
"a_beautiful_dress_made_out_of_fruit,_on_a_mannequin._Studio_lighting,_high_quality,_high_resolution.mp4",
"a_beautiful_dress_made_out_of_garbage_bags,_on_a_mannequin._Studio_lighting,_high_quality,_high_resolution.mp4",
"a_beautiful_rainbow_fish.mp4",
"a_bichon_frise_wearing_academic_regalia.mp4",
"a_blue_motorcycle.mp4",
"a_blue_poison-dart_frog_sitting_on_a_water_lily.mp4",
"a_brightly_colored_mushroom_growing_on_a_log.mp4",
"a_bumblebee_sitting_on_a_pink_flower.mp4",
"a_bunch_of_colorful_marbles_spilling_out_of_a_red_velvet_bag.mp4",
"a_capybara_wearing_a_top_hat,_low_poly.mp4",
"a_cat_with_a_mullet.mp4",
"a_ceramic_lion.mp4",
"a_ceramic_upside_down_yellow_octopus_holding_a_blue_green_ceramic_cup.mp4",
"a_chihuahua_wearing_a_tutu.mp4",
"a_chimpanzee_holding_a_peeled_banana.mp4",
"a_chimpanzee_looking_through_a_telescope.mp4",
"a_chimpanzee_stirring_a_bubbling_purple_potion_in_a_cauldron.mp4",
"a_chimpanzee_with_a_big_grin.mp4",
"a_completely_destroyed_car.mp4",
"a_confused_beagle_sitting_at_a_desk_working_on_homework.mp4",
"a_corgi_taking_a_selfie.mp4",
"a_crab,_low_poly.mp4",
"a_crocodile_playing_a_drum_set.mp4",
"a_cute_steampunk_elephant.mp4",
"a_dachsund_dressed_up_in_a_hotdog_costume.mp4",
"a_delicious_hamburger.mp4",
"a_dragon-cat_hybrid.mp4",
"a_DSLR_photo_of_a_baby_dragon_drinking_boba.mp4",
"a_DSLR_photo_of_a_baby_dragon_hatching_out_of_a_stone_egg.mp4",
"a_DSLR_photo_of_a_baby_grand_piano_viewed_from_far_away.mp4",
"a_DSLR_photo_of_a_bagel_filled_with_cream_cheese_and_lox.mp4",
"a_DSLR_photo_of_a_bald_eagle.mp4",
"a_DSLR_photo_of_a_barbecue_grill_cooking_sausages_and_burger_patties.mp4",
"a_DSLR_photo_of_a_basil_plant.mp4",
"a_DSLR_photo_of_a_bear_dancing_ballet.mp4",
"a_DSLR_photo_of_a_bear_dressed_as_a_lumberjack.mp4",
"a_DSLR_photo_of_a_bear_dressed_in_medieval_armor.mp4",
"a_DSLR_photo_of_a_beautiful_violin_sitting_flat_on_a_table.mp4",
"a_DSLR_photo_of_a_blue_jay_standing_on_a_large_basket_of_rainbow_macarons.mp4",
"a_DSLR_photo_of_a_bulldozer_clearing_away_a_pile_of_snow.mp4",
"a_DSLR_photo_of_a_bulldozer.mp4",
"a_DSLR_photo_of_a_cake_covered_in_colorful_frosting_with_a_slice_being_taken_out,_high_resolution.mp4",
"a_DSLR_photo_of_a_candelabra_with_many_candles_on_a_red_velvet_tablecloth.mp4",
"a_DSLR_photo_of_a_car_made_out_of_cheese.mp4",
"a_DSLR_photo_of_A_car_made_out_of_sushi.mp4",
"a_DSLR_photo_of_a_car_made_out_pizza.mp4",
"a_DSLR_photo_of_a_cat_lying_on_its_side_batting_at_a_ball_of_yarn.mp4",
"a_DSLR_photo_of_a_cat_magician_making_a_white_dove_appear.mp4",
"a_DSLR_photo_of_a_cat_wearing_a_bee_costume.mp4",
"a_DSLR_photo_of_a_cat_wearing_a_lion_costume.mp4",
"a_DSLR_photo_of_a_cauldron_full_of_gold_coins.mp4",
"a_DSLR_photo_of_a_chimpanzee_dressed_like_Henry_VIII_king_of_England.mp4",
"a_DSLR_photo_of_a_chimpanzee_dressed_like_Napoleon_Bonaparte.mp4",
"a_DSLR_photo_of_a_chow_chow_puppy.mp4",
"a_DSLR_photo_of_a_Christmas_tree_with_donuts_as_decorations.mp4",
"a_DSLR_photo_of_a_chrome-plated_duck_with_a_golden_beak_arguing_with_an_angry_turtle_in_a_forest.mp4",
"a_DSLR_photo_of_a_classic_Packard_car.mp4",
"a_DSLR_photo_of_a_cocker_spaniel_wearing_a_crown.mp4",
"a_DSLR_photo_of_a_corgi_lying_on_its_back_with_its_tongue_lolling_out.mp4",
"a_DSLR_photo_of_a_corgi_puppy.mp4",
"a_DSLR_photo_of_a_corgi_sneezing.mp4",
"a_DSLR_photo_of_a_corgi_standing_up_drinking_boba.mp4",
"a_DSLR_photo_of_a_corgi_taking_a_selfie.mp4",
"a_DSLR_photo_of_a_corgi_wearing_a_beret_and_holding_a_baguette,_standing_up_on_two_hind_legs.mp4",
"a_DSLR_photo_of_a_covered_wagon.mp4",
"a_DSLR_photo_of_a_cracked_egg_with_the_yolk_spilling_out_on_a_wooden_table.mp4",
"a_DSLR_photo_of_a_cup_full_of_pens_and_pencils.mp4",
"a_DSLR_photo_of_a_dalmation_wearing_a_fireman's_hat.mp4",
"a_DSLR_photo_of_a_delicious_chocolate_brownie_dessert_with_ice_cream_on_the_side.mp4",
"a_DSLR_photo_of_a_delicious_croissant.mp4",
"a_DSLR_photo_of_A_DMC_Delorean_car.mp4",
"a_DSLR_photo_of_a_dog_made_out_of_salad.mp4",
"a_DSLR_photo_of_a_drum_set_made_of_cheese.mp4",
"a_DSLR_photo_of_a_drying_rack_covered_in_clothes.mp4",
"a_DSLR_photo_of_aerial_view_of_a_ruined_castle.mp4",
"a_DSLR_photo_of_a_football_helmet.mp4",
"a_DSLR_photo_of_a_fox_holding_a_videogame_controller.mp4",
"a_DSLR_photo_of_a_fox_taking_a_photograph_using_a_DSLR.mp4",
"a_DSLR_photo_of_a_frazer_nash_super_sport_car.mp4",
"a_DSLR_photo_of_a_frog_wearing_a_sweater.mp4",
"a_DSLR_photo_of_a_ghost_eating_a_hamburger.mp4",
"a_DSLR_photo_of_a_giant_worm_emerging_from_the_sand_in_the_middle_of_the_desert.mp4",
"a_DSLR_photo_of_a_goose_made_out_of_gold.mp4",
"a_DSLR_photo_of_a_green_monster_truck.mp4",
"a_DSLR_photo_of_a_group_of_dogs_eating_pizza.mp4",
"a_DSLR_photo_of_a_group_of_dogs_playing_poker.mp4",
"a_DSLR_photo_of_a_gummy_bear_playing_the_saxophone.mp4",
"a_DSLR_photo_of_a_hippo_wearing_a_sweater.mp4",
"a_DSLR_photo_of_a_humanoid_robot_holding_a_human_brain.mp4",
"a_DSLR_photo_of_a_humanoid_robot_playing_solitaire.mp4",
"a_DSLR_photo_of_a_humanoid_robot_playing_the_cello.mp4",
"a_DSLR_photo_of_a_humanoid_robot_using_a_laptop.mp4",
"a_DSLR_photo_of_a_humanoid_robot_using_a_rolling_pin_to_roll_out_dough.mp4",
"a_DSLR_photo_of_a_human_skull.mp4",
"a_DSLR_photo_of_a_kitten_standing_on_top_of_a_giant_tortoise.mp4",
"a_DSLR_photo_of_a_knight_chopping_wood.mp4",
"a_DSLR_photo_of_a_knight_holding_a_lance_and_sitting_on_an_armored_horse.mp4",
"a_DSLR_photo_of_a_koala_wearing_a_party_hat_and_blowing_out_birthday_candles_on_a_cake.mp4",
"a_DSLR_photo_of_a_lemur_taking_notes_in_a_journal.mp4",
"a_DSLR_photo_of_a_lion_reading_the_newspaper.mp4",
"a_DSLR_photo_of_a_mandarin_duck_swimming_in_a_pond.mp4",
"a_DSLR_photo_of_a_model_of_the_eiffel_tower_made_out_of_toothpicks.mp4",
"a_DSLR_photo_of_a_mouse_playing_the_tuba.mp4",
"a_DSLR_photo_of_a_mug_of_hot_chocolate_with_whipped_cream_and_marshmallows.mp4",
"a_DSLR_photo_of_an_adorable_piglet_in_a_field.mp4",
"a_DSLR_photo_of_an_airplane_taking_off_from_the_runway.mp4",
"a_DSLR_photo_of_an_astronaut_standing_on_the_surface_of_mars.mp4",
"a_DSLR_photo_of_an_eggshell_broken_in_two_with_an_adorable_chick_standing_next_to_it.mp4",
"a_DSLR_photo_of_an_elephant_skull.mp4",
"a_DSLR_photo_of_an_exercise_bike_in_a_well_lit_room.mp4",
"a_DSLR_photo_of_an_extravagant_mansion,_aerial_view.mp4",
"a_DSLR_photo_of_an_ice_cream_sundae.mp4",
"a_DSLR_photo_of_an_iguana_holding_a_balloon.mp4",
"a_DSLR_photo_of_an_intricate_and_complex_dish_from_a_michelin_star_restaurant.mp4",
"a_DSLR_photo_of_An_iridescent_steampunk_patterned_millipede_with_bison_horns.mp4",
"a_DSLR_photo_of_an_octopus_playing_the_piano.mp4",
"a_DSLR_photo_of_an_old_car_overgrown_by_vines_and_weeds.mp4",
"a_DSLR_photo_of_an_old_vintage_car.mp4",
"a_DSLR_photo_of_an_orangutan_making_a_clay_bowl_on_a_throwing_wheel.mp4",
"a_DSLR_photo_of_an_orc_forging_a_hammer_on_an_anvil.mp4",
"a_DSLR_photo_of_an_origami_motorcycle.mp4",
"a_DSLR_photo_of_an_ornate_silver_gravy_boat_sitting_on_a_patterned_tablecloth.mp4",
"a_DSLR_photo_of_an_overstuffed_pastrami_sandwich.mp4",
"a_DSLR_photo_of_an_unstable_rock_cairn_in_the_middle_of_a_stream.mp4",
"a_DSLR_photo_of_a_pair_of_headphones_sitting_on_a_desk.mp4",
"a_DSLR_photo_of_a_pair_of_tan_cowboy_boots,_studio_lighting,_product_photography.mp4",
"a_DSLR_photo_of_a_peacock_on_a_surfboard.mp4",
"a_DSLR_photo_of_a_pigeon_reading_a_book.mp4",
"a_DSLR_photo_of_a_piglet_sitting_in_a_teacup.mp4",
"a_DSLR_photo_of_a_pig_playing_a_drum_set.mp4",
"a_DSLR_photo_of_a_pile_of_dice_on_a_green_tabletop_next_to_some_playing_cards.mp4",
"a_DSLR_photo_of_a_pirate_collie_dog,_high_resolution.mp4",
"a_DSLR_photo_of_a_plate_of_fried_chicken_and_waffles_with_maple_syrup_on_them.mp4",
"a_DSLR_photo_of_a_plate_piled_high_with_chocolate_chip_cookies.mp4",
"a_DSLR_photo_of_a_plush_t-rex_dinosaur_toy,_studio_lighting,_high_resolution.mp4",
"a_DSLR_photo_of_a_plush_triceratops_toy,_studio_lighting,_high_resolution.mp4",
"a_DSLR_photo_of_a_pomeranian_dog.mp4",
"a_DSLR_photo_of_a_porcelain_dragon.mp4",
"a_DSLR_photo_of_a_praying_mantis_wearing_roller_skates.mp4",
"a_DSLR_photo_of_a_puffin_standing_on_a_rock.mp4",
"a_DSLR_photo_of_a_pug_made_out_of_metal.mp4",
"a_DSLR_photo_of_a_pug_wearing_a_bee_costume.mp4",
"a_DSLR_photo_of_a_quill_and_ink_sitting_on_a_desk.mp4",
"a_DSLR_photo_of_a_raccoon_stealing_a_pie.mp4",
"a_DSLR_photo_of_a_red_cardinal_bird_singing.mp4",
"a_DSLR_photo_of_a_red_convertible_car_with_the_top_down.mp4",
"a_DSLR_photo_of_a_red-eyed_tree_frog.mp4",
"a_DSLR_photo_of_a_red_pickup_truck_driving_across_a_stream.mp4",
"a_DSLR_photo_of_a_red_wheelbarrow_with_a_shovel_in_it.mp4",
"a_DSLR_photo_of_a_roast_turkey_on_a_platter.mp4",
"a_DSLR_photo_of_a_robot_and_dinosaur_playing_chess,_high_resolution.mp4",
"a_DSLR_photo_of_a_robot_arm_picking_up_a_colorful_block_from_a_table.mp4",
"a_DSLR_photo_of_a_robot_cat_knocking_over_a_chess_piece_on_a_board.mp4",
"a_DSLR_photo_of_a_robot_dinosaur.mp4",
"a_DSLR_photo_of_a_robot_made_out_of_vegetables.mp4",
"a_DSLR_photo_of_a_robot_stegosaurus.mp4",
"a_DSLR_photo_of_a_robot_tiger.mp4",
"a_DSLR_photo_of_a_rolling_pin_on_top_of_bread_dough.mp4",
"a_DSLR_photo_of_a_sheepdog_running.mp4",
"a_DSLR_photo_of_a_shiba_inu_playing_golf_wearing_tartan_golf_clothes_and_hat.mp4",
"a_DSLR_photo_of_a_shiny_silver_robot_cat.mp4",
"a_DSLR_photo_of_a_silverback_gorilla_holding_a_golden_trophy.mp4",
"a_DSLR_photo_of_a_silver_humanoid_robot_flipping_a_coin.mp4",
"a_DSLR_photo_of_a_small_cherry_tomato_plant_in_a_pot_with_a_few_red_tomatoes_growing_on_it.mp4",
"a_DSLR_photo_of_a_small_saguaro_cactus_planted_in_a_clay_pot.mp4",
"a_DSLR_photo_of_a_Space_Shuttle.mp4",
"a_DSLR_photo_of_a_squirrel_dressed_like_a_clown.mp4",
"a_DSLR_photo_of_a_squirrel_flying_a_biplane.mp4",
"a_DSLR_photo_of_a_squirrel_giving_a_lecture_writing_on_a_chalkboard.mp4",
"a_DSLR_photo_of_a_squirrel_holding_a_bowling_ball.mp4",
"a_DSLR_photo_of_a_squirrel-lizard_hybrid.mp4",
"a_DSLR_photo_of_a_squirrel_made_out_of_fruit.mp4",
"a_DSLR_photo_of_a_squirrel-octopus_hybrid.mp4",
"a_DSLR_photo_of_a_stack_of_pancakes_covered_in_maple_syrup.mp4",
"a_DSLR_photo_of_a_steam_engine_train,_high_resolution.mp4",
"a_DSLR_photo_of_a_steaming_basket_full_of_dumplings.mp4",
"a_DSLR_photo_of_a_steaming_hot_plate_piled_high_with_spaghetti_and_meatballs.mp4",
"a_DSLR_photo_of_a_steampunk_space_ship_designed_in_the_18th_century.mp4",
"a_DSLR_photo_of_a_straw_basket_with_a_cobra_coming_out_of_it.mp4",
"a_DSLR_photo_of_a_swan_and_its_cygnets_swimming_in_a_pond.mp4",
"a_DSLR_photo_of_a_tarantula,_highly_detailed.mp4",
"a_DSLR_photo_of_a_teal_moped.mp4",
"a_DSLR_photo_of_a_teapot_shaped_like_an_elephant_head_where_its_snout_acts_as_the_spout.mp4",
"a_DSLR_photo_of_a_teddy_bear_taking_a_selfie.mp4",
"a_DSLR_photo_of_a_terracotta_bunny.mp4",
"a_DSLR_photo_of_a_tiger_dressed_as_a_doctor.mp4",
"a_DSLR_photo_of_a_tiger_made_out_of_yarn.mp4",
"a_DSLR_photo_of_a_toilet_made_out_of_gold.mp4",
"a_DSLR_photo_of_a_toy_robot.mp4",
"a_DSLR_photo_of_a_train_engine_made_out_of_clay.mp4",
"a_DSLR_photo_of_a_tray_of_Sushi_containing_pugs.mp4",
"a_DSLR_photo_of_a_tree_stump_with_an_axe_buried_in_it.mp4",
"a_DSLR_photo_of_a_turtle_standing_on_its_hind_legs,_wearing_a_top_hat_and_holding_a_cane.mp4",
"a_DSLR_photo_of_a_very_beautiful_small_organic_sculpture_made_of_fine_clockwork_and_gears_with_tiny_ruby_bearings,_very_intricate,_caved,_curved._Studio_lighting,_High_resolution,_white_background.mp4",
"a_DSLR_photo_of_A_very_beautiful_tiny_human_heart_organic_sculpture_made_of_copper_wire_and_threaded_pipes,_very_intricate,_curved,_Studio_lighting,_high_resolution.mp4",
"a_DSLR_photo_of_a_very_cool_and_trendy_pair_of_sneakers,_studio_lighting.mp4",
"a_DSLR_photo_of_a_vintage_record_player.mp4",
"a_DSLR_photo_of_a_wine_bottle_and_full_wine_glass_on_a_chessboard.mp4",
"a_DSLR_photo_of_a_wooden_desk_and_chair_from_an_elementary_school.mp4",
"a_DSLR_photo_of_a_yorkie_dog_eating_a_donut.mp4",
"a_DSLR_photo_of_a_yorkie_dog_wearing_extremely_cool_sneakers.mp4",
"a_DSLR_photo_of_baby_elephant_jumping_on_a_trampoline.mp4",
"a_DSLR_photo_of_cat_wearing_virtual_reality_headset_in_renaissance_oil_painting_high_detail_caravaggio.mp4",
"a_DSLR_photo_of_edible_typewriter_made_out_of_vegetables.mp4",
"a_DSLR_photo_of_Mont_Saint-Michel,_France,_aerial_view.mp4",
"a_DSLR_photo_of_Mount_Fuji,_aerial_view.mp4",
"a_DSLR_photo_of_Neuschwanstein_Castle,_aerial_view.mp4",
"A_DSLR_photo_of___pyramid_shaped_burrito_with_a_slice_cut_out_of_it.mp4",
"a_DSLR_photo_of_the_Imperial_State_Crown_of_England.mp4",
"a_DSLR_photo_of_the_leaning_tower_of_Pisa,_aerial_view.mp4",
"a_DSLR_photo_of_the_Statue_of_Liberty,_aerial_view.mp4",
"a_DSLR_photo_of_Two_locomotives_playing_tug_of_war.mp4",
"a_DSLR_photo_of_two_macaw_parrots_sharing_a_milkshake_with_two_straws.mp4",
"a_DSLR_photo_of_Westminster_Abbey,_aerial_view.mp4",
"a_ficus_planted_in_a_pot.mp4",
"a_flower_made_out_of_metal.mp4",
"a_fluffy_cat_lying_on_its_back_in_a_patch_of_sunlight.mp4",
"a_fox_and_a_hare_tangoing_together.mp4",
"a_fox_holding_a_videogame_controller.mp4",
"a_fox_playing_the_cello.mp4",
"a_frazer_nash_super_sport_car.mp4",
"a_freshly_baked_loaf_of_sourdough_bread_on_a_cutting_board.mp4",
"a_goat_drinking_beer.mp4",
"a_golden_goblet,_low_poly.mp4",
"a_green_dragon_breathing_fire.mp4",
"a_green_tractor_farming_corn_fields.mp4",
"a_highland_cow.mp4",
"a_hotdog_in_a_tutu_skirt.mp4",
"a_humanoid_robot_laying_on_the_couch_while_on_a_laptop.mp4",
"a_humanoid_robot_playing_the_violin.mp4",
"a_humanoid_robot_sitting_looking_at_a_Go_board_with_some_pieces_on_it.mp4",
"a_human_skeleton_drinking_a_glass_of_red_wine.mp4",
"a_human_skull_with_a_vine_growing_through_one_of_the_eye_sockets.mp4",
"a_kitten_looking_at_a_goldfish_in_a_bowl.mp4",
"a_lemur_drinking_boba.mp4",
"a_lemur_taking_notes_in_a_journal.mp4",
"a_lionfish.mp4",
"a_llama_wearing_a_suit.mp4",
"a_marble_bust_of_a_mouse.mp4",
"a_metal_sculpture_of_a_lion's_head,_highly_detailed.mp4",
"a_mojito_in_a_beach_chair.mp4",
"a_monkey-rabbit_hybrid.mp4",
"an_airplane_made_out_of_wood.mp4",
"an_amigurumi_bulldozer.mp4",
"An_anthropomorphic_tomato_eating_another_tomato.mp4",
"an_astronaut_playing_the_violin.mp4",
"an_astronaut_riding_a_kangaroo.mp4",
"an_English_castle,_aerial_view.mp4",
"an_erupting_volcano,_aerial_view.mp4",
"a_nest_with_a_few_white_eggs_and_one_golden_egg.mp4",
"an_exercise_bike.mp4",
"an_iridescent_metal_scorpion.mp4",
"An_octopus_and_a_giraffe_having_cheesecake.mp4",
"an_octopus_playing_the_harp.mp4",
"an_old_vintage_car.mp4",
"an_opulent_couch_from_the_palace_of_Versailles.mp4",
"an_orange_road_bike.mp4",
"an_orangutan_holding_a_paint_palette_in_one_hand_and_a_paintbrush_in_the_other.mp4",
"an_orangutan_playing_accordion_with_its_hands_spread_wide.mp4",
"an_orangutan_using_chopsticks_to_eat_ramen.mp4",
"an_orchid_flower_planted_in_a_clay_pot.mp4",
"a_palm_tree,_low_poly_3d_model.mp4",
"a_panda_rowing_a_boat_in_a_pond.mp4",
"a_panda_wearing_a_necktie_and_sitting_in_an_office_chair.mp4",
"A_Panther_De_Ville_car.mp4",
"a_pig_wearing_a_backpack.mp4",
"a_plate_of_delicious_tacos.mp4",
"a_plush_dragon_toy.mp4",
"a_plush_toy_of_a_corgi_nurse.mp4",
"a_rabbit,_animated_movie_character,_high_detail_3d_model.mp4",
"a_rabbit_cutting_grass_with_a_lawnmower.mp4",
"a_red_eyed_tree_frog,_low_poly.mp4",
"a_red_panda.mp4",
"a_ripe_strawberry.mp4",
"a_roulette_wheel.mp4",
"a_shiny_red_stand_mixer.mp4",
"a_silver_platter_piled_high_with_fruits.mp4",
"a_sliced_loaf_of_fresh_bread.mp4",
"a_snail_on_a_leaf.mp4",
"a_spanish_galleon_sailing_on_the_open_sea.mp4",
"a_squirrel_dressed_like_Henry_VIII_king_of_England.mp4",
"a_squirrel_gesturing_in_front_of_an_easel_showing_colorful_pie_charts.mp4",
"a_squirrel_wearing_a_tuxedo_and_holding_a_conductor's_baton.mp4",
"a_team_of_butterflies_playing_soccer_on_a_field.mp4",
"a_teddy_bear_pushing_a_shopping_cart_full_of_fruits_and_vegetables.mp4",
"a_tiger_dressed_as_a_military_general.mp4",
"a_tiger_karate_master.mp4",
"a_tiger_playing_the_violin.mp4",
"a_tiger_waiter_at_a_fancy_restaurant.mp4",
"a_tiger_wearing_a_tuxedo.mp4",
"a_t-rex_roaring_up_into_the_air.mp4",
"a_turtle_standing_on_its_hind_legs,_wearing_a_top_hat_and_holding_a_cane.mp4",
"a_typewriter.mp4",
"a_walrus_smoking_a_pipe.mp4",
"a_wedge_of_cheese_on_a_silver_platter.mp4",
"a_wide_angle_DSLR_photo_of_a_colorful_rooster.mp4",
"a_wide_angle_DSLR_photo_of_a_humanoid_banana_sitting_at_a_desk_doing_homework.mp4",
"a_wide_angle_DSLR_photo_of_a_mythical_troll_stirring_a_cauldron.mp4",
"a_wide_angle_DSLR_photo_of_a_squirrel_in_samurai_armor_wielding_a_katana.mp4",
"a_wide_angle_zoomed_out_DSLR_photo_of_A_red_dragon_dressed_in_a_tuxedo_and_playing_chess._The_chess_pieces_are_fashioned_after_robots.mp4",
"a_wide_angle_zoomed_out_DSLR_photo_of_a_skiing_penguin_wearing_a_puffy_jacket.mp4",
"a_wide_angle_zoomed_out_DSLR_photo_of_zoomed_out_view_of_Tower_Bridge_made_out_of_gingerbread_and_candy.mp4",
"a_woolly_mammoth_standing_on_ice.mp4",
"a_yellow_schoolbus.mp4",
"a_zoomed_out_DSLR_photo_of_a_3d_model_of_an_adorable_cottage_with_a_thatched_roof.mp4",
"a_zoomed_out_DSLR_photo_of_a_baby_bunny_sitting_on_top_of_a_stack_of_pancakes.mp4",
"a_zoomed_out_DSLR_photo_of_a_baby_dragon.mp4",
"a_zoomed_out_DSLR_photo_of_a_baby_monkey_riding_on_a_pig.mp4",
"a_zoomed_out_DSLR_photo_of_a_badger_wearing_a_party_hat_and_blowing_out_birthday_candles_on_a_cake.mp4",
"a_zoomed_out_DSLR_photo_of_a_beagle_eating_a_donut.mp4",
"a_zoomed_out_DSLR_photo_of_a_bear_playing_electric_bass.mp4",
"a_zoomed_out_DSLR_photo_of_a_beautifully_carved_wooden_knight_chess_piece.mp4",
"a_zoomed_out_DSLR_photo_of_a_beautiful_suit_made_out_of_moss,_on_a_mannequin._Studio_lighting,_high_quality,_high_resolution.mp4",
"a_zoomed_out_DSLR_photo_of_a_blue_lobster.mp4",
"a_zoomed_out_DSLR_photo_of_a_blue_tulip.mp4",
"a_zoomed_out_DSLR_photo_of_a_bowl_of_cereal_and_milk_with_a_spoon_in_it.mp4",
"a_zoomed_out_DSLR_photo_of_a_brain_in_a_jar.mp4",
"a_zoomed_out_DSLR_photo_of_a_bulldozer_made_out_of_toy_bricks.mp4",
"a_zoomed_out_DSLR_photo_of_a_cake_in_the_shape_of_a_train.mp4",
"a_zoomed_out_DSLR_photo_of_a_chihuahua_lying_in_a_pool_ring.mp4",
"a_zoomed_out_DSLR_photo_of_a_chimpanzee_dressed_as_a_football_player.mp4",
"a_zoomed_out_DSLR_photo_of_a_chimpanzee_holding_a_cup_of_hot_coffee.mp4",
"a_zoomed_out_DSLR_photo_of_a_chimpanzee_wearing_headphones.mp4",
"a_zoomed_out_DSLR_photo_of_a_colorful_camping_tent_in_a_patch_of_grass.mp4",
"a_zoomed_out_DSLR_photo_of_a_complex_movement_from_an_expensive_watch_with_many_shiny_gears,_sitting_on_a_table.mp4",
"a_zoomed_out_DSLR_photo_of_a_construction_excavator.mp4",
"a_zoomed_out_DSLR_photo_of_a_corgi_wearing_a_top_hat.mp4",
"a_zoomed_out_DSLR_photo_of_a_corn_cob_and_a_banana_playing_poker.mp4",
"a_zoomed_out_DSLR_photo_of_a_dachsund_riding_a_unicycle.mp4",
"a_zoomed_out_DSLR_photo_of_a_dachsund_wearing_a_boater_hat.mp4",
"a_zoomed_out_DSLR_photo_of_a_few_pool_balls_sitting_on_a_pool_table.mp4",
"a_zoomed_out_DSLR_photo_of_a_fox_working_on_a_jigsaw_puzzle.mp4",
"a_zoomed_out_DSLR_photo_of_a_fresh_cinnamon_roll_covered_in_glaze.mp4",
"a_zoomed_out_DSLR_photo_of_a_green_tractor.mp4",
"a_zoomed_out_DSLR_photo_of_a_greyhound_dog_racing_down_the_track.mp4",
"a_zoomed_out_DSLR_photo_of_a_group_of_squirrels_rowing_crew.mp4",
"a_zoomed_out_DSLR_photo_of_a_gummy_bear_driving_a_convertible.mp4",
"a_zoomed_out_DSLR_photo_of_a_hermit_crab_with_a_colorful_shell.mp4",
"a_zoomed_out_DSLR_photo_of_a_hippo_biting_through_a_watermelon.mp4",
"a_zoomed_out_DSLR_photo_of_a_hippo_made_out_of_chocolate.mp4",
"a_zoomed_out_DSLR_photo_of_a_humanoid_robot_lying_on_a_couch_using_a_laptop.mp4",
"a_zoomed_out_DSLR_photo_of_a_humanoid_robot_sitting_on_a_chair_drinking_a_cup_of_coffee.mp4",
"a_zoomed_out_DSLR_photo_of_a_human_skeleton_relaxing_in_a_lounge_chair.mp4",
"a_zoomed_out_DSLR_photo_of_a_kangaroo_sitting_on_a_bench_playing_the_accordion.mp4",
"a_zoomed_out_DSLR_photo_of_a_kingfisher_bird.mp4",
"a_zoomed_out_DSLR_photo_of_a_ladybug.mp4",
"a_zoomed_out_DSLR_photo_of_a_lion's_mane_jellyfish.mp4",
"a_zoomed_out_DSLR_photo_of_a_lobster_playing_the_saxophone.mp4",
"a_zoomed_out_DSLR_photo_of_a_majestic_sailboat.mp4",
"a_zoomed_out_DSLR_photo_of_a_marble_bust_of_a_cat,_a_real_mouse_is_sitting_on_its_head.mp4",
"a_zoomed_out_DSLR_photo_of_a_marble_bust_of_a_fox_head.mp4",
"a_zoomed_out_DSLR_photo_of_a_model_of_a_house_in_Tudor_style.mp4",
"a_zoomed_out_DSLR_photo_of_a_monkey-rabbit_hybrid.mp4",
"a_zoomed_out_DSLR_photo_of_a_monkey_riding_a_bike.mp4",
"a_zoomed_out_DSLR_photo_of_a_mountain_goat_standing_on_a_boulder.mp4",
"a_zoomed_out_DSLR_photo_of_a_mouse_holding_a_candlestick.mp4",
"a_zoomed_out_DSLR_photo_of_an_adorable_kitten_lying_next_to_a_flower.mp4",
"a_zoomed_out_DSLR_photo_of_an_all-utility_vehicle_driving_across_a_stream.mp4",
"a_zoomed_out_DSLR_photo_of_an_amigurumi_motorcycle.mp4",
"a_zoomed_out_DSLR_photo_of_an_astronaut_chopping_vegetables_in_a_sunlit_kitchen.mp4",
"a_zoomed_out_DSLR_photo_of_an_egg_cracked_open_with_a_newborn_chick_hatching_out_of_it.mp4",
"a_zoomed_out_DSLR_photo_of_an_expensive_office_chair.mp4",
"a_zoomed_out_DSLR_photo_of_an_origami_bulldozer_sitting_on_the_ground.mp4",
"a_zoomed_out_DSLR_photo_of_an_origami_crane.mp4",
"a_zoomed_out_DSLR_photo_of_an_origami_hippo_in_a_river.mp4",
"a_zoomed_out_DSLR_photo_of_an_otter_lying_on_its_back_in_the_water_holding_a_flower.mp4",
"a_zoomed_out_DSLR_photo_of_a_pair_of_floating_chopsticks_picking_up_noodles_out_of_a_bowl_of_ramen.mp4",
"a_zoomed_out_DSLR_photo_of_a_panda_throwing_wads_of_cash_into_the_air.mp4",
"a_zoomed_out_DSLR_photo_of_a_panda_wearing_a_chef's_hat_and_kneading_bread_dough_on_a_countertop.mp4",
"a_zoomed_out_DSLR_photo_of_a_pigeon_standing_on_a_manhole_cover.mp4",
"a_zoomed_out_DSLR_photo_of_a_pig_playing_the_saxophone.mp4",
"a_zoomed_out_DSLR_photo_of_a_pile_of_dice_on_a_green_tabletop.mp4",
"a_zoomed_out_DSLR_photo_of_a_pita_bread_full_of_hummus_and_falafel_and_vegetables.mp4",
"a_zoomed_out_DSLR_photo_of_a_pug_made_out_of_modeling_clay.mp4",
"a_zoomed_out_DSLR_photo_of_A_punk_rock_squirrel_in_a_studded_leather_jacket_shouting_into_a_microphone_while_standing_on_a_stump_and_holding_a_beer.mp4",
"a_zoomed_out_DSLR_photo_of_a_rabbit_cutting_grass_with_a_lawnmower.mp4",
"a_zoomed_out_DSLR_photo_of_a_rabbit_digging_a_hole_with_a_shovel.mp4",
"a_zoomed_out_DSLR_photo_of_a_raccoon_astronaut_holding_his_helmet.mp4",
"a_zoomed_out_DSLR_photo_of_a_rainforest_bird_mating_ritual_dance.mp4",
"a_zoomed_out_DSLR_photo_of_a_recliner_chair.mp4",
"a_zoomed_out_DSLR_photo_of_a_red_rotary_telephone.mp4",
"a_zoomed_out_DSLR_photo_of_a_robot_couple_fine_dining.mp4",
"a_zoomed_out_DSLR_photo_of_a_rotary_telephone_carved_out_of_wood.mp4",
"a_zoomed_out_DSLR_photo_of_a_shiny_beetle.mp4",
"a_zoomed_out_DSLR_photo_of_a_silver_candelabra_sitting_on_a_red_velvet_tablecloth,_only_one_candle_is_lit.mp4",
"a_zoomed_out_DSLR_photo_of_a_squirrel_DJing.mp4",
"a_zoomed_out_DSLR_photo_of_a_squirrel_dressed_up_like_a_Victorian_woman.mp4",
"a_zoomed_out_DSLR_photo_of_a_table_with_dim_sum_on_it.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_dressed_as_a_maid.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_dressed_as_a_military_general.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_eating_an_ice_cream_cone.mp4",
"a_zoomed_out_DSLR_photo_of_a_tiger_wearing_sunglasses_and_a_leather_jacket,_riding_a_motorcycle.mp4",
"a_zoomed_out_DSLR_photo_of_a_toad_catching_a_fly_with_its_tongue.mp4",
"a_zoomed_out_DSLR_photo_of_a_wizard_raccoon_casting_a_spell.mp4",
"a_zoomed_out_DSLR_photo_of_a_yorkie_dog_dressed_as_a_maid.mp4",
"a_zoomed_out_DSLR_photo_of_cats_wearing_eyeglasses.mp4",
"a_zoomed_out_DSLR_photo_of_miniature_schnauzer_wooden_sculpture,_high_quality_studio_photo.mp4",
"A_zoomed_out_DSLR_photo_of___phoenix_made_of_splashing_water_.mp4",
"a_zoomed_out_DSLR_photo_of_Sydney_opera_house,_aerial_view.mp4",
"a_zoomed_out_DSLR_photo_of_two_foxes_tango_dancing.mp4",
"a_zoomed_out_DSLR_photo_of_two_raccoons_playing_poker.mp4",
"Chichen_Itza,_aerial_view.mp4",
"__Coffee_cup_with_many_holes.mp4",
"fries_and_a_hamburger.mp4",
"__Luminescent_wild_horses.mp4",
"Michelangelo_style_statue_of_an_astronaut.mp4",
"Michelangelo_style_statue_of_dog_reading_news_on_a_cellphone.mp4",
"the_titanic,_aerial_view.mp4",
"two_gummy_bears_playing_dominoes.mp4",
"two_macaw_parrots_playing_chess.mp4",
"Wedding_dress_made_of_tentacles.mp4",
]
def main():
    """Write ``load/prompt_library.json`` mapping gallery names to prompts.

    Each gallery video filename becomes a text prompt by stripping the
    ``.mp4`` extension and turning underscores into spaces.
    """
    prompts = [
        name.replace(".mp4", "").replace("_", " ")
        for name in dreamfusion_gallery_video_names
    ]
    # Path is relative to the repository root; run the script from there.
    with open("load/prompt_library.json", "w") as f:
        json.dump({"dreamfusion": prompts}, f, indent=2)


if __name__ == "__main__":
    main()
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/load/tets/generate_tets.py | load/tets/generate_tets.py | # Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
import os
import numpy as np
"""
This code segment shows how to use Quartet: https://github.com/crawforddoran/quartet,
to generate a tet grid
1) Download, compile and run Quartet as described in the link above. Example usage `quartet meshes/cube.obj 0.5 cube_5.tet`
2) Run the function below to generate a file `cube_32_tet.tet`
"""
def generate_tetrahedron_grid_file(res=32, root=".."):
    """Invoke the Quartet binary to generate a tet grid at resolution ``res``.

    Runs ``./quartet`` inside ``root`` on ``meshes/cube.obj`` with a cell
    size of ``1/res``, writing ``meshes/cube_<res>_tet.tet`` and the boundary
    mesh ``meshes/cube_boundary_<res>.obj``.
    """
    cell_size = 1.0 / res
    os.system(
        f"cd {root}; ./quartet meshes/cube.obj {cell_size} "
        f"meshes/cube_{res}_tet.tet -s meshes/cube_boundary_{res}.obj"
    )
"""
This code segment shows how to convert from a quartet .tet file to compressed npz file
"""
def convert_from_quartet_to_npz(quartetfile="cube_32_tet.tet", npzfile="32_tets"):
    """Convert a Quartet ``.tet`` file into a compressed ``.npz`` archive.

    Quartet format: a header line ``tet <num_vertices> <num_tets>`` followed
    by ``num_vertices`` rows of vertex coordinates and then ``num_tets`` rows
    of tetrahedron vertex indices.

    Args:
        quartetfile: path to the ``.tet`` file produced by Quartet.
        npzfile: output stem; ``np.savez_compressed`` appends ``.npz``.
    """
    # Read only the header here; np.loadtxt reopens the file by path below.
    # (Bug fix: the original left this file handle open — use a context manager.)
    with open(quartetfile, "r") as f:
        header = f.readline()
    fields = header.split(" ")
    numvertices = int(fields[1])
    numtets = int(fields[2])
    print(numvertices, numtets)
    # Vertex block: num_vertices rows of float coordinates.
    vertices = np.loadtxt(quartetfile, skiprows=1, max_rows=numvertices)
    print(vertices.shape)
    # Index block: num_tets rows of integer tet indices.
    indices = np.loadtxt(
        quartetfile, dtype=int, skiprows=1 + numvertices, max_rows=numtets
    )
    print(indices.shape)
    np.savez_compressed(npzfile, vertices=vertices, indices=indices)
if __name__ == "__main__":
    # Fix: the original ran this at import time; guard it so importing the
    # module (e.g. to reuse the conversion helper) has no side effects.
    # NOTE(review): hard-coded path to a local Quartet checkout — adjust
    # before running.
    root = "/home/gyc/quartet"
    for res in [300, 350, 400]:
        generate_tetrahedron_grid_file(res, root)
        convert_from_quartet_to_npz(
            os.path.join(root, f"meshes/cube_{res}_tet.tet"), npzfile=f"{res}_tets"
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/setup.py | setup.py | from setuptools import setup, find_packages
if __name__ == "__main__":
    # Minimal setuptools configuration for an src-layout package:
    # all importable code lives under src/ and is mapped to the top-level
    # import namespace via package_dir.
    setup(
        name="cheap-proteins",
        version="1.0.0",
        author="Amy X. Lu",
        license="MIT",
        author_email="amyxlu@berkeley.edu",
        packages=find_packages(where="src"),
        package_dir={"": "src"},
    )
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/run_benchmark.py | run_benchmark.py | import os
import hydra
import sys
import math
import pprint
import shutil
import logging
import argparse
import numpy as np
import time
import yaml
import easydict
from pathlib import Path
import uuid
import torch
from torch import distributed as dist
from omegaconf import OmegaConf, DictConfig
import torchdrug
from torchdrug import core, datasets, tasks, models, layers
from torchdrug.utils import comm
from plaid.benchmarking import ours, flip
def resolve_cfg(cfg: DictConfig):
    """Materialize an OmegaConf config into an attribute-access EasyDict.

    Interpolations are resolved eagerly (``resolve=True``) so downstream
    code sees plain values.
    """
    container = OmegaConf.to_container(cfg, resolve=True)
    return easydict.EasyDict(container)
def get_root_logger(file=True):
    """Configure and return the root logger at INFO level.

    Args:
        file: when True, also log to ``log.txt`` in the current directory.

    Returns:
        The root ``logging.Logger``.

    Fixes vs. original: the local ``format`` shadowed the builtin and the
    Formatter was built even when unused; a guard now prevents stacking a
    duplicate FileHandler if this is called more than once.
    """
    logger = logging.getLogger("")
    logger.setLevel(logging.INFO)
    already_has_file = any(
        isinstance(h, logging.FileHandler) for h in logger.handlers
    )
    if file and not already_has_file:
        formatter = logging.Formatter("%(asctime)-10s %(message)s", "%H:%M:%S")
        handler = logging.FileHandler("log.txt")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def create_working_directory(cfg):
    """Create a unique per-run output directory, synchronized across ranks.

    Rank 0 creates ``<output_dir>/<task>/<dataset>/<model>_<timestamp>_<hash>``
    and hands the path to the other ranks through a temporary file (the name
    contains a timestamp and a random hash, so it must be chosen exactly
    once); every process then chdirs into it.

    Returns:
        The chosen working-directory path.
    """
    file_name = "working_dir.tmp"
    hashid = uuid.uuid4().hex[:7]
    world_size = comm.get_world_size()
    # Lazily initialize the default process group for multi-GPU runs.
    if world_size > 1 and not dist.is_initialized():
        comm.init_process_group("nccl", init_method="env://")
    output_dir = os.path.join(
        os.path.expanduser(cfg.output_dir),
        cfg.task["class"],
        cfg.dataset["class"],
        cfg.task.model["class"] + "_" + time.strftime("%Y-%m-%d-%H-%M-%S") + "_" + hashid,
    )
    # Rank 0 publishes the directory name via the temp file; the barriers
    # ensure the file exists before other ranks read it.
    if comm.get_rank() == 0:
        with open(file_name, "w") as fout:
            fout.write(output_dir)
        os.makedirs(output_dir)
    comm.synchronize()
    if comm.get_rank() != 0:
        with open(file_name, "r") as fin:
            output_dir = fin.read()
    comm.synchronize()
    if comm.get_rank() == 0:
        # Best-effort cleanup of the hand-off file.
        try:
            os.remove(file_name)
        except OSError:  # was a bare `except:` — narrowed to filesystem errors
            pass
    os.chdir(output_dir)
    return output_dir
def set_seed(seed):
    """Seed Python hashing, NumPy and PyTorch RNGs for reproducibility.

    Also forces deterministic cuDNN kernels (disables autotuning).

    NOTE(review): the original first called
    ``torch.manual_seed(seed + comm.get_rank())`` and then immediately
    overrode it with ``torch.manual_seed(seed)``, so all ranks ended up with
    the same torch seed anyway. The dead first call is removed here; if
    per-rank seeds were actually intended, seed with ``seed + rank`` instead.
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def build_solver(cfg, logger):
    """Construct the torchdrug training Engine from the config.

    Builds the dataset and its splits, the task model, the optimizer and the
    optional scheduler, then wraps them in a ``core.Engine``.

    Args:
        cfg: resolved easydict config with dataset/task/optimizer/engine keys.
        logger: root logger; dataset stats are logged on rank 0 only.

    Returns:
        A ``core.Engine`` ready for training/evaluation.
    """
    # Build dataset; an explicit test_split overrides the default split set.
    _dataset = core.Configurable.load_config_dict(cfg.dataset)
    if "test_split" in cfg:
        train_set, valid_set, test_set = _dataset.split(["train", "valid", cfg.test_split])
    else:
        train_set, valid_set, test_set = _dataset.split()
    if comm.get_rank() == 0:
        logger.warning(_dataset)
        logger.warning("#train: %d, #valid: %d, #test: %d" % (len(train_set), len(valid_set), len(test_set)))
    # Build the task model; prediction tasks need the dataset's task list.
    if cfg.task["class"] in ["PropertyPrediction", "InteractionPrediction"]:
        cfg.task.task = _dataset.tasks
    task = core.Configurable.load_config_dict(cfg.task)
    # Build optimizer and (optional) scheduler.
    cfg.optimizer.params = task.parameters()
    optimizer = core.Configurable.load_config_dict(cfg.optimizer)
    if "scheduler" not in cfg:  # idiom fix: was `if not "scheduler" in cfg`
        scheduler = None
    else:
        cfg.scheduler.optimizer = optimizer
        scheduler = core.Configurable.load_config_dict(cfg.scheduler)
    solver = core.Engine(task, train_set, valid_set, test_set, optimizer, scheduler, **cfg.engine)
    # Optional discriminative learning rates: scale the encoder LR by
    # lr_ratio while the MLP head keeps the base LR; this replaces the
    # solver's optimizer after construction.
    if "lr_ratio" in cfg:
        cfg.optimizer.params = [
            {"params": solver.model.model.parameters(), "lr": cfg.optimizer.lr * cfg.lr_ratio},
            {"params": solver.model.mlp.parameters(), "lr": cfg.optimizer.lr},
        ]
        optimizer = core.Configurable.load_config_dict(cfg.optimizer)
        solver.optimizer = optimizer
    # Resume model weights (but not optimizer state) from a checkpoint.
    if "checkpoint" in cfg:
        solver.load(cfg.checkpoint, load_optimizer=False)
    return solver
def train_and_validate(cfg, solver):
    """Train in chunks of epochs, scoring on the validation split between chunks.

    Training is divided into ~10 evaluation rounds. After each round the model
    is scored with ``cfg.eval_metric`` (RMSE-style metrics are negated so that
    higher is always better) and the best epoch is tracked.

    NOTE(review): checkpoint saving, reloading of the best epoch, and the
    return statement are commented out, so this currently returns None and
    leaves the solver at the *last* epoch, not the best one — confirm intended.
    """
    step = math.ceil(cfg.train.num_epoch / 10)
    best_score = float("-inf")
    best_epoch = -1
    if not cfg.train.num_epoch > 0:
        return solver, best_epoch
    for i in range(0, cfg.train.num_epoch, step):
        kwargs = cfg.train.copy()
        # the last chunk may be shorter than `step`
        kwargs["num_epoch"] = min(step, cfg.train.num_epoch - i)
        solver.model.split = "train"
        solver.train(**kwargs)
        # solver.save("model_epoch_%d.pth" % solver.epoch)
        if "test_batch_size" in cfg:
            # temporarily switch batch size for evaluation, restored below
            solver.batch_size = cfg.test_batch_size
        solver.model.split = "valid"
        metric = solver.evaluate("valid")
        solver.batch_size = cfg.engine.batch_size
        score = []
        for k, v in metric.items():
            if k.startswith(cfg.eval_metric):
                if "root mean squared error" in cfg.eval_metric:
                    score.append(-v)  # lower RMSE is better, so negate
                else:
                    score.append(v)
        # average over all matching metric entries (e.g. multi-task targets)
        score = sum(score) / len(score)
        if score > best_score:
            best_score = score
            best_epoch = solver.epoch
    # solver.load("model_epoch_%d.pth" % best_epoch)
    # return solver, best_epoch
def test(cfg, solver):
    """Run final evaluation on the validation and test splits."""
    if "test_batch_size" in cfg:
        solver.batch_size = cfg.test_batch_size
    for split_name in ("valid", "test"):
        solver.model.split = split_name
        solver.evaluate(split_name)
@hydra.main(version_base=None, config_path="configs/benchmark", config_name="beta")
def main(cfg: DictConfig) -> None:
    """Entry point: resolve the Hydra config, seed RNGs, build and train the solver."""
    cfg = resolve_cfg(cfg)
    set_seed(0)  # TODO: run with more seeds
    output_dir = create_working_directory(cfg)
    logger = get_root_logger()
    # all relative output paths (checkpoints, logs) land in the run directory
    os.chdir(output_dir)

    solver = build_solver(cfg, logger)
    train_and_validate(cfg, solver)
    # if comm.get_rank() == 0:
    #     logger.warning("Best epoch on valid: %d" % best_epoch)
    # test(cfg, solver)


if __name__ == "__main__":
    main()
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/train_sequence_decoder.py | train_sequence_decoder.py | import typing as T
from pathlib import Path
import os
import hydra
import lightning as L
from lightning.pytorch.loggers import WandbLogger
from lightning.pytorch.utilities import rank_zero_only
from omegaconf import DictConfig, OmegaConf
import torch
import time
from plaid.utils import get_model_device
from plaid.transforms import get_random_sequence_crop_batch
"""
Helper Functions
"""
def make_embedder(lm_embedder_type):
    """Load a frozen protein language model.

    Returns a tuple ``(embedder, alphabet)``; ``alphabet`` is None for
    ESMFold-style embedders and an ESM alphabet for torch-hub LMs.
    """
    t0 = time.time()
    print(f"making {lm_embedder_type}...")

    if "esmfold" in lm_embedder_type:
        # from plaid.denoisers.esmfold import ESMFold
        from plaid.esmfold import esmfold_v1

        embedder, alphabet = esmfold_v1(), None
    else:
        print("loading LM from torch hub")
        embedder, alphabet = torch.hub.load(
            "facebookresearch/esm:main", lm_embedder_type
        )

    # freeze the embedder and keep it on GPU in eval mode
    embedder = embedder.eval().to("cuda")
    for p in embedder.parameters():
        p.requires_grad = False

    print(f"done loading model in {time.time() - t0:.2f} seconds.")
    return embedder, alphabet
def embed_batch_esmfold(esmfold, sequences, max_len=512, embed_result_key="s"):
    """Embed a batch of sequences with ESMFold.

    Sequences are randomly cropped to at most ``max_len`` residues, then the
    requested intermediate representation is pulled out of
    ``esmfold.infer_embedding``.

    Returns:
        (feats, seq_lens, sequences): embedding tensor, int16 CPU tensor of
        post-crop lengths, and the cropped sequence strings.
    """
    with torch.no_grad():
        # don't discard short sequences since we're also saving headers
        sequences = get_random_sequence_crop_batch(
            sequences, max_len=max_len, min_len=0
        )
        seq_lens = [len(seq) for seq in sequences]
        embed_results = esmfold.infer_embedding(sequences, return_intermediates=True)
        # detach: embeddings are treated as fixed features, never backpropped
        feats = embed_results[embed_result_key].detach()
    seq_lens = torch.tensor(seq_lens, device="cpu", dtype=torch.int16)
    return feats, seq_lens, sequences
def embed_batch_esm(embedder, sequences, batch_converter, repr_layer, max_len=512):
    """Embed a batch of sequences with an ESM language model.

    Sequences are randomly cropped to at most ``max_len`` residues, tokenized
    with the ESM batch converter, and the representation at ``repr_layer`` is
    returned.

    Returns:
        (feats, seq_lens, sequences): embedding tensor, int16 CPU tensor of
        post-crop lengths, and the cropped sequence strings.
    """
    sequences = get_random_sequence_crop_batch(sequences, max_len=max_len, min_len=0)
    seq_lens = [len(seq) for seq in sequences]
    seq_lens = torch.tensor(seq_lens, device="cpu", dtype=torch.int16)

    # ESM's batch converter expects (label, sequence) pairs; labels are unused
    batch = [("", seq) for seq in sequences]
    _, _, tokens = batch_converter(batch)
    device = get_model_device(embedder)
    tokens = tokens.to(device)

    with torch.no_grad():
        results = embedder(tokens, repr_layers=[repr_layer], return_contacts=False)
        feats = results["representations"][repr_layer]

    return feats, seq_lens, sequences
"""
Training
"""
@hydra.main(
    version_base=None, config_path="configs", config_name="train_sequence_decoder"
)
def train(cfg: DictConfig):
    """Train the sequence decoder on frozen language-model embeddings.

    Sets up the datamodule, a frozen embedder (an ESMFold intermediate or an
    ESM LM), an on-the-fly embedding function handed to the decoder, and a
    Lightning trainer with checkpointing and (optionally) W&B logging.
    """
    torch.set_float32_matmul_precision("medium")
    log_cfg = OmegaConf.to_container(cfg, throw_on_missing=True, resolve=True)
    if rank_zero_only.rank == 0:
        print(OmegaConf.to_yaml(log_cfg))

    datamodule = hydra.utils.instantiate(cfg.datamodule)
    datamodule.setup(stage="fit")
    max_seq_len = cfg.max_seq_len

    # maybe set up the scaler; a missing/invalid `latent_scaler` config means
    # "don't scale" (narrowed from a bare except so Ctrl-C still propagates)
    try:
        latent_scaler = hydra.utils.instantiate(cfg.latent_scaler)
        print("scaling")
    except Exception:
        latent_scaler = None
        print("not scaling")

    # Set up the frozen embedding model
    lm_embedder_type = cfg.lm_embedder_type
    embedder, alphabet = make_embedder(lm_embedder_type)

    if "esmfold" in lm_embedder_type:
        # grabbing intermediates from ESMFold
        batch_converter = None
        repr_layer = None
        if lm_embedder_type == "esmfold":
            embed_result_key = "s"
        elif lm_embedder_type == "esmfold_pre_mlp":
            embed_result_key = "s_post_softmax"
        else:
            raise ValueError(f"lm embedder type {lm_embedder_type} not understood.")
    else:
        # ESM LM-only models; e.g. "esm2_t33_650M_UR50D" -> repr layer 33
        batch_converter = alphabet.get_batch_converter()
        repr_layer = int(lm_embedder_type.split("_")[1][1:])
        embed_result_key = None

    # Make the embedding function
    embedder = embedder.eval().requires_grad_(False)

    if "esmfold" in lm_embedder_type:
        fn = lambda seqs: embed_batch_esmfold(
            embedder, seqs, max_seq_len, embed_result_key
        )[0]
    else:
        # NOTE(review): unlike the ESMFold branch, this returns the full
        # (feats, seq_lens, sequences) tuple rather than feats only —
        # confirm the decoder expects that.
        fn = lambda seqs: embed_batch_esm(
            embedder, seqs, batch_converter, repr_layer, max_seq_len
        )

    # Run training
    model = hydra.utils.instantiate(
        cfg.sequence_decoder,
        training_embed_from_sequence_fn=fn,
        latent_scaler=latent_scaler,
    )

    job_id = os.environ.get("SLURM_JOB_ID")  # is None if not using SLURM
    # Bugfix: Path / None raises TypeError; fall back to "local" off-SLURM.
    dirpath = Path(cfg.paths.checkpoint_dir) / "sequence_decoder" / (job_id or "local")

    if not cfg.dryrun:
        logger = hydra.utils.instantiate(cfg.logger, id=job_id)
        logger.watch(model, log="all", log_graph=False)
    else:
        logger = None

    lr_monitor = hydra.utils.instantiate(cfg.callbacks.lr_monitor)
    checkpoint_callback = hydra.utils.instantiate(
        cfg.callbacks.checkpoint, dirpath=dirpath
    )
    trainer = hydra.utils.instantiate(
        cfg.trainer, logger=logger, callbacks=[lr_monitor, checkpoint_callback]
    )
    if rank_zero_only.rank == 0 and isinstance(trainer.logger, WandbLogger):
        trainer.logger.experiment.config.update({"cfg": log_cfg}, allow_val_change=True)
    if not cfg.dryrun:
        trainer.fit(model, datamodule=datamodule)


if __name__ == "__main__":
    train()
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/benchmarking/ours.py | src/benchmarking/ours.py | import os
from pathlib import Path
from einops import reduce
import torch
from torch import nn
from torchdrug import core, layers, utils, data
from torchdrug.layers import functional
from torchdrug.core import Registry as R
import numpy as np
import string
# disassembled to maintain consistency with
# https://github.com/DeepGraphLearning/torchdrug/blob/master/torchdrug/data/protein.py#L372
# https://github.com/DeepGraphLearning/torchdrug/blob/master/torchdrug/data/protein.py#L50
# Three-letter residue name -> integer id (20 canonical amino acids).
residue2id = {
    "GLY": 0,
    "ALA": 1,
    "SER": 2,
    "PRO": 3,
    "VAL": 4,
    "THR": 5,
    "CYS": 6,
    "ILE": 7,
    "LEU": 8,
    "ASN": 9,
    "ASP": 10,
    "GLN": 11,
    "LYS": 12,
    "GLU": 13,
    "MET": 14,
    "HIS": 15,
    "PHE": 16,
    "ARG": 17,
    "TYR": 18,
    "TRP": 19,
}
# One-letter residue symbol -> integer id; same ordering as residue2id.
residue_symbol2id = {
    "G": 0,
    "A": 1,
    "S": 2,
    "P": 3,
    "V": 4,
    "T": 5,
    "C": 6,
    "I": 7,
    "L": 8,
    "N": 9,
    "D": 10,
    "Q": 11,
    "K": 12,
    "E": 13,
    "M": 14,
    "H": 15,
    "F": 16,
    "R": 17,
    "Y": 18,
    "W": 19,
}
# PDB atom name -> integer id ("UNK" is the catch-all for unknown atoms).
atom_name2id = {
    "C": 0,
    "CA": 1,
    "CB": 2,
    "CD": 3,
    "CD1": 4,
    "CD2": 5,
    "CE": 6,
    "CE1": 7,
    "CE2": 8,
    "CE3": 9,
    "CG": 10,
    "CG1": 11,
    "CG2": 12,
    "CH2": 13,
    "CZ": 14,
    "CZ2": 15,
    "CZ3": 16,
    "N": 17,
    "ND1": 18,
    "ND2": 19,
    "NE": 20,
    "NE1": 21,
    "NE2": 22,
    "NH1": 23,
    "NH2": 24,
    "NZ": 25,
    "O": 26,
    "OD1": 27,
    "OD2": 28,
    "OE1": 29,
    "OE2": 30,
    "OG": 31,
    "OG1": 32,
    "OH": 33,
    "OXT": 34,
    "SD": 35,
    "SG": 36,
    "UNK": 37,
}
# Generic character vocabulary: space, A-Z, a-z, 0-9.
alphabet2id = {
    c: i for i, c in enumerate(" " + string.ascii_uppercase + string.ascii_lowercase + string.digits)
}
# Inverse lookup tables for the four vocabularies above.
id2residue = {v: k for k, v in residue2id.items()}
id2residue_symbol = {v: k for k, v in residue_symbol2id.items()}
id2atom_name = {v: k for k, v in atom_name2id.items()}
id2alphabet = {v: k for k, v in alphabet2id.items()}
def to_sequence(residue_type, num_residues=None):
    """
    Return the one-letter amino-acid string for a tensor of residue-type ids.

    Args:
        residue_type: 1-D tensor of integer residue ids.
        num_residues: optional prefix length; defaults to the full tensor.

    Returns:
        str
    """
    ids = residue_type.tolist()
    if num_residues is None:
        num_residues = len(ids)
    return "".join(id2residue_symbol[i] for i in ids[:num_residues])
def to_tensor(x, device=None, dtype=None):
    """Coerce ``x`` (tensor / ndarray / array-like) into a ``torch.Tensor``,
    optionally moving it to ``device`` and casting it to ``dtype``."""
    if isinstance(x, np.ndarray):
        tensor = torch.from_numpy(x)
    elif isinstance(x, torch.Tensor):
        tensor = x
    else:
        tensor = torch.tensor(x)
    if device is not None:
        tensor = tensor.to(device)
    if dtype is not None:
        tensor = tensor.type(dtype)
    return tensor
def pad_to_multiple(x, shorten_factor):
    """Right-pad ``x`` along dim 1 so its length is a multiple of ``shorten_factor``.

    Padding value is 0; tensors already at a multiple are returned unchanged.
    """
    from plaid.transforms import trim_or_pad_batch_first

    s = shorten_factor
    extra = x.shape[1] % s
    if extra != 0:
        needed = s - extra
        x = trim_or_pad_batch_first(x, pad_to=x.shape[1] + needed, pad_idx=0)
    return x
@R.register("models.PLAID")
class PLAID(nn.Module, core.Configurable):
    """Torchdrug-compatible encoder over CHEAP/PLAID compressed ESMFold embeddings.

    Sequences are embedded with a frozen ESMFold trunk, scaled, and (unless the
    identity compression is chosen) passed through a frozen hourglass
    compression model. Produces a mean-pooled graph feature plus per-residue
    features.

    Parameters:
        compression_model_id (str): hourglass checkpoint id, or "identity" to
            use raw 1024-dim ESMFold embeddings.
        hourglass_weights_dir (str): root directory holding checkpoints.
        pool (str): pooling mode; asserted to be "mean" or "attention".
            NOTE(review): the argument is never stored and forward() always
            mean-pools — "attention" currently has no effect.
    """

    def __init__(
        self,
        compression_model_id="identity",
        hourglass_weights_dir="/data/lux70/cheap/checkpoints",
        pool="mean",
    ):
        super().__init__()
        assert pool in ["mean", "attention"]

        from plaid.compression.hourglass_vq import HourglassVQLightningModule
        from plaid.utils import LatentScaler
        from plaid.esmfold import esmfold_v1

        ckpt_dir = Path(hourglass_weights_dir)
        ckpt_path = ckpt_dir / compression_model_id / "last.ckpt"

        if compression_model_id == "identity":
            # no compression: raw ESMFold embeddings, length unchanged
            self.hourglass = None
            self.shorten_factor = 1
            self.output_dim = 1024
        else:
            self.hourglass = HourglassVQLightningModule.load_from_checkpoint(ckpt_path)
            self.hourglass.eval().requires_grad_(False)
            self.shorten_factor = self.hourglass.enc.shorten_factor
            self.output_dim = 1024 // self.hourglass.enc.downproj_factor

        self.scaler = LatentScaler()
        self.esmfold = esmfold_v1().eval().requires_grad_(False)
        self.pad_idx = 0

    def forward(self, graph, input, all_loss=None, metric=None):
        """Embed a batch of proteins.

        Returns:
            dict with "graph_feature" (masked mean pool over positions) and
            "residue_feature" (variadic per-residue features when
            shorten_factor == 1, else the padded compressed features).
        """
        residues = graph.residue_type
        size = graph.num_residues
        # pad variadic residue ids into a dense (batch, max_len) tensor
        residues, mask = functional.variadic_to_padded(residues, size, value=self.pad_idx)
        mask = mask.to(self.device)

        with torch.no_grad():
            sequences = [to_sequence(residues[i, ...]) for i in range(len(residues))]
            latent = self.esmfold.infer_embedding(sequences)["s"]
            latent = self.scaler.scale(latent)
            latent = latent.to(self.device)

        if not self.hourglass is None:
            with torch.no_grad():
                residue_feature = self.hourglass(latent, mask, infer_only=True)
            residue_feature = to_tensor(residue_feature).to(self.device)
        else:
            residue_feature = to_tensor(latent.detach()).to(self.device)

        # mean pool with mask: downsample the mask by the shorten factor so it
        # lines up with the compressed sequence length
        mask = pad_to_multiple(mask, self.shorten_factor)
        downsampled_mask = reduce(mask, "b (n s) -> b n", "sum", s=self.shorten_factor) > 0
        downsampled_mask = downsampled_mask.unsqueeze(-1)
        if downsampled_mask.shape[1] != residue_feature.shape[1]:
            from plaid.transforms import trim_or_pad_batch_first

            downsampled_mask = trim_or_pad_batch_first(downsampled_mask, residue_feature.shape[1], pad_idx=0)
        graph_feature = (residue_feature * downsampled_mask.long()).sum(dim=1) / downsampled_mask.sum(dim=1)

        if self.shorten_factor == 1:
            # hack -- only used for the contact prediction tasks, which need
            # full-length per-residue features in variadic layout
            adjusted_size = size // self.shorten_factor
            residue_feature = functional.padded_to_variadic(residue_feature, adjusted_size)
            starts = adjusted_size.cumsum(0) - adjusted_size
            # NOTE(review): `ends` uses `size`, not `adjusted_size`; identical
            # here because shorten_factor == 1 in this branch.
            ends = starts + size
            mask = functional.multi_slice_mask(starts, ends, len(residue_feature))
            residue_feature = residue_feature[mask]

        return {"graph_feature": graph_feature, "residue_feature": residue_feature}
if __name__ == "__main__":
    # Smoke test: train a beta-lactamase property-prediction head on top of
    # frozen PLAID features for a few epochs.
    compression_model_id = "kyytc8i9"
    model = PLAID(compression_model_id)

    from torchdrug import transforms

    # truncate long proteins and expose them at residue granularity
    truncate_transform = transforms.TruncateProtein(max_length=200, random=False)
    protein_view_transform = transforms.ProteinView(view="residue")
    transform = transforms.Compose([truncate_transform, protein_view_transform])

    from torchdrug import datasets

    dataset = datasets.BetaLactamase(
        "~/protein-datasets/",
        atom_feature=None,
        bond_feature=None,
        residue_feature="default",
        transform=transform,
    )
    train_set, valid_set, test_set = dataset.split()
    print("The label of first sample: ", dataset[0][dataset.target_fields[0]])
    print(
        "train samples: %d, valid samples: %d, test samples: %d"
        % (len(train_set), len(valid_set), len(test_set))
    )

    from torchdrug import tasks

    task = tasks.PropertyPrediction(
        model,
        task=dataset.tasks,
        criterion="mse",
        metric=("mae", "rmse", "spearmanr"),
        normalization=False,
        num_mlp_layer=2,
    )

    import torch
    from torchdrug import core

    optimizer = torch.optim.Adam(task.parameters(), lr=1e-4)
    solver = core.Engine(task, train_set, valid_set, test_set, optimizer, gpus=[0], batch_size=64)
    solver.train(num_epoch=10)
    solver.evaluate("valid")
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/benchmarking/flip.py | src/benchmarking/flip.py | import os
import csv
import math
from collections import defaultdict
from tqdm import tqdm
from torch.utils import data as torch_data
from torchdrug import data, utils
from torchdrug.core import Registry as R
class FLIPDataset(data.ProteinDataset):
    """Base class for FLIP benchmark datasets loaded from a split CSV.

    The CSV carries a sequence column, target column(s), a "set" column
    (train/test) and a "validation" flag marking validation rows inside the
    train split. Rows are re-ordered to train (minus valid), valid, test, and
    ``self.num_samples`` records the three split sizes in that order.
    """

    def load_csv(self, csv_file, sequence_field="sequence", target_fields=None, verbose=0, **kwargs):
        """Parse a FLIP split CSV and load sequences + targets.

        Args:
            csv_file: path to the split CSV.
            sequence_field: name of the sequence column.
            target_fields: optional subset of target columns to keep.
            verbose: show a tqdm progress bar when truthy.
        """
        if target_fields is not None:
            target_fields = set(target_fields)

        with open(csv_file, "r") as fin:
            reader = csv.reader(fin)
            if verbose:
                reader = iter(tqdm(reader, "Loading %s" % csv_file, utils.get_line_count(csv_file)))
            fields = next(reader)
            train, valid, test = [], [], []
            _sequences = []
            _targets = defaultdict(list)
            for i, values in enumerate(reader):
                for field, value in zip(fields, values):
                    if field == sequence_field:
                        _sequences.append(value)
                    elif target_fields is None or field in target_fields:
                        value = utils.literal_eval(value)
                        if value == "":
                            value = math.nan  # missing targets become NaN
                        _targets[field].append(value)
                    elif field == "set":
                        if value == "train":
                            train.append(i)
                        elif value == "test":
                            test.append(i)
                    elif field == "validation":
                        # validation rows are a flagged subset of the train split
                        if value == "True":
                            valid.append(i)

        # re-order rows as train-without-valid, valid, test so contiguous
        # Subset ranges can recover the three splits
        valid_set = set(valid)
        sequences = (
            [_sequences[i] for i in train if i not in valid_set]
            + [_sequences[i] for i in valid]
            + [_sequences[i] for i in test]
        )
        targets = defaultdict(list)
        for key, value in _targets.items():
            targets[key] = (
                [value[i] for i in train if i not in valid_set]
                + [value[i] for i in valid]
                + [value[i] for i in test]
            )

        self.load_sequence(sequences, targets, verbose=verbose, **kwargs)
        self.num_samples = [len(train) - len(valid), len(valid), len(test)]
@R.register("datasets.AAV")
class AAV(FLIPDataset):
    """FLIP AAV capsid fitness-landscape dataset (regression on ``target``)."""

    url = (
        "https://github.com/J-SNACKKB/FLIP/raw/d5c35cc716ca93c3c74a0b43eef5b60cbf88521f/splits/aav/splits.zip"
    )
    md5 = "cabdd41f3386f4949b32ca220db55c58"
    splits = ["train", "valid", "test"]
    target_fields = ["target"]
    # residue span of the mutated capsid region (0-indexed, half-open)
    region = slice(474, 674)

    def __init__(self, path, split="two_vs_many", keep_mutation_region=False, verbose=1, **kwargs):
        """
        Args:
            path: root directory for downloaded data (expanded and suffixed with "aav").
            split: which FLIP split scheme to use.
            keep_mutation_region: if True, truncate proteins and sequences to
                the mutated capsid region only.
            verbose: passed through to CSV loading (progress bar).
        """
        path = os.path.expanduser(path)
        path = os.path.join(path, "aav")
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        assert split in [
            "des_mut",
            "low_vs_high",
            "mut_des",
            "one_vs_many",
            "sampled",
            "seven_vs_many",
            "two_vs_many",
        ]

        # download is cached and md5-verified
        zip_file = utils.download(self.url, path, md5=self.md5)
        data_path = utils.extract(zip_file)
        csv_file = os.path.join(data_path, "splits/%s.csv" % split)

        self.load_csv(csv_file, target_fields=self.target_fields, verbose=verbose, **kwargs)
        if keep_mutation_region:
            for i in range(len(self.data)):
                self.data[i] = self.data[i][self.region]
                self.sequences[i] = self.sequences[i][self.region]

    def split(self):
        """Slice the flattened dataset back into (train, valid, test) subsets."""
        offset = 0
        splits = []
        for num_sample in self.num_samples:
            split = torch_data.Subset(self, range(offset, offset + num_sample))
            splits.append(split)
            offset += num_sample
        return splits
@R.register("datasets.GB1")
class GB1(FLIPDataset):
    """FLIP GB1 binding fitness-landscape dataset (regression on ``target``)."""

    url = (
        "https://github.com/J-SNACKKB/FLIP/raw/d5c35cc716ca93c3c74a0b43eef5b60cbf88521f/splits/gb1/splits.zip"
    )
    md5 = "14216947834e6db551967c2537332a12"
    splits = ["train", "valid", "test"]
    target_fields = ["target"]

    def __init__(self, path, split="two_vs_rest", verbose=1, **kwargs):
        """
        Args:
            path: root directory for downloaded data (expanded, suffixed with "gb1").
            split: which FLIP split scheme to use.
            verbose: passed through to CSV loading (progress bar).
        """
        path = os.path.expanduser(path)
        path = os.path.join(path, "gb1")
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        assert split in ["one_vs_rest", "two_vs_rest", "three_vs_rest", "low_vs_high", "sampled"]

        # download is cached and md5-verified
        zip_file = utils.download(self.url, path, md5=self.md5)
        data_path = utils.extract(zip_file)
        csv_file = os.path.join(data_path, "splits/%s.csv" % split)

        self.load_csv(csv_file, target_fields=self.target_fields, verbose=verbose, **kwargs)

    def split(self):
        """Slice the flattened dataset back into (train, valid, test) subsets."""
        offset = 0
        splits = []
        for num_sample in self.num_samples:
            split = torch_data.Subset(self, range(offset, offset + num_sample))
            splits.append(split)
            offset += num_sample
        return splits
@R.register("datasets.Thermostability")
class Thermostability(FLIPDataset):
    """FLIP meltome thermostability dataset (regression on ``target``)."""

    url = "https://github.com/J-SNACKKB/FLIP/raw/d5c35cc716ca93c3c74a0b43eef5b60cbf88521f/splits/meltome/splits.zip"
    md5 = "0f8b1e848568f7566713d53594c0ca90"
    splits = ["train", "valid", "test"]
    target_fields = ["target"]

    def __init__(self, path, split="human_cell", verbose=1, **kwargs):
        """
        Args:
            path: root directory for downloaded data (expanded, suffixed with
                "thermostability").
            split: which FLIP split scheme to use.
            verbose: passed through to CSV loading (progress bar).
        """
        path = os.path.expanduser(path)
        path = os.path.join(path, "thermostability")
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path
        assert split in ["human", "human_cell", "mixed_split"]

        # download is cached and md5-verified
        zip_file = utils.download(self.url, path, md5=self.md5)
        data_path = utils.extract(zip_file)
        csv_file = os.path.join(data_path, "splits/%s.csv" % split)

        self.load_csv(csv_file, target_fields=self.target_fields, verbose=verbose, **kwargs)

    def split(self):
        """Slice the flattened dataset back into (train, valid, test) subsets."""
        offset = 0
        splits = []
        for num_sample in self.num_samples:
            split = torch_data.Subset(self, range(offset, offset + num_sample))
            splits.append(split)
            offset += num_sample
        return splits
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/proteins.py | src/cheap/proteins.py | import os
import re
import typing as T
from pathlib import Path
from tqdm import trange
import numpy as np
import torch
import torch.nn.functional as F
import pandas as pd
import torch
import typing as T
import numpy as np
import re
from openfold.np import residue_constants
from lightning.pytorch.utilities import rank_zero_info
from .utils import npy, to_tensor, get_model_device, trim_or_pad_batch_first
from .decoder import FullyConnectedNetwork
from .esmfold import ESMFOLD_Z_DIM, esmfold_v1, output_to_pdb, batch_encode_sequences
from .typed import PathLike, ArrayLike
# The 20 canonical amino acids; used to filter out non-standard residues.
CANONICAL_AA = "ACDEFGHIKLMNPQRSTVWY"

# ProteinMPNN residue alphabet; an id is the character's index in this list.
# https://github.com/dauparas/ProteinMPNN/blob/main/protein_mpnn_utils.py#L61
PROTEINMPNN_AACHAR_TO_AAIDX_ARR = list("ARNDCQEGHILKMFPSTWYV-")
PROTEINMPNN_AAIDX_TO_AACHAR = {
    idx: char for idx, char in enumerate(PROTEINMPNN_AACHAR_TO_AAIDX_ARR)
}
PROTEINMPNN_AACHAR_TO_AAIDX = {
    char: idx for idx, char in enumerate(PROTEINMPNN_AACHAR_TO_AAIDX_ARR)
}
def stack_tensor_dicts(
    dicts: T.List[T.Dict[str, torch.Tensor]], list_of_igored_keys: T.List[str]
):
    """Concatenate per-batch tensor dicts along dim 0, skipping ignored keys.

    Keys missing from some dicts are concatenated over the dicts that have
    them. (The misspelled parameter name is kept for keyword-caller
    compatibility.)
    """
    ignored = set(list_of_igored_keys)
    all_keys = {k for d in dicts for k in d} - ignored
    stacked = {}
    for key in all_keys:
        stacked[key] = torch.cat([d[key] for d in dicts if key in d], dim=0)
    return stacked
class DecoderTokenizer:
    """Maps amino-acid characters to/from integer ids for the sequence decoder.

    Two vocabularies are supported: the OpenFold residue ordering
    (``residue_constants.restype_order_with_x``) and the ProteinMPNN ordering.
    """

    def __init__(self, vocab="openfold"):
        if vocab == "openfold":
            self.aachar_to_aaidx = residue_constants.restype_order_with_x
            self.aaidx_to_aachar = {v: k for k, v in self.aachar_to_aaidx.items()}
        elif vocab == "proteinmpnn":
            self.aachar_to_aaidx = PROTEINMPNN_AACHAR_TO_AAIDX
            self.aaidx_to_aachar = PROTEINMPNN_AAIDX_TO_AACHAR
        else:
            raise ValueError(f"Unknown vocab {vocab}")

        self.vocab_size = len(self.aaidx_to_aachar)
        # NOTE(review): these special characters may be absent from the chosen
        # vocab, in which case the index is None — callers that rely on
        # pad/bos/eos should pass explicit values (see batch_encode_sequences).
        self.unk_idx = self.aachar_to_aaidx.get("X", None)
        self.pad_idx = self.aachar_to_aaidx.get("_", None)
        self.eos_idx = self.aachar_to_aaidx.get(">", None)
        self.bos_idx = self.aachar_to_aaidx.get("<", None)

    def __len__(self):
        return len(self.aaidx_to_aachar)

    def _char2idx(self, char: str) -> int:
        # unknown characters fall back to the UNK index
        return self.aachar_to_aaidx.get(char, self.unk_idx)

    def str_to_aatype_sequence(
        self, seq: T.Union[T.Iterable, str], as_torch_tensor: bool = True
    ):
        """Encode a string (or iterable of characters) as residue-type ids."""
        if isinstance(seq, str):
            seq = list(seq)
        aatype = [self._char2idx(aa) for aa in seq]
        if as_torch_tensor:
            return torch.tensor(aatype)
        else:
            return aatype

    def aatype_to_str_sequence(self, aatype: T.List[int], strip_mode: str = "none"):
        """Decode residue-type ids back to a string.

        Args:
            aatype: iterable/tensor of residue ids.
            strip_mode: "none" (raw), "strip_pad" (drop "_"), "strip_eos"
                (drop ">" and everything after), or "strip_after_eos" (keep
                ">" but drop everything after).
        """
        assert strip_mode in ["none", "strip_pad", "strip_eos", "strip_after_eos"]
        aastr = "".join([self.aaidx_to_aachar[aa] for aa in npy(aatype)])
        if strip_mode == "none":
            return aastr
        elif strip_mode == "strip_pad":
            aastr = aastr.replace("_", "")
        elif strip_mode in ("strip_eos", "strip_after_eos"):
            if strip_mode == "strip_eos":
                # strip ">" and everything after it
                pattern = r"^(.*?)[>]"
            else:
                # keep ">" but strip everything after it
                pattern = r"^(.*?[>])"
            # Bugfix: previously only the "strip_after_eos" branch applied the
            # regex, so "strip_eos" silently returned the unstripped string.
            match = re.search(pattern, aastr)
            if match:
                aastr = match.group(1)
        else:
            raise ValueError(f"Unrecognized strip_mode: {strip_mode}")
        return aastr

    def collate_dense_tensors(
        self, samples: T.List[torch.Tensor], pad_v: int
    ) -> torch.Tensor:
        """
        Takes a list of tensors with the following dimensions:
            [(d_11, ..., d_1K),
             (d_21, ..., d_2K),
             ...,
             (d_N1, ..., d_NK)]
        and stack + pads them into a single tensor of:
        (N, max_i=1,N { d_i1 }, ..., max_i=1,N {diK})
        """
        if len(samples) == 0:
            return torch.Tensor()
        if len(set(x.dim() for x in samples)) != 1:
            raise RuntimeError(
                f"Samples has varying dimensions: {[x.dim() for x in samples]}"
            )
        (device,) = tuple(set(x.device for x in samples))  # assumes all on same device
        max_shape = [max(lst) for lst in zip(*[x.shape for x in samples])]
        result = torch.empty(
            len(samples), *max_shape, dtype=samples[0].dtype, device=device
        )
        result.fill_(pad_v)
        for i in range(len(samples)):
            result_i = result[i]
            t = samples[i]
            result_i[tuple(slice(0, k) for k in t.shape)] = t
        return result

    def batch_encode_sequences(
        self, sequences: T.Sequence[str], pad_v: T.Optional[int] = None
    ) -> T.Tuple[torch.Tensor, torch.Tensor]:
        """Encode sequences using this tokenization scheme, mostly to generate labels during training
        of the autoregressive decoder.

        Args:
            sequences (T.Sequence[str]): List of amino acid sequence strings.
            pad_v: padding value; defaults to ``self.pad_idx``.

        Returns:
            T.Tuple[torch.Tensor, torch.Tensor]: Amino acid indices and mask (0 if padded, 1 otherwise).
        """
        if pad_v is None:
            # NOTE(review): pad_idx can itself be None when "_" is absent from
            # the vocab — pass an explicit pad value in that case.
            pad_v = self.pad_idx
        aatype_list = []
        for seq in sequences:
            aatype_seq = self.str_to_aatype_sequence(seq)
            aatype_list.append(aatype_seq)
        aatype = self.collate_dense_tensors(aatype_list, pad_v=pad_v)
        # NOTE(review): the mask is also padded with pad_v; if pad_v != 0 the
        # padded positions are not zero — confirm downstream masking.
        mask = self.collate_dense_tensors(
            [aatype.new_ones(len(aatype_seq)) for aatype_seq in aatype_list],
            pad_v=pad_v,
        )
        return aatype, mask

    def _is_valid_aa(self, aa):
        # only the 20 canonical amino acids count as valid
        return aa in CANONICAL_AA

    def remove_invalid_aa(self, string: str):
        """Drop every non-canonical character from ``string``."""
        return "".join([s for s in string if self._is_valid_aa(s)])
class LatentToSequence:
    """Decode per-position latents into amino-acid sequences using the
    pretrained fully-connected decoder head."""

    def __init__(self, temperature: float = 1.0):
        """On construction, all models are on the CPU."""
        self.temperature = temperature
        self.tokenizer = DecoderTokenizer()
        self.decoder = FullyConnectedNetwork.from_pretrained(device="cpu")
        self.device = torch.device("cpu")
        self.decoder.eval()
        for param in self.decoder.parameters():
            param.requires_grad = False

    def to(self, device):
        """Move onto the device for the usecase before calling to_sequence()."""
        self.decoder = self.decoder.to(device)
        self.device = device
        return self

    def to_sequence(
        self, latent: ArrayLike, mask=None, return_logits=False, drop_mask_idx=True
    ):
        """Decode ``latent`` into per-position residues.

        Samples categorically at ``self.temperature``; with ``drop_mask_idx``
        the UNK logit column is removed before sampling.

        Returns:
            (logits, sequence_idx, sequence_str) when ``return_logits`` else
            (sequence_probs, sequence_idx, sequence_str).
        """
        # NOTE(review): this check looks inverted — `not mask is None` builds
        # a fresh all-ones mask exactly when a mask WAS passed — and `mask` is
        # never read below. Confirm whether masking was meant to be applied.
        if not mask is None:
            mask = torch.ones_like(latent)
        latent = to_tensor(latent, device=self.device)
        assert (
            latent.device == get_model_device(self.decoder)
        ), "Make sure to call .to(device) to move decoder to the correct device."

        with torch.no_grad():
            output_logits = self.decoder(latent)
        # adjust by temperature
        output_logits /= self.temperature

        # remove UNK token
        if drop_mask_idx:
            _mask = (
                torch.arange(output_logits.shape[-1], device=self.device)
                != self.tokenizer.unk_idx
            )
            drop_mask_logits = torch.index_select(
                input=output_logits,
                dim=-1,
                index=torch.arange(output_logits.shape[-1], device=self.device)[_mask],
            )
            argmax_idx = drop_mask_logits.argmax(-1)
            dist = torch.distributions.OneHotCategorical(logits=drop_mask_logits)
            sequence_probs = F.softmax(drop_mask_logits, dim=-1)
        else:
            # get the argmax index & compare it to the actual sample, to get a
            # sense as to how temperature affects diversity
            argmax_idx = output_logits.argmax(-1)
            dist = torch.distributions.OneHotCategorical(logits=output_logits)
            sequence_probs = F.softmax(output_logits, dim=-1)

        sequence_idx = dist.sample().argmax(-1)
        # NOTE(review): probabilities are gathered at the ARGMAX index, not at
        # the sampled index — confirm that is intentional.
        sequence_probs = torch.gather(
            sequence_probs, dim=-1, index=argmax_idx.unsqueeze(-1)
        ).squeeze(-1)
        # fraction of positions where the sample agrees with the argmax
        stochasticity = (argmax_idx == sequence_idx).sum() / torch.numel(argmax_idx)
        # print(f"percentage similarty to argmax idx: {stochasticity:.3f}")

        sequence_str = [
            self.tokenizer.aatype_to_str_sequence(s)
            for s in sequence_idx.long().cpu().numpy()
        ]
        if return_logits:
            # return the original output logits, e.g. for loss & backprop purposes
            return output_logits, sequence_idx, sequence_str
        else:
            return sequence_probs, sequence_idx, sequence_str
class LatentToStructure:
    """Fold sequence latents into 3D structures by running them through the
    ESMFold folding trunk (the language model itself is not needed)."""

    def __init__(self, esmfold=None, chunk_size=128, delete_esm_lm=False, use_compile=False):
        """
        Args:
            esmfold: an ESMFold module; a fresh ``esmfold_v1()`` if None.
            chunk_size: trunk attention chunk size (memory/speed trade-off).
            delete_esm_lm: drop the ESM language model to free GPU memory.
            use_compile: wrap the model with ``torch.compile``.
        """
        if esmfold is None:
            esmfold = esmfold_v1()
        self.esmfold = esmfold
        self.esmfold.set_chunk_size(chunk_size)
        if delete_esm_lm:
            del self.esmfold.esm  # save some GPU space
        assert not self.esmfold.trunk is None
        self.esmfold.eval()
        for param in self.esmfold.parameters():
            param.requires_grad = False
        if use_compile:
            self.esmfold = torch.compile(self.esmfold)
        self.device = get_model_device(self.esmfold)

    def to(self, device):
        """Move the folding model to ``device``."""
        self.esmfold = self.esmfold.to(device)
        self.device = device
        return self

    @torch.no_grad()
    def run_batch(self, s_, aa_, mask_, residx_, num_recycles=1, *args, **kwargs):
        """Fold one batch of latents; returns (pdb_strings, trunk outputs).

        https://github.com/facebookresearch/esm/blob/main/esm/esmfold/v1/esmfold.py#L208
        """
        # utils.print_cuda_memory_usage()
        _, L, _ = s_.shape
        # the pairwise representation starts at zero
        z_ = s_.new_zeros(s_.shape[0], L, L, ESMFOLD_Z_DIM).to(self.device)

        def maybe_pad(tensor, length):
            # align aatype/mask/residx lengths with the latent's length L
            if tensor.shape[1] != length:
                return trim_or_pad_batch_first(tensor, length, pad_idx=0)
            else:
                return tensor

        mask_ = maybe_pad(mask_, L)
        aa_ = maybe_pad(aa_, L)
        residx_ = maybe_pad(residx_, L)

        with torch.no_grad():
            output = self.esmfold.folding_trunk(
                s_s_0=s_,
                s_z_0=z_,
                aa=aa_,
                residx=residx_,
                mask=mask_,
                num_recycles=num_recycles,
            )
        pdb_str = output_to_pdb(output)
        # move tensor outputs to CPU to free GPU memory; non-tensors are kept
        for k, v in output.items():
            try:
                output[k] = v.cpu()
            except:
                pass
        return pdb_str, output

    def to_structure(
        self,
        latent: ArrayLike,
        sequences: T.List[str],
        num_recycles: int = 4,
        batch_size: T.Optional[int] = None,
        mask: T.Optional[ArrayLike] = None,  # used to override padding positions if sequence includes padding.
        return_raw_outputs: bool = False,
        verbose: bool = False,
        *args,
        **kwargs,
    ) -> T.Tuple[T.List[PathLike], T.Union[T.Dict, pd.DataFrame]]:
        """Fold latents + sequences into PDB strings.

        NOTE(review): when ``batch_size`` is None the (pdb_strs, outputs)
        tuple is always returned and ``return_raw_outputs`` is ignored —
        confirm callers handle both return shapes.
        """
        # set up devices and tensors
        aatype, mask_, residx, _, _ = batch_encode_sequences(sequences)
        if mask is None:
            mask = mask_
        aatype, mask, residx = tuple(
            map(lambda x: x.to(self.device), (aatype, mask, residx))
        )
        latent = to_tensor(latent, device=self.device)
        assert (
            latent.device == self.esmfold.device
        ), "Make sure to call .to(device) to move trunk to the correct device."

        if batch_size is None:
            if verbose:
                print("Generating structure from latents")
            return self.run_batch(latent, aatype, mask, residx, num_recycles)
        else:
            all_output_dicts = []
            all_pdb_strs = []
            for start in trange(
                0, len(latent), batch_size, desc="(Generating structure)"
            ):
                # Process current batch
                s_, aa_, mask_, residx_ = tuple(
                    map(
                        lambda x: x[start : start + batch_size],
                        (latent, aatype, mask, residx),
                    )
                )
                # Collect outputs
                pdb_str, outputs = self.run_batch(s_, aa_, mask_, residx_, num_recycles)
                all_pdb_strs.extend(pdb_str)
                all_output_dicts.append(outputs)
            if return_raw_outputs:
                # best-effort stacking across batches; fall back to the raw
                # per-batch list when tensors cannot be concatenated
                try:
                    all_output_dicts = stack_tensor_dicts(
                        all_output_dicts,
                        list_of_igored_keys=["max_predicted_aligned_error"],
                    )
                except:
                    rank_zero_info(
                        "Error stacking tensors from batches, returning raw list of outputs instead."
                    )
                return all_pdb_strs, all_output_dicts
            else:
                return all_pdb_strs
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/pretrained.py | src/cheap/pretrained.py | import os
from pathlib import Path
import torch
from torch.hub import load_state_dict_from_url
from .esmfold import esmfold_v1_embed_only
from .model import HourglassProteinCompressionTransformer
from .pipeline import Pipeline
from .constants import CATH_COMPRESS_LEVEL_TO_ID, CHECKPOINT_DIR_PATH, HF_HUB_PREFIX
from .typed import PathLike
def url_to_state_dict(url, model_dir):
    """Fetch (or reuse a cached copy of) the checkpoint at ``url`` and return
    its state dict, loaded onto the CPU."""
    cpu = torch.device("cpu")
    return load_state_dict_from_url(
        url,
        model_dir=model_dir,
        file_name="last.ckpt",
        progress=True,
        map_location=cpu,
    )
def load_pretrained_model(
    shorten_factor=1,
    channel_dimension=1024,
    model_dir=CHECKPOINT_DIR_PATH,
    infer_mode=True
):
    """Return the CHEAP compression model for a (shorten, dim) combination.

    The (1, 1024) combination is the identity compression and is served
    directly by the ESMFold embedding trunk; every other combination resolves
    to a trained hourglass checkpoint id.
    """
    if shorten_factor == 1 and channel_dimension == 1024:
        # ESM's hub machinery downloads/caches these weights automatically
        return esmfold_v1_embed_only()
    model_id = CATH_COMPRESS_LEVEL_TO_ID[shorten_factor][channel_dimension]
    return load_model_from_id(model_id=model_id, model_dir=model_dir, infer_mode=infer_mode)
def load_model_from_id(
    model_id: str,
    model_dir: PathLike = CHECKPOINT_DIR_PATH,
    infer_mode: bool = True,
):
    """Download (if needed) and restore a trained hourglass compression model.

    Args:
        model_id: checkpoint identifier on the HF hub.
        model_dir: local cache root; the checkpoint lives under a per-id subdir.
        infer_mode: if True, return the model frozen and in eval mode.

    Returns:
        A ``HourglassProteinCompressionTransformer`` with restored weights.
    """
    url = f"{HF_HUB_PREFIX}/checkpoints/{model_id}/last.ckpt"
    model_dir = Path(model_dir) / model_id
    print(f"Using checkpoint at {str(model_dir)}.")
    ckpt = url_to_state_dict(url, model_dir)

    # initialize model based on saved hyperparameters; drop entries that are
    # runtime objects rather than constructor arguments
    init_hparams = ckpt["hyper_parameters"]
    keys_to_ignore = ["latent_scaler", "seq_emb_fn"]
    for k in keys_to_ignore:
        try:
            init_hparams.pop(k)
        except KeyError:
            pass

    # load state dict
    model = HourglassProteinCompressionTransformer(
        **init_hparams, force_infer=infer_mode
    )
    model.load_state_dict(ckpt["state_dict"])
    if infer_mode:
        model.eval()
        model.requires_grad_(False)
    return model
def get_pipeline(
    model: HourglassProteinCompressionTransformer,
    device: str = "cuda",
):
    """Bundle a compression model with an ESMFold embed-only module into a Pipeline."""
    embed_module = esmfold_v1_embed_only()
    return Pipeline(
        hourglass_model=model,
        esmfold_embed_only_module=embed_module,
        device=device,
    )
def _cheap_shorten_1(channel_dimension, infer_mode, model_dir, return_pipeline):
    """Shared body for the ``CHEAP_shorten_1_dim_*`` convenience constructors.

    Loads the shorten-factor-1 model at ``channel_dimension`` and optionally
    wraps it in a Pipeline. (Previously this logic was copy-pasted ten times.)
    """
    model = load_pretrained_model(
        shorten_factor=1,
        channel_dimension=channel_dimension,
        infer_mode=infer_mode,
        model_dir=model_dir,
    )
    if return_pipeline:
        return get_pipeline(model)
    return model


def CHEAP_shorten_1_dim_1024(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(1024, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_512(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(512, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_256(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(256, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_128(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(128, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_64(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(64, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_32(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(32, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_16(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(16, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_8(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(8, infer_mode, model_dir, return_pipeline)


def CHEAP_shorten_1_dim_4(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
    return _cheap_shorten_1(4, infer_mode, model_dir, return_pipeline)
def CHEAP_shorten_2_dim_1024(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=1024,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_512(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=512,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_256(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=256,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_128(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=128,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_64(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=64,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_32(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=32,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_16(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=16,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_8(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=8,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
def CHEAP_shorten_2_dim_4(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_pretrained_model(
shorten_factor=2,
channel_dimension=4,
infer_mode=infer_mode,
model_dir=model_dir,
)
if return_pipeline:
return get_pipeline(model)
return model
# A few 'special cases' compression models
def CHEAP_pfam_shorten_2_dim_32(infer_mode=True, model_dir=CHECKPOINT_DIR_PATH, return_pipeline=True):
model = load_model_from_id("j1v1wv6w", infer_mode=infer_mode, model_dir=model_dir)
if return_pipeline:
return get_pipeline(model)
return model | python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/constants.py | src/cheap/constants.py | import os
from pathlib import Path
# defaults to ~/.cache/cheap, but can be overridden by setting the CHEAP_CACHE as an environment variable
DEFAULT_CACHE = Path(os.environ.get("CHEAP_CACHE", Path.home() / ".cache/cheap"))
if not DEFAULT_CACHE.exists():
DEFAULT_CACHE.mkdir(parents=True)
HF_HUB_PREFIX = "https://huggingface.co/amyxlu/cheap-proteins/resolve/main"
# Weights to trained latent-to-sequence decoder
DECODER_CKPT_PATH = Path(DEFAULT_CACHE) / "sequence_decoder/mlp.ckpt"
# Directory to where per-channel statistics are stored
TENSOR_STATS_DIR = Path(DEFAULT_CACHE) / "statistics"
# Directory to where pre-trained models are stored
CHECKPOINT_DIR_PATH = Path(DEFAULT_CACHE) / "checkpoints"
# Mapping of compression levels to model IDs
CATH_COMPRESS_LEVEL_TO_ID = {
2: {
4: "8ebs7j9h",
8: "mm9fe6x9",
16: "kyytc8i9",
32: "fbbrfqzk",
64: "13lltqha",
128: "uhg29zk4",
256: "ich20c3q",
512: "7str7fhl",
1024: "g8e83omk",
},
1: {
4: "1b64t79h",
8: "1hr1x9r5",
16: "yfel5fnl",
32: "v2cer77t",
64: "2tjrgcde",
128: "3rs1hxky",
256: "5z4iaak9",
512: "q3m9fhii",
},
}
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/decoder.py | src/cheap/decoder.py | from pathlib import Path
import torch.nn as nn
import torch
from torch.hub import load_state_dict_from_url
from .esmfold import batch_encode_sequences
from .constants import DECODER_CKPT_PATH, HF_HUB_PREFIX
class FullyConnectedNetwork(nn.Module):
def __init__(
self,
n_classes: int = 21,
mlp_hidden_dim: int = 1024,
mlp_num_layers: int = 3,
mlp_dropout_p: float = 0.1,
add_sigmoid: bool = False,
lr: float = 1e-4,
):
super().__init__()
self.batch_encode_sequences = batch_encode_sequences
self.lr = lr
if mlp_num_layers == 1:
layers = [nn.Linear(mlp_hidden_dim, n_classes)]
elif mlp_num_layers == 2:
first_layer = [
nn.Linear(mlp_hidden_dim, mlp_hidden_dim // 4),
nn.ReLU(),
nn.Dropout(p=mlp_dropout_p),
]
final_layer = [
nn.Linear(mlp_hidden_dim // 4, n_classes),
]
layers = first_layer + final_layer
else:
assert mlp_num_layers >= 3
num_hidden_layers = mlp_num_layers - 3
first_layer = [
nn.Linear(mlp_hidden_dim, mlp_hidden_dim // 2),
nn.ReLU(),
nn.Dropout(p=mlp_dropout_p),
]
second_layer = [
nn.Linear(mlp_hidden_dim // 2, mlp_hidden_dim // 4),
nn.ReLU(),
nn.Dropout(p=mlp_dropout_p),
]
hidden_layer = [
nn.Linear(mlp_hidden_dim // 4, mlp_hidden_dim // 4),
nn.ReLU(),
nn.Dropout(p=mlp_dropout_p),
]
final_layer = [
nn.Linear(mlp_hidden_dim // 4, n_classes),
]
layers = (
first_layer
+ second_layer
+ hidden_layer * num_hidden_layers
+ final_layer
)
if add_sigmoid:
layers.append(nn.Sigmoid())
self.net = nn.Sequential(*layers)
def forward(self, x):
# assumes that x is the raw, un-normalized embedding
return self.net(x)
@classmethod
def from_pretrained(cls, device=None, model_dir=None, eval_mode=True):
if model_dir is None:
model_dir = Path(DECODER_CKPT_PATH).parent
url = f"{HF_HUB_PREFIX}/sequence_decoder/mlp.ckpt"
# will load from cache if available, and otherwise downloads.
ckpt = load_state_dict_from_url(url, model_dir=model_dir, file_name="mlp.ckpt", progress=True, map_location=torch.device("cpu"))
model = cls()
# original model was trained/checkpointed with pytorch lightning
model.load_state_dict(ckpt["state_dict"])
if device is not None:
model.to(device)
if eval_mode:
model.eval()
for param in model.parameters():
param.requires_grad = False
return model
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/typed.py | src/cheap/typed.py | from typing import Union, List
from pathlib import Path
import torch
import numpy as np
ArrayLike = Union[np.ndarray, torch.Tensor, List]
PathLike = Union[str, Path]
DeviceLike = Union[str, torch.device] | python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/pipeline.py | src/cheap/pipeline.py | """
CHEAP model pipeline wrapper around ESMFold embedding, normalization module, and hourglass compression.
"""
from typing import Optional, Union, List, Tuple
import torch
from .model import HourglassProteinCompressionTransformer
from .esmfold import ESMFoldEmbed, esmfold_v1_embed_only
from .utils import LatentScaler
from .typed import DeviceLike
class Pipeline:
def __init__(
self,
hourglass_model: HourglassProteinCompressionTransformer,
esmfold_embed_only_module: ESMFoldEmbed,
latent_scaler: LatentScaler = LatentScaler(),
device: DeviceLike = "cuda",
):
super().__init__()
self.hourglass_model = hourglass_model.to(device)
self.esmfold_embed_only_module = esmfold_embed_only_module.to(device)
self.latent_scaler = latent_scaler
self.device = device
def to(self, device: DeviceLike):
self.hourglass_model = self.hourglass_model.to(device)
self.esmfold_embed_only_module = self.esmfold_embed_only_module.to(device)
self.device = device
return self
def decode(self, x_compressed: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Given a compressed representation, uncompress and unnormalize back to the original ESMFold latent."""
x_uncompressed = self.hourglass_model.decode(x_compressed, mask)
return self.latent_scaler.unscale(x_uncompressed)
def __call__(self, sequences: Union[str, List[str]]) -> Tuple[torch.Tensor, torch.Tensor]:
"""Given the original ESMFold latent, normalize and compress using the loaded checkpoint."""
res = self.esmfold_embed_only_module.infer_embedding(sequences)
emb, mask = res['s'], res['mask']
emb, mask = emb.to(self.device), mask.to(self.device)
emb = self.latent_scaler.scale(emb)
compressed_representation, downsampled_mask = self.hourglass_model(emb, mask, infer_only=True)
return compressed_representation, downsampled_mask
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/__init__.py | src/cheap/__init__.py | python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false | |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/model/_hourglass.py | src/cheap/model/_hourglass.py | import typing as T
import torch
from torch import nn
import numpy as np
from . import HourglassDecoder, HourglassEncoder, VectorQuantizer, FiniteScalarQuantizer
from ..utils import (
LatentScaler,
trim_or_pad_batch_first,
get_lr_scheduler,
get_model_device,
)
from ..esmfold._misc import batch_encode_sequences
from ..proteins import LatentToSequence, LatentToStructure
from ..losses import SequenceAuxiliaryLoss, BackboneAuxiliaryLoss, masked_mse_loss
class HourglassProteinCompressionTransformer(nn.Module):
def __init__(
self,
dim,
*,
depth=4, # depth used for both encoder and decoder
shorten_factor=2,
downproj_factor=2,
attn_resampling=True,
updown_sample_type="naive",
heads=8,
dim_head=64,
causal=False,
norm_out=False,
use_quantizer="vq",
# quantizer
n_e=512,
e_dim=64,
vq_beta=0.25,
enforce_single_codebook_per_position: bool = False,
fsq_levels: T.Optional[T.List[int]] = None,
lr=1e-4,
lr_adam_betas=(0.9, 0.999),
lr_sched_type: str = "constant",
lr_num_warmup_steps: int = 0,
lr_num_training_steps: int = 10_000_000,
lr_num_cycles: int = 1,
# auxiliary losses
seq_loss_weight: float = 0.0,
struct_loss_weight: float = 0.0,
log_sequence_loss=False,
log_structure_loss=False,
# in case we need to embed on the fly
esmfold=None,
force_infer=False,
):
super().__init__()
"""Make quantizer. Can be either the traditional VQ-VAE, the FSQ, or
none (i.e. output of encoder goes directly back into the decoder).
"""
self.latent_scaler = LatentScaler()
if esmfold is not None:
self.esmfold = esmfold
for param in self.esmfold.parameters():
param.requires_grad = False
if isinstance(use_quantizer, bool):
if use_quantizer:
# for backwards compatitibility
print("Using quantizer: VQVAE")
self.quantize_scheme = "vq"
else:
# for backwards compatitibility
print("Using non-quantization mode...")
self.quantize_scheme = None # no quantization
else:
assert use_quantizer in ["vq", "fsq", "tanh"]
self.quantize_scheme = use_quantizer
print(f"Using {use_quantizer} layer at bottleneck...")
assert self.check_valid_compression_method(self.quantize_scheme)
# Set up quantizer modules
self.pre_quant_proj = None
self.post_quant_proj = None
if self.quantize_scheme == "vq":
self.quantizer = VectorQuantizer(n_e, e_dim, vq_beta)
# if this is enforced, then we'll project down the channel dimension to make sure that the
# output of the encoder has the same dimension as the embedding codebook.
# otherwise, the excess channel dimensions will be tiled up lengthwise,
# which combinatorially increases the size of the codebook. The latter will
# probably lead to better results, but is not the convention and may lead to
# an excessively large codebook for purposes such as training an AR model downstream.
if enforce_single_codebook_per_position and (
dim / downproj_factor != e_dim
):
self.pre_quant_proj = torch.nn.Linear(dim // downproj_factor, e_dim)
self.post_quant_proj = torch.nn.Linear(e_dim, dim // downproj_factor)
elif self.quantize_scheme == "fsq":
if not len(fsq_levels) == (dim / downproj_factor):
# similarly, project down to the length of the FSQ vectors.
# unlike with VQ-VAE, the convention with FSQ *is* to combinatorially incraese the size of codebook
self.pre_quant_proj = torch.nn.Linear(
dim // downproj_factor, len(fsq_levels)
)
self.post_quant_proj = torch.nn.Linear(
len(fsq_levels), dim // downproj_factor
)
self.fsq_levels = fsq_levels
self.quantizer = FiniteScalarQuantizer(fsq_levels)
else:
# self.quantize_scheme in [None, "tanh"]
self.quantizer = None
# Set up encoder/decoders
self.enc = HourglassEncoder(
dim=dim,
depth=depth,
shorten_factor=shorten_factor,
downproj_factor=downproj_factor,
attn_resampling=attn_resampling,
updown_sample_type=updown_sample_type,
heads=heads,
dim_head=dim_head,
causal=causal,
norm_out=norm_out,
)
self.dec = HourglassDecoder(
dim=dim // downproj_factor,
depth=depth,
elongate_factor=shorten_factor,
upproj_factor=downproj_factor,
attn_resampling=True,
updown_sample_type=updown_sample_type,
)
# other misc settings
self.z_q_dim = dim // np.prod(dim)
self.n_e = n_e
self.lr = lr
self.lr_adam_betas = lr_adam_betas
self.lr_sched_type = lr_sched_type
self.lr_num_warmup_steps = lr_num_warmup_steps
self.lr_num_training_steps = lr_num_training_steps
self.lr_num_cycles = lr_num_cycles
self.make_sequence_constructor = log_sequence_loss or (seq_loss_weight > 0.0)
self.make_structure_constructor = log_structure_loss or (
struct_loss_weight > 0.0
)
self.seq_loss_weight = seq_loss_weight
self.struct_loss_weight = struct_loss_weight
# auxiliary losses
if not force_infer:
if self.make_sequence_constructor:
self.sequence_constructor = LatentToSequence()
self.seq_loss_fn = SequenceAuxiliaryLoss(self.sequence_constructor)
if self.make_structure_constructor:
self.structure_constructor = LatentToStructure(esmfold=esmfold)
self.structure_loss_fn = BackboneAuxiliaryLoss(
self.structure_constructor
)
print(
f"Finished loading HPCT model with shorten factor {shorten_factor} and {1024 // downproj_factor} channel dimensions."
)
def check_valid_compression_method(self, method):
return method in ["fsq", "vq", "tanh", None]
def encode(self, x, mask=None, verbose=False, infer_only=True, *args, **kwargs):
if mask is None:
mask = torch.ones((x.shape[0], x.shape[1])).to(x.device)
mask = mask.bool()
# ensure that input length is a multiple of the shorten factor
s = self.enc.shorten_factor
extra = x.shape[1] % s
if extra != 0:
needed = s - extra
x = trim_or_pad_batch_first(x, pad_to=x.shape[1] + needed, pad_idx=0)
# In any case where the mask and token generated from sequence strings don't match latent, make it match
if mask.shape[1] != x.shape[1]:
# pad with False
mask = trim_or_pad_batch_first(mask, x.shape[1], pad_idx=0)
# encode and possibly downsample
log_dict = {}
z_e, downsampled_mask = self.enc(x, mask, verbose)
# if encoder output dimensions does not match quantization inputs, project down
if self.pre_quant_proj is not None:
z_e = self.pre_quant_proj(z_e)
##################
# Quantize
##################
# VQ-VAE
if self.quantize_scheme == "vq":
quant_out = self.quantizer(z_e, verbose)
if not infer_only:
z_q = quant_out["z_q"]
vq_loss = quant_out["loss"]
log_dict["vq_loss"] = quant_out["loss"]
log_dict["vq_perplexity"] = quant_out["perplexity"]
compressed_representation = quant_out[
"min_encoding_indices"
].detach() # .cpu().numpy()
# FSQ
elif self.quantize_scheme == "fsq":
z_q = self.quantizer.quantize(z_e)
compressed_representation = self.quantizer.codes_to_indexes(
z_q
).detach() # .cpu().numpy()
# Continuous (no quantization) with a tanh bottleneck
elif self.quantize_scheme == "tanh":
z_e = z_e.to(torch.promote_types(z_e.dtype, torch.float32))
z_q = torch.tanh(z_e)
else:
raise NotImplementedError
if infer_only:
compressed_representation = z_q.detach() # .cpu().numpy()
downsampled_mask = downsampled_mask.detach() # .cpu().numpy()
return compressed_representation, downsampled_mask
else:
return z_q, downsampled_mask, log_dict
def decode(self, z_q, downsampled_mask=None, verbose=False):
if self.post_quant_proj is not None:
z_q = self.post_quant_proj(z_q)
x_recons = self.dec(z_q, downsampled_mask, verbose)
return x_recons
def forward(self, x, mask=None, verbose=False, infer_only=True, *args, **kwargs):
if infer_only:
return self.encode(x, mask, verbose, infer_only)
else:
# encode and obtain post-quantize embedding
z_q, downsampled_mask, log_dict = self.encode(x, mask, verbose, infer_only)
# decode back to original
x_recons = self.decode(z_q, downsampled_mask, verbose)
# calculate losses
recons_loss = masked_mse_loss(x_recons, x, mask)
vq_loss = log_dict.get("vq_loss", 0.0)
loss = vq_loss + recons_loss
log_dict["recons_loss"] = recons_loss.item()
log_dict["loss"] = loss.item()
return x_recons, loss, log_dict, z_q, downsampled_mask
def configure_optimizers(self):
parameters = list(self.enc.parameters()) + list(self.dec.parameters())
if not self.quantizer is None:
parameters += list(self.quantizer.parameters())
optimizer = torch.optim.AdamW(
parameters, lr=self.lr, betas=self.lr_adam_betas
)
scheduler = get_lr_scheduler(
optimizer=optimizer,
sched_type=self.lr_sched_type,
num_warmup_steps=self.lr_num_warmup_steps,
num_training_steps=self.lr_num_training_steps,
num_cycles=self.lr_num_cycles,
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return {"optimizer": optimizer, "lr_scheduler": scheduler}
def run_batch(self, batch, prefix="train"):
"""
The input batch can be:
(1) precomputed embeddings along with a dictionary of structures (CATHShardedDataModule)
(2) precomputed embeddings with a placeholder for the structure dictionary (CATHStructureDataModule)
(3) raw headers and sequences tuples (FastaDataset)
to trigger the raw sequence mode, the `seq_emb_fn` should be passed, which should be defined outside
the train loop, and should of the desired embedding function from ESMFold/etc., already moved to device.
"""
# hack infer which type of input batch we're using
if len(batch) == 3:
x, sequences, gt_structures = batch
elif len(batch) == 2:
# using a FastaLoader, sequence only
assert not self.esmfold is None
headers, sequences = batch
x = self.esmfold.infer_embedding(sequences)["s"]
else:
raise
device = get_model_device(self)
# get masks and ground truth tokens and move to device
tokens, mask, _, _, _ = batch_encode_sequences(sequences)
# if shortened and using a Fasta loader, the latent might not be a multiple of shorten factor
s = self.enc.shorten_factor
extra = x.shape[1] % s
if extra != 0:
needed = s - extra
x = trim_or_pad_batch_first(x, pad_to=x.shape[1] + needed, pad_idx=0)
# In any case where the mask and token generated from sequence strings don't match latent, make it match
if mask.shape[1] != x.shape[1]:
# pad with False
mask = trim_or_pad_batch_first(mask, x.shape[1], pad_idx=0)
tokens = trim_or_pad_batch_first(tokens, x.shape[1], pad_idx=0)
x = x.to(device)
mask = mask.to(device)
tokens = tokens.to(device)
# scale (maybe) latent values to be per-channel normalized
x = self.latent_scaler.scale(x)
# forward pass
log_dict = {}
x_recons, loss, log_dict, _, _ = self(x, mask.bool())
log_dict = log_dict | {f"{prefix}/{k}": v for k, v in log_dict.items()}
# unscale to decode into sequence and/or structure
x_recons_unscaled = self.latent_scaler.unscale(x_recons)
batch_size = x_recons_unscaled.shape[0]
# sequence loss
if self.make_sequence_constructor:
self.sequence_constructor = self.sequence_constructor.to(device)
with torch.no_grad():
seq_loss, seq_loss_dict, recons_strs = self.seq_loss_fn(
x_recons_unscaled, tokens, mask, return_reconstructed_sequences=True
)
seq_loss_dict = {f"{prefix}/{k}": v for k, v in seq_loss_dict.items()}
log_dict = log_dict | seq_loss_dict
loss += seq_loss * self.seq_loss_weight
# structure loss
if self.make_structure_constructor:
self.structure_constructor = self.structure_constructor.to(device)
with torch.no_grad():
struct_loss, struct_loss_dict = self.structure_loss_fn(
x_recons_unscaled, gt_structures, sequences
)
struct_loss_dict = {
f"{prefix}/{k}": v.mean() for k, v in struct_loss_dict.items()
}
log_dict = log_dict | struct_loss_dict
loss += struct_loss * self.struct_loss_weight
return loss, log_dict
def training_step(self, batch, batch_idx):
return self.run_batch(batch, prefix="train")
def validation_step(self, batch, batch_idx):
return self.run_batch(batch, prefix="val")
def state_dict(self):
state = super().state_dict()
state = {k: v for k, v in state.items() if "esmfold" not in k}
return state
@classmethod
def from_pretrained(cls, checkpoint_path):
model = cls()
state = torch.load(checkpoint_path)
model.load_state_dict(state)
return model
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/model/__init__.py | src/cheap/model/__init__.py | from ._modules import (
HourglassDecoder,
HourglassEncoder,
VectorQuantizer,
FiniteScalarQuantizer,
)
from ._hourglass import HourglassProteinCompressionTransformer
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/model/_modules.py | src/cheap/model/_modules.py | import typing as T
import math
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
import einops
import numpy as np
from omegaconf import ListConfig
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def maybe_shape_check(tensor, verbose, prefix=""):
if not verbose:
return
else:
print(prefix, tensor.shape)
def expand_to_shape(x, target_shape):
# keep adding dimensions to the end until we match target dimensions
while len(x.shape) < len(target_shape):
x = x[..., None]
return x.expand(target_shape)
def pad_to_multiple(tensor, multiple, dim=-1, value=0):
seq_len = tensor.shape[dim]
m = seq_len / multiple
if m.is_integer():
return tensor
remainder = math.ceil(m) * multiple - seq_len
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value=value)
def cast_tuple(val, depth=1):
return val if isinstance(val, tuple) else ((val,) * depth)
# factory
def _valid_depth_dtype(depth):
if isinstance(depth, int):
return True
if (
isinstance(depth, tuple)
or isinstance(depth, list)
or isinstance(depth, ListConfig)
):
if len(depth) == 3:
return True
return False
def _check_if_nest(var):
return isinstance(var, (tuple, list, ListConfig)) and len(var) > 0
# up and down sample classes
class NaiveDownsample(nn.Module):
def __init__(self, shorten_factor):
super().__init__()
self.shorten_factor = shorten_factor
def forward(self, x):
return reduce(x, "b (n s) d -> b n d", "mean", s=self.shorten_factor)
class NaiveUpsample(nn.Module):
def __init__(self, elongate_factor):
super().__init__()
self.elongate_factor = elongate_factor
def forward(self, x):
return repeat(x, "b n d -> b (n s) d", s=self.elongate_factor)
class LinearDownsample(nn.Module):
def __init__(self, dim, shorten_factor):
super().__init__()
self.proj = nn.Linear(dim * shorten_factor, dim)
self.shorten_factor = shorten_factor
def forward(self, x):
x = rearrange(x, "b (n s) d -> b n (s d)", s=self.shorten_factor)
return self.proj(x)
class LinearUpsample(nn.Module):
def __init__(self, dim, elongate_factor):
super().__init__()
self.proj = nn.Linear(dim, dim * elongate_factor)
self.elongate_factor = elongate_factor
def forward(self, x):
x = self.proj(x)
return rearrange(x, "b n (s d) -> b (n s) d", s=self.elongate_factor)
# classes
class PreNormLinearDownProjection(nn.Module):
def __init__(self, dim, downproj_factor):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.proj = nn.Linear(dim, dim // downproj_factor)
def forward(self, x):
return self.proj(self.norm(x))
class PreNormLinearUpProjection(nn.Module):
def __init__(self, dim, upproj_factor):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.proj = nn.Linear(dim, dim * upproj_factor)
def forward(self, x):
return self.proj(self.norm(x))
class PreNormResidual(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs) + x
class Attention(nn.Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, causal=False):
super().__init__()
self.heads = heads
self.causal = causal
self.scale = dim_head**-0.5
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, context=None, mask=None):
h, device = self.heads, x.device
kv_input = default(context, x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim=-1)
q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v))
q = q * self.scale
sim = einsum("b h i d, b h j d -> b h i j", q, k)
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = rearrange(mask, "b j -> b () () j")
sim = sim.masked_fill(~mask, mask_value)
if self.causal:
i, j = sim.shape[-2:]
mask = torch.ones(i, j, device=device, dtype=torch.bool).triu_(j - i + 1)
mask = rearrange(mask, "i j -> () () i j")
sim = sim.masked_fill(mask, mask_value)
attn = sim.softmax(dim=-1)
attn = self.dropout(attn)
out = einsum("b h i j, b h j d -> b h i d", attn, v)
out = rearrange(out, "b h n d -> b n (h d)", h=h)
return self.to_out(out)
def FeedForward(dim, mult=4, dropout=0.0):
return nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim),
)
# transformer classes
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
causal=False,
heads=8,
dim_head=64,
attn_dropout=0.0,
ff_mult=4,
ff_dropout=0.0,
norm_out=False,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PreNormResidual(
dim,
Attention(
dim,
heads=heads,
dim_head=dim_head,
dropout=attn_dropout,
causal=causal,
),
),
PreNormResidual(
dim, FeedForward(dim, mult=ff_mult, dropout=ff_dropout)
),
]
)
)
self.norm = nn.LayerNorm(dim) if norm_out else nn.Identity()
def forward(self, x, context=None, mask=None, compressed=None):
for attn, ff in self.layers:
x = attn(x, context=context, mask=mask)
x = ff(x)
return self.norm(x)
class HourglassEncoder(nn.Module):
def __init__(
self,
dim,
*,
depth=(4, 4),
shorten_factor=(2, 2),
downproj_factor=(2, 2),
attn_resampling=True,
updown_sample_type="naive",
heads=8,
dim_head=64,
causal=False,
norm_out=False,
):
super().__init__()
# shorten factor
if _check_if_nest(shorten_factor):
assert len(depth) == len(shorten_factor) == len(downproj_factor)
shorten_factor, *rest_shorten_factor = shorten_factor
else:
shorten_factor, rest_shorten_factor = shorten_factor, None
if not isinstance(shorten_factor, int):
shorten_factor = shorten_factor[0]
# downproj factor
if _check_if_nest(downproj_factor):
downproj_factor, *rest_downproj_factor = downproj_factor
else:
downproj_factor, rest_downproj_factor = downproj_factor, None
if not isinstance(downproj_factor, int):
downproj_factor = downproj_factor[0]
# depth
if _check_if_nest(depth):
depth, *rest_depth = depth
else:
depth, rest_depth = depth, None
if not isinstance(depth, int):
depth = depth[0]
# shared transformer kwargs
transformer_kwargs = dict(heads=heads, dim_head=dim_head)
self.causal = causal
self.shorten_factor = shorten_factor
self.downproj_factor = downproj_factor
if updown_sample_type == "naive":
self.downsample = NaiveDownsample(shorten_factor)
elif updown_sample_type == "linear":
self.downsample = LinearDownsample(dim, shorten_factor)
else:
raise ValueError(
f"unknown updown_sample_type keyword value - must be either naive or linear for now"
)
self.down_projection = PreNormLinearDownProjection(dim, downproj_factor)
if _check_if_nest(rest_depth):
assert _check_if_nest(rest_shorten_factor)
assert _check_if_nest(rest_downproj_factor)
self.nested_encoder = HourglassEncoder(
dim=dim // downproj_factor,
shorten_factor=rest_shorten_factor,
downproj_factor=rest_downproj_factor,
depth=rest_depth,
attn_resampling=attn_resampling,
updown_sample_type=updown_sample_type,
causal=causal,
**transformer_kwargs,
)
self.has_nest = True
else:
self.has_nest = False
self.pre_transformer = Transformer(
dim=dim, depth=depth, causal=causal, **transformer_kwargs
)
self.attn_resampling_pre_valley = (
Transformer(dim=dim, depth=1, **transformer_kwargs)
if attn_resampling
else None
)
self.norm_out = nn.LayerNorm(dim) if norm_out else nn.Identity()
def forward(self, x, mask=None, verbose=False):
"""
x: input
mask: indicates if input has a padding (True if we should keep it, False if it's padding & we should discard it)
compressed: at the start, should be None; if it's already populated, ignore further actions at the valley step
"""
# b : batch, n : sequence length, d : feature dimension, s : shortening factor
s, b, n = self.shorten_factor, *x.shape[:2]
maybe_shape_check(x, verbose)
# top half of hourglass, pre-transformer layers
x = self.pre_transformer(x, mask=mask)
# pad to multiple of shortening factor, in preparation for pooling
x = pad_to_multiple(x, s, dim=-2)
if exists(mask):
mask = pad_to_multiple(mask, s, dim=-1, value=False)
# if autoregressive, do the shift by shortening factor minus one
if self.causal:
shift = s - 1
x = F.pad(x, (0, 0, shift, -shift), value=0.0)
if exists(mask):
mask = F.pad(mask, (shift, -shift), value=False)
# naive average pool along length dimension
downsampled = self.downsample(x)
maybe_shape_check(downsampled, verbose)
if exists(mask):
downsampled_mask = reduce(mask, "b (n s) -> b n", "sum", s=s) > 0
maybe_shape_check(downsampled_mask, verbose)
else:
downsampled_mask = None
# pre-valley "attention resampling" - they have the pooled token in each bucket attend to the tokens pre-pooled
if exists(self.attn_resampling_pre_valley):
if exists(mask):
attn_resampling_mask = rearrange(mask, "b (n s) -> (b n) s", s=s)
else:
attn_resampling_mask = None
downsampled = self.attn_resampling_pre_valley(
rearrange(downsampled, "b n d -> (b n) () d"),
rearrange(x, "b (n s) d -> (b n) s d", s=s),
mask=attn_resampling_mask,
)
downsampled = rearrange(downsampled, "(b n) () d -> b n d", b=b)
maybe_shape_check(downsampled, verbose)
# also possibly reduce along dim=-1
out = self.down_projection(downsampled)
# the "valley" - either a regular transformer or another hourglass
if self.has_nest:
out, downsampled_mask = self.nested_encoder(out, mask=downsampled_mask)
maybe_shape_check(out, verbose, "Encoder output:")
return self.norm_out(out), downsampled_mask
class HourglassDecoder(nn.Module):
def __init__(
self,
dim,
*,
depth=(4, 4),
elongate_factor=(2, 2),
upproj_factor=(2, 2),
attn_resampling=True,
updown_sample_type="linear",
heads=8,
dim_head=64,
causal=False,
norm_out=False,
):
super().__init__()
# set up nesting
if isinstance(elongate_factor, (tuple, list, ListConfig)):
assert len(depth) == len(elongate_factor) == len(upproj_factor)
elongate_factor, *rest_elongate_factor = elongate_factor
elif isinstance(elongate_factor, int):
elongate_factor, rest_elongate_factor = elongate_factor, None
else:
raise TypeError()
if isinstance(upproj_factor, (tuple, list, ListConfig)):
upproj_factor, *rest_upproj_factor = upproj_factor
elif isinstance(upproj_factor, int):
upproj_factor, rest_upproj_factor = upproj_factor, None
else:
raise TypeError()
if isinstance(depth, (tuple, list, ListConfig)):
depth, *rest_depth = depth
elif isinstance(depth, int):
depth, rest_depth = depth, None
else:
raise TypeError()
# shared transformer kwargs
transformer_kwargs = dict(heads=heads, dim_head=dim_head)
self.causal = causal
self.elongate_factor = elongate_factor
self.upproj_factor = upproj_factor
if updown_sample_type == "naive":
self.upsample = NaiveUpsample(elongate_factor)
elif updown_sample_type == "linear":
self.upsample = LinearUpsample(dim, elongate_factor)
else:
raise ValueError(
f"unknown updown_sample_type keyword value - must be either naive or linear for now"
)
self.up_projection = PreNormLinearUpProjection(dim, upproj_factor)
if _check_if_nest(rest_depth):
assert _check_if_nest(rest_elongate_factor)
assert _check_if_nest(rest_upproj_factor)
self.nested_decoder = HourglassDecoder(
dim=dim * upproj_factor,
elongate_factor=rest_elongate_factor,
upproj_factor=rest_upproj_factor,
depth=rest_depth,
attn_resampling=attn_resampling,
updown_sample_type=updown_sample_type,
causal=causal,
**transformer_kwargs,
)
self.has_nest = True
else:
self.has_nest = False
self.post_transformer = Transformer(
dim=dim * upproj_factor, depth=depth, causal=causal, **transformer_kwargs
)
self.attn_resampling_post_valley = (
Transformer(dim=dim, depth=1, **transformer_kwargs)
if attn_resampling
else None
)
self.norm_out = nn.LayerNorm(dim) if norm_out else nn.Identity()
def forward(self, z_q, mask=None, verbose=False):
"""
z_q: input compressed representation
mask: indicates if input has a padding (True if we should keep it, False if it's padding & we should discard it)
compressed: at the start, should be None; if it's already populated, ignore further actions at the valley step
"""
# b : batch, n : sequence length, d : feature dimension, s : elongation factor
maybe_shape_check(z_q, verbose, "Decoder z_q input:")
s, b, n = self.elongate_factor, *z_q.shape[:2]
upsampled = self.upsample(z_q)
maybe_shape_check(upsampled, verbose, "Upsampled:")
if exists(mask):
upsampled_mask = einops.repeat(mask, "b n -> b (n s)", s=s) > 0
maybe_shape_check(upsampled_mask, verbose, "Upsampled mask:")
else:
upsampled_mask = None
# post-valley "attention resampling"
if exists(self.attn_resampling_post_valley):
if exists(upsampled_mask):
attn_resampling_mask = rearrange(
upsampled_mask, "b (n s) -> (b n) s", s=s
)
maybe_shape_check(attn_resampling_mask, verbose)
else:
attn_resampling_mask = None
x = rearrange(upsampled, "b (n s) d -> (b n) s d", s=s)
context = rearrange(z_q, "b n d -> (b n) () d")
maybe_shape_check(x, verbose, "Post-valley transformer input:")
maybe_shape_check(context, verbose, "Post-valley transformer context:")
x = self.attn_resampling_post_valley(x, context, attn_resampling_mask)
x = rearrange(x, "(b n) s d -> b (n s) d", b=b)
x = self.up_projection(x)
out = self.post_transformer(x, mask=upsampled_mask)
if self.has_nest:
out = self.nested_decoder(out, mask=upsampled_mask)
maybe_shape_check(out, verbose, "decoder output")
return self.norm_out(out)
class VectorQuantizer(nn.Module):
"""
Discretization bottleneck part of the VQ-VAE.
Inputs:
- n_e : number of embeddings
- e_dim : dimension of embedding
- beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
"""
def __init__(self, n_e, e_dim, beta):
super(VectorQuantizer, self).__init__()
self.n_e = n_e
self.e_dim = e_dim
self.beta = beta
self.embedding = nn.Embedding(self.n_e, self.e_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
def forward(self, z, verbose=False):
"""
Inputs the output of the encoder network z and maps it to a discrete
one-hot vector that is the index of the closest embedding vector e_j
z (continuous) -> z_q (discrete)
z.shape = (batch, length, channel)
quantization pipeline:
1. get encoder input (B,L,C)
2. flatten input to (B*L,C)
"""
# reshape z -> (batch, height, channel) and flatten
maybe_shape_check(z, verbose, "z_e quantizer input")
z_flattened = einops.rearrange(z, "b l c -> (b l) c").view(-1, self.e_dim)
maybe_shape_check(z_flattened, verbose, "z_flattened")
device = z.device
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
d = (
torch.sum(z_flattened**2, dim=1, keepdim=True)
+ torch.sum(self.embedding.weight**2, dim=1)
- 2 * torch.matmul(z_flattened, self.embedding.weight.t())
)
# find closest encodings
min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
min_encodings = torch.zeros(min_encoding_indices.shape[0], self.n_e).to(device)
min_encodings.scatter_(1, min_encoding_indices, 1)
# get quantized latent vectors
z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
maybe_shape_check(z_q, verbose, "z_q after min_encoding * embedding.weight")
# compute loss for embedding
# embedding_loss = masked_mse_loss(z_q.detach(), z, mask)
# commitment_loss = masked_mse_loss(z_q, z.detach(), mask)
# loss = embedding_loss + self.beta * commitment_loss
loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean(
(z_q - z.detach()) ** 2
)
# preserve gradients
z_q = z + (z_q - z).detach()
# perplexity
e_mean = torch.mean(min_encodings, dim=0)
perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
return {
"loss": loss,
"z_q": z_q,
"perplexity": perplexity,
"min_encodings": min_encodings,
"min_encoding_indices": min_encoding_indices,
}
def get_codebook_entry(self, indices, shape=None):
# shape specifying (batch, length, channel)
# TODO: check for more easy handling with nn.Embedding
min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
min_encodings.scatter_(1, indices[:, None], 1)
# get quantized latent vectors
z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
if shape is not None:
z_q = z_q.view(shape)
return z_q
"""Torch implementation of Finite Scalar Quantization
From Appendix 1"""
def round_ste(z):
"""Round with straight through gradients."""
zhat = torch.round(z)
return z + (zhat - z).detach()
class FiniteScalarQuantizer(nn.Module):
def __init__(self, levels: T.List[int]):
super().__init__()
levels = torch.tensor(levels)
basis = torch.cat([torch.tensor([1]), torch.cumprod(levels[:-1], dim=0)]).to(
dtype=torch.int32
)
self.levels = levels
self.basis = basis
# number of dimensions expect from inputs
self.num_dimensions = len(levels)
# size of the codebook
self.codebook_size = torch.prod(levels)
self.implicit_codebook = self.indexes_to_codes(torch.arange(self.codebook_size))
print("Codebook size:", self.codebook_size)
@property
def codebook(self):
return self.implicit_codebook
def bound(self, z, eps=1e-3):
"""Bound z, an array of shape (..., d)."""
levels = self.levels.to(z.device)
half_l = (levels - 1) * (1 - eps) / 2
offset = torch.where(levels % 2 == 1, 0.0, 0.5)
shift = torch.tan(offset / half_l)
return torch.tanh(z + shift) * half_l - offset
def quantize(self, z):
"""Quanitzes z, returns quantized zhat as codewords, same shape as z."""
quantized = round_ste(self.bound(z))
half_width = self.levels // 2 # Renormalize to [-1, 1].
half_width = half_width.to(z.device)
return quantized / half_width
def _scale_and_shift(self, zhat_normalized):
levels = self.levels.to(zhat_normalized.device)
half_width = levels // 2
return (zhat_normalized * half_width) + half_width
def _scale_and_shift_inverse(self, zhat):
levels = self.levels.to(zhat.device)
half_width = levels // 2
return (zhat - half_width) / half_width
def codes_to_indexes(self, zhat):
# assert zhat.shape[-1] == len(self.levels)
basis = self.basis.to(zhat.device)
zhat = self._scale_and_shift(zhat)
return (zhat * basis).sum(axis=-1).to(dtype=torch.int32)
def indexes_to_codes(self, indices):
indices = indices.unsqueeze(-1)
basis = self.basis.to(indices.device)
levels = self.levels.to(indices.device)
codes_non_centered = torch.remainder(torch.floor_divide(indices, basis), levels)
return self._scale_and_shift_inverse(codes_non_centered)
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_default_config.py | src/cheap/openfold_utils/_default_config.py | import re
import copy
import importlib
import ml_collections as mlc
def set_inf(c, inf):
for k, v in c.items():
if isinstance(v, mlc.ConfigDict):
set_inf(v, inf)
elif k == "inf":
c[k] = inf
def enforce_config_constraints(config):
def string_to_setting(s):
path = s.split(".")
setting = config
for p in path:
setting = setting.get(p)
return setting
mutually_exclusive_bools = [
("model.template.average_templates", "model.template.offload_templates"),
("globals.use_lma", "globals.use_flash", "globals.use_deepspeed_evo_attention"),
]
for options in mutually_exclusive_bools:
option_settings = [string_to_setting(o) for o in options]
if sum(option_settings) > 1:
raise ValueError(f"Only one of {', '.join(options)} may be set at a time")
fa_is_installed = importlib.util.find_spec("flash_attn") is not None
if config.globals.use_flash and not fa_is_installed:
raise ValueError("use_flash requires that FlashAttention is installed")
deepspeed_is_installed = importlib.util.find_spec("deepspeed") is not None
ds4s_is_installed = (
deepspeed_is_installed
and importlib.util.find_spec("deepspeed.ops.deepspeed4science") is not None
)
if config.globals.use_deepspeed_evo_attention and not ds4s_is_installed:
raise ValueError(
"use_deepspeed_evo_attention requires that DeepSpeed be installed "
"and that the deepspeed.ops.deepspeed4science package exists"
)
if config.globals.offload_inference and not config.model.template.average_templates:
config.model.template.offload_templates = True
def model_config(name, train=False, low_prec=False, long_sequence_inference=False):
c = copy.deepcopy(config)
# TRAINING PRESETS
if name == "initial_training":
# AF2 Suppl. Table 4, "initial training" setting
pass
elif name == "finetuning":
# AF2 Suppl. Table 4, "finetuning" setting
c.data.train.crop_size = 384
c.data.train.max_extra_msa = 5120
c.data.train.max_msa_clusters = 512
c.loss.violation.weight = 1.0
c.loss.experimentally_resolved.weight = 0.01
elif name == "finetuning_ptm":
c.data.train.max_extra_msa = 5120
c.data.train.crop_size = 384
c.data.train.max_msa_clusters = 512
c.loss.violation.weight = 1.0
c.loss.experimentally_resolved.weight = 0.01
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
elif name == "finetuning_no_templ":
# AF2 Suppl. Table 4, "finetuning" setting
c.data.train.crop_size = 384
c.data.train.max_extra_msa = 5120
c.data.train.max_msa_clusters = 512
c.model.template.enabled = False
c.loss.violation.weight = 1.0
c.loss.experimentally_resolved.weight = 0.01
elif name == "finetuning_no_templ_ptm":
# AF2 Suppl. Table 4, "finetuning" setting
c.data.train.crop_size = 384
c.data.train.max_extra_msa = 5120
c.data.train.max_msa_clusters = 512
c.model.template.enabled = False
c.loss.violation.weight = 1.0
c.loss.experimentally_resolved.weight = 0.01
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
# INFERENCE PRESETS
elif name == "model_1":
# AF2 Suppl. Table 5, Model 1.1.1
c.data.train.max_extra_msa = 5120
c.data.predict.max_extra_msa = 5120
c.data.common.reduce_max_clusters_by_max_templates = True
c.data.common.use_templates = True
c.data.common.use_template_torsion_angles = True
c.model.template.enabled = True
elif name == "model_2":
# AF2 Suppl. Table 5, Model 1.1.2
c.data.common.reduce_max_clusters_by_max_templates = True
c.data.common.use_templates = True
c.data.common.use_template_torsion_angles = True
c.model.template.enabled = True
elif name == "model_3":
# AF2 Suppl. Table 5, Model 1.2.1
c.data.train.max_extra_msa = 5120
c.data.predict.max_extra_msa = 5120
c.model.template.enabled = False
elif name == "model_4":
# AF2 Suppl. Table 5, Model 1.2.2
c.data.train.max_extra_msa = 5120
c.data.predict.max_extra_msa = 5120
c.model.template.enabled = False
elif name == "model_5":
# AF2 Suppl. Table 5, Model 1.2.3
c.model.template.enabled = False
elif name == "model_1_ptm":
c.data.train.max_extra_msa = 5120
c.data.predict.max_extra_msa = 5120
c.data.common.reduce_max_clusters_by_max_templates = True
c.data.common.use_templates = True
c.data.common.use_template_torsion_angles = True
c.model.template.enabled = True
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
elif name == "model_2_ptm":
c.data.common.reduce_max_clusters_by_max_templates = True
c.data.common.use_templates = True
c.data.common.use_template_torsion_angles = True
c.model.template.enabled = True
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
elif name == "model_3_ptm":
c.data.train.max_extra_msa = 5120
c.data.predict.max_extra_msa = 5120
c.model.template.enabled = False
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
elif name == "model_4_ptm":
c.data.train.max_extra_msa = 5120
c.data.predict.max_extra_msa = 5120
c.model.template.enabled = False
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
elif name == "model_5_ptm":
c.model.template.enabled = False
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
elif name.startswith("seq"): # SINGLE SEQUENCE EMBEDDING PRESETS
c.update(seq_mode_config.copy_and_resolve_references())
if name == "seqemb_initial_training":
c.data.train.max_msa_clusters = 1
c.data.eval.max_msa_clusters = 1
c.data.train.block_delete_msa = False
c.data.train.max_distillation_msa_clusters = 1
elif name == "seqemb_finetuning":
c.data.train.max_msa_clusters = 1
c.data.eval.max_msa_clusters = 1
c.data.train.block_delete_msa = False
c.data.train.max_distillation_msa_clusters = 1
c.data.train.crop_size = 384
c.loss.violation.weight = 1.0
c.loss.experimentally_resolved.weight = 0.01
elif name == "seq_model_esm1b":
c.data.common.use_templates = True
c.data.common.use_template_torsion_angles = True
c.model.template.enabled = True
c.data.predict.max_msa_clusters = 1
elif name == "seq_model_esm1b_ptm":
c.data.common.use_templates = True
c.data.common.use_template_torsion_angles = True
c.model.template.enabled = True
c.data.predict.max_msa_clusters = 1
c.model.heads.tm.enabled = True
c.loss.tm.weight = 0.1
elif "multimer" in name: # MULTIMER PRESETS
c.update(multimer_config_update.copy_and_resolve_references())
# Not used in multimer
del c.model.template.template_pointwise_attention
del c.loss.fape.backbone
# TODO: Change max_msa_clusters and max_extra_msa to multimer feats within model
if re.fullmatch("^model_[1-5]_multimer(_v2)?$", name):
# c.model.input_embedder.num_msa = 252
# c.model.extra_msa.extra_msa_embedder.num_extra_msa = 1152
c.data.train.crop_size = 384
c.data.train.max_msa_clusters = 252
c.data.eval.max_msa_clusters = 252
c.data.predict.max_msa_clusters = 252
c.data.train.max_extra_msa = 1152
c.data.eval.max_extra_msa = 1152
c.data.predict.max_extra_msa = 1152
c.model.evoformer_stack.fuse_projection_weights = False
c.model.extra_msa.extra_msa_stack.fuse_projection_weights = False
c.model.template.template_pair_stack.fuse_projection_weights = False
elif name == "model_4_multimer_v3":
# c.model.extra_msa.extra_msa_embedder.num_extra_msa = 1152
c.data.train.max_extra_msa = 1152
c.data.eval.max_extra_msa = 1152
c.data.predict.max_extra_msa = 1152
elif name == "model_5_multimer_v3":
# c.model.extra_msa.extra_msa_embedder.num_extra_msa = 1152
c.data.train.max_extra_msa = 1152
c.data.eval.max_extra_msa = 1152
c.data.predict.max_extra_msa = 1152
else:
raise ValueError("Invalid model name")
if long_sequence_inference:
assert not train
c.globals.offload_inference = True
# Default to DeepSpeed memory-efficient attention kernel unless use_lma is explicitly set
c.globals.use_deepspeed_evo_attention = True if not c.globals.use_lma else False
c.globals.use_flash = False
c.model.template.offload_inference = True
c.model.template.template_pair_stack.tune_chunk_size = False
c.model.extra_msa.extra_msa_stack.tune_chunk_size = False
c.model.evoformer_stack.tune_chunk_size = False
if train:
c.globals.blocks_per_ckpt = 1
c.globals.chunk_size = None
c.globals.use_lma = False
c.globals.offload_inference = False
c.model.template.average_templates = False
c.model.template.offload_templates = False
if low_prec:
c.globals.eps = 1e-4
# If we want exact numerical parity with the original, inf can't be
# a global constant
set_inf(c, 1e4)
enforce_config_constraints(c)
return c
c_z = mlc.FieldReference(128, field_type=int)
c_m = mlc.FieldReference(256, field_type=int)
c_t = mlc.FieldReference(64, field_type=int)
c_e = mlc.FieldReference(64, field_type=int)
c_s = mlc.FieldReference(384, field_type=int)
# For seqemb mode, dimension size of the per-residue sequence embedding passed to the model
# In current model, the dimension size is the ESM-1b dimension size i.e. 1280.
preemb_dim_size = mlc.FieldReference(1280, field_type=int)
blocks_per_ckpt = mlc.FieldReference(None, field_type=int)
chunk_size = mlc.FieldReference(4, field_type=int)
aux_distogram_bins = mlc.FieldReference(64, field_type=int)
tm_enabled = mlc.FieldReference(False, field_type=bool)
eps = mlc.FieldReference(1e-8, field_type=float)
templates_enabled = mlc.FieldReference(True, field_type=bool)
embed_template_torsion_angles = mlc.FieldReference(True, field_type=bool)
tune_chunk_size = mlc.FieldReference(True, field_type=bool)
NUM_RES = "num residues placeholder"
NUM_MSA_SEQ = "msa placeholder"
NUM_EXTRA_SEQ = "extra msa placeholder"
NUM_TEMPLATES = "num templates placeholder"
config = mlc.ConfigDict(
{
"data": {
"common": {
"feat": {
"aatype": [NUM_RES],
"all_atom_mask": [NUM_RES, None],
"all_atom_positions": [NUM_RES, None, None],
"alt_chi_angles": [NUM_RES, None],
"atom14_alt_gt_exists": [NUM_RES, None],
"atom14_alt_gt_positions": [NUM_RES, None, None],
"atom14_atom_exists": [NUM_RES, None],
"atom14_atom_is_ambiguous": [NUM_RES, None],
"atom14_gt_exists": [NUM_RES, None],
"atom14_gt_positions": [NUM_RES, None, None],
"atom37_atom_exists": [NUM_RES, None],
"backbone_rigid_mask": [NUM_RES],
"backbone_rigid_tensor": [NUM_RES, None, None],
"bert_mask": [NUM_MSA_SEQ, NUM_RES],
"chi_angles_sin_cos": [NUM_RES, None, None],
"chi_mask": [NUM_RES, None],
"extra_deletion_value": [NUM_EXTRA_SEQ, NUM_RES],
"extra_has_deletion": [NUM_EXTRA_SEQ, NUM_RES],
"extra_msa": [NUM_EXTRA_SEQ, NUM_RES],
"extra_msa_mask": [NUM_EXTRA_SEQ, NUM_RES],
"extra_msa_row_mask": [NUM_EXTRA_SEQ],
"is_distillation": [],
"msa_feat": [NUM_MSA_SEQ, NUM_RES, None],
"msa_mask": [NUM_MSA_SEQ, NUM_RES],
"msa_row_mask": [NUM_MSA_SEQ],
"no_recycling_iters": [],
"pseudo_beta": [NUM_RES, None],
"pseudo_beta_mask": [NUM_RES],
"residue_index": [NUM_RES],
"residx_atom14_to_atom37": [NUM_RES, None],
"residx_atom37_to_atom14": [NUM_RES, None],
"resolution": [],
"rigidgroups_alt_gt_frames": [NUM_RES, None, None, None],
"rigidgroups_group_exists": [NUM_RES, None],
"rigidgroups_group_is_ambiguous": [NUM_RES, None],
"rigidgroups_gt_exists": [NUM_RES, None],
"rigidgroups_gt_frames": [NUM_RES, None, None, None],
"seq_length": [],
"seq_mask": [NUM_RES],
"target_feat": [NUM_RES, None],
"template_aatype": [NUM_TEMPLATES, NUM_RES],
"template_all_atom_mask": [NUM_TEMPLATES, NUM_RES, None],
"template_all_atom_positions": [
NUM_TEMPLATES,
NUM_RES,
None,
None,
],
"template_alt_torsion_angles_sin_cos": [
NUM_TEMPLATES,
NUM_RES,
None,
None,
],
"template_backbone_rigid_mask": [NUM_TEMPLATES, NUM_RES],
"template_backbone_rigid_tensor": [
NUM_TEMPLATES,
NUM_RES,
None,
None,
],
"template_mask": [NUM_TEMPLATES],
"template_pseudo_beta": [NUM_TEMPLATES, NUM_RES, None],
"template_pseudo_beta_mask": [NUM_TEMPLATES, NUM_RES],
"template_sum_probs": [NUM_TEMPLATES, None],
"template_torsion_angles_mask": [
NUM_TEMPLATES,
NUM_RES,
None,
],
"template_torsion_angles_sin_cos": [
NUM_TEMPLATES,
NUM_RES,
None,
None,
],
"true_msa": [NUM_MSA_SEQ, NUM_RES],
"use_clamped_fape": [],
},
"block_delete_msa": {
"msa_fraction_per_block": 0.3,
"randomize_num_blocks": False,
"num_blocks": 5,
},
"masked_msa": {
"profile_prob": 0.1,
"same_prob": 0.1,
"uniform_prob": 0.1,
},
"max_recycling_iters": 3,
"msa_cluster_features": True,
"reduce_msa_clusters_by_max_templates": False,
"resample_msa_in_recycling": True,
"template_features": [
"template_all_atom_positions",
"template_sum_probs",
"template_aatype",
"template_all_atom_mask",
],
"unsupervised_features": [
"aatype",
"residue_index",
"msa",
"num_alignments",
"seq_length",
"between_segment_residues",
"deletion_matrix",
"no_recycling_iters",
],
"use_templates": templates_enabled,
"use_template_torsion_angles": embed_template_torsion_angles,
},
"seqemb_mode": { # Configuration for sequence embedding mode
"enabled": False, # If True, use seq emb instead of MSA
},
"supervised": {
"clamp_prob": 0.9,
"supervised_features": [
"all_atom_mask",
"all_atom_positions",
"resolution",
"use_clamped_fape",
"is_distillation",
],
},
"predict": {
"fixed_size": True,
"subsample_templates": False, # We want top templates.
"block_delete_msa": False,
"masked_msa_replace_fraction": 0.15,
"max_msa_clusters": 512,
"max_extra_msa": 1024,
"max_template_hits": 4,
"max_templates": 4,
"crop": False,
"crop_size": None,
"spatial_crop_prob": None,
"interface_threshold": None,
"supervised": False,
"uniform_recycling": False,
},
"eval": {
"fixed_size": True,
"subsample_templates": False, # We want top templates.
"block_delete_msa": False,
"masked_msa_replace_fraction": 0.15,
"max_msa_clusters": 128,
"max_extra_msa": 1024,
"max_template_hits": 4,
"max_templates": 4,
"crop": False,
"crop_size": None,
"spatial_crop_prob": None,
"interface_threshold": None,
"supervised": True,
"uniform_recycling": False,
},
"train": {
"fixed_size": True,
"subsample_templates": True,
"block_delete_msa": True,
"masked_msa_replace_fraction": 0.15,
"max_msa_clusters": 128,
"max_extra_msa": 1024,
"max_template_hits": 4,
"max_templates": 4,
"shuffle_top_k_prefiltered": 20,
"crop": True,
"crop_size": 256,
"spatial_crop_prob": 0.0,
"interface_threshold": None,
"supervised": True,
"clamp_prob": 0.9,
"max_distillation_msa_clusters": 1000,
"uniform_recycling": True,
"distillation_prob": 0.75,
},
"data_module": {
"use_small_bfd": False,
"data_loaders": {
"batch_size": 1,
"num_workers": 16,
"pin_memory": True,
},
},
},
# Recurring FieldReferences that can be changed globally here
"globals": {
"blocks_per_ckpt": blocks_per_ckpt,
"chunk_size": chunk_size,
# Use DeepSpeed memory-efficient attention kernel. Mutually
# exclusive with use_lma and use_flash.
"use_deepspeed_evo_attention": False,
# Use Staats & Rabe's low-memory attention algorithm. Mutually
# exclusive with use_deepspeed_evo_attention and use_flash.
"use_lma": False,
# Use FlashAttention in selected modules. Mutually exclusive with
# use_deepspeed_evo_attention and use_lma. Doesn't work that well
# on long sequences (>1000 residues).
"use_flash": False,
"offload_inference": False,
"c_z": c_z,
"c_m": c_m,
"c_t": c_t,
"c_e": c_e,
"c_s": c_s,
"eps": eps,
"is_multimer": False,
"seqemb_mode_enabled": False, # Global flag for enabling seq emb mode
},
"model": {
"_mask_trans": False,
"input_embedder": {
"tf_dim": 22,
"msa_dim": 49,
"c_z": c_z,
"c_m": c_m,
"relpos_k": 32,
},
"recycling_embedder": {
"c_z": c_z,
"c_m": c_m,
"min_bin": 3.25,
"max_bin": 20.75,
"no_bins": 15,
"inf": 1e8,
},
"template": {
"distogram": {
"min_bin": 3.25,
"max_bin": 50.75,
"no_bins": 39,
},
"template_single_embedder": {
# DISCREPANCY: c_in is supposed to be 51.
"c_in": 57,
"c_out": c_m,
},
"template_pair_embedder": {
"c_in": 88,
"c_out": c_t,
},
"template_pair_stack": {
"c_t": c_t,
# DISCREPANCY: c_hidden_tri_att here is given in the supplement
# as 64. In the code, it's 16.
"c_hidden_tri_att": 16,
"c_hidden_tri_mul": 64,
"no_blocks": 2,
"no_heads": 4,
"pair_transition_n": 2,
"dropout_rate": 0.25,
"tri_mul_first": False,
"fuse_projection_weights": False,
"blocks_per_ckpt": blocks_per_ckpt,
"tune_chunk_size": tune_chunk_size,
"inf": 1e9,
},
"template_pointwise_attention": {
"c_t": c_t,
"c_z": c_z,
# DISCREPANCY: c_hidden here is given in the supplement as 64.
# It's actually 16.
"c_hidden": 16,
"no_heads": 4,
"inf": 1e5, # 1e9,
},
"inf": 1e5, # 1e9,
"eps": eps, # 1e-6,
"enabled": templates_enabled,
"embed_angles": embed_template_torsion_angles,
"use_unit_vector": False,
# Approximate template computation, saving memory.
# In our experiments, results are equivalent to or better than
# the stock implementation. Should be enabled for all new
# training runs.
"average_templates": False,
# Offload template embeddings to CPU memory. Vastly reduced
# memory consumption at the cost of a modest increase in
# runtime. Useful for inference on very long sequences.
# Mutually exclusive with average_templates. Automatically
# enabled if offload_inference is set.
"offload_templates": False,
},
"extra_msa": {
"extra_msa_embedder": {
"c_in": 25,
"c_out": c_e,
},
"extra_msa_stack": {
"c_m": c_e,
"c_z": c_z,
"c_hidden_msa_att": 8,
"c_hidden_opm": 32,
"c_hidden_mul": 128,
"c_hidden_pair_att": 32,
"no_heads_msa": 8,
"no_heads_pair": 4,
"no_blocks": 4,
"transition_n": 4,
"msa_dropout": 0.15,
"pair_dropout": 0.25,
"opm_first": False,
"fuse_projection_weights": False,
"clear_cache_between_blocks": False,
"tune_chunk_size": tune_chunk_size,
"inf": 1e9,
"eps": eps, # 1e-10,
"ckpt": blocks_per_ckpt is not None,
},
"enabled": True,
},
"evoformer_stack": {
"c_m": c_m,
"c_z": c_z,
"c_hidden_msa_att": 32,
"c_hidden_opm": 32,
"c_hidden_mul": 128,
"c_hidden_pair_att": 32,
"c_s": c_s,
"no_heads_msa": 8,
"no_heads_pair": 4,
"no_blocks": 48,
"transition_n": 4,
"msa_dropout": 0.15,
"pair_dropout": 0.25,
"no_column_attention": False,
"opm_first": False,
"fuse_projection_weights": False,
"blocks_per_ckpt": blocks_per_ckpt,
"clear_cache_between_blocks": False,
"tune_chunk_size": tune_chunk_size,
"inf": 1e9,
"eps": eps, # 1e-10,
},
"structure_module": {
"c_s": c_s,
"c_z": c_z,
"c_ipa": 16,
"c_resnet": 128,
"no_heads_ipa": 12,
"no_qk_points": 4,
"no_v_points": 8,
"dropout_rate": 0.1,
"no_blocks": 8,
"no_transition_layers": 1,
"no_resnet_blocks": 2,
"no_angles": 7,
"trans_scale_factor": 10,
"epsilon": eps, # 1e-12,
"inf": 1e5,
},
"heads": {
"lddt": {
"no_bins": 50,
"c_in": c_s,
"c_hidden": 128,
},
"distogram": {
"c_z": c_z,
"no_bins": aux_distogram_bins,
},
"tm": {
"c_z": c_z,
"no_bins": aux_distogram_bins,
"enabled": tm_enabled,
},
"masked_msa": {
"c_m": c_m,
"c_out": 23,
},
"experimentally_resolved": {
"c_s": c_s,
"c_out": 37,
},
},
# A negative value indicates that no early stopping will occur, i.e.
# the model will always run `max_recycling_iters` number of recycling
# iterations. A positive value will enable early stopping if the
# difference in pairwise distances is less than the tolerance between
# recycling steps.
"recycle_early_stop_tolerance": -1.0,
},
"relax": {
"max_iterations": 0, # no max
"tolerance": 2.39,
"stiffness": 10.0,
"max_outer_iterations": 20,
"exclude_residues": [],
},
"loss": {
"distogram": {
"min_bin": 2.3125,
"max_bin": 21.6875,
"no_bins": 64,
"eps": eps, # 1e-6,
"weight": 0.3,
},
"experimentally_resolved": {
"eps": eps, # 1e-8,
"min_resolution": 0.1,
"max_resolution": 3.0,
"weight": 0.0,
},
"fape": {
"backbone": {
"clamp_distance": 10.0,
"loss_unit_distance": 10.0,
"weight": 0.5,
},
"sidechain": {
"clamp_distance": 10.0,
"length_scale": 10.0,
"weight": 0.5,
},
"eps": 1e-4,
"weight": 1.0,
},
"plddt_loss": {
"min_resolution": 0.1,
"max_resolution": 3.0,
"cutoff": 15.0,
"no_bins": 50,
"eps": eps, # 1e-10,
"weight": 0.01,
},
"masked_msa": {
"num_classes": 23,
"eps": eps, # 1e-8,
"weight": 2.0,
},
"supervised_chi": {
"chi_weight": 0.5,
"angle_norm_weight": 0.01,
"eps": eps, # 1e-6,
"weight": 1.0,
},
"violation": {
"violation_tolerance_factor": 12.0,
"clash_overlap_tolerance": 1.5,
"average_clashes": False,
"eps": eps, # 1e-6,
"weight": 0.0,
},
"tm": {
"max_bin": 31,
"no_bins": 64,
"min_resolution": 0.1,
"max_resolution": 3.0,
"eps": eps, # 1e-8,
"weight": 0.0,
"enabled": tm_enabled,
},
"chain_center_of_mass": {
"clamp_distance": -4.0,
"weight": 0.0,
"eps": eps,
"enabled": False,
},
"eps": eps,
},
"ema": {"decay": 0.999},
}
)
multimer_config_update = mlc.ConfigDict(
{
"globals": {"is_multimer": True},
"data": {
"common": {
"feat": {
"aatype": [NUM_RES],
"all_atom_mask": [NUM_RES, None],
"all_atom_positions": [NUM_RES, None, None],
# "all_chains_entity_ids": [], # TODO: Resolve missing features, remove processed msa feats
# "all_crops_all_chains_mask": [],
# "all_crops_all_chains_positions": [],
# "all_crops_all_chains_residue_ids": [],
"assembly_num_chains": [],
"asym_id": [NUM_RES],
"atom14_atom_exists": [NUM_RES, None],
"atom37_atom_exists": [NUM_RES, None],
"bert_mask": [NUM_MSA_SEQ, NUM_RES],
"cluster_bias_mask": [NUM_MSA_SEQ],
"cluster_profile": [NUM_MSA_SEQ, NUM_RES, None],
"cluster_deletion_mean": [NUM_MSA_SEQ, NUM_RES],
"deletion_matrix": [NUM_MSA_SEQ, NUM_RES],
"deletion_mean": [NUM_RES],
"entity_id": [NUM_RES],
"entity_mask": [NUM_RES],
"extra_deletion_matrix": [NUM_EXTRA_SEQ, NUM_RES],
"extra_msa": [NUM_EXTRA_SEQ, NUM_RES],
"extra_msa_mask": [NUM_EXTRA_SEQ, NUM_RES],
# "mem_peak": [],
"msa": [NUM_MSA_SEQ, NUM_RES],
"msa_feat": [NUM_MSA_SEQ, NUM_RES, None],
"msa_mask": [NUM_MSA_SEQ, NUM_RES],
"msa_profile": [NUM_RES, None],
"num_alignments": [],
"num_templates": [],
# "queue_size": [],
"residue_index": [NUM_RES],
"residx_atom14_to_atom37": [NUM_RES, None],
"residx_atom37_to_atom14": [NUM_RES, None],
"resolution": [],
"seq_length": [],
"seq_mask": [NUM_RES],
"sym_id": [NUM_RES],
"target_feat": [NUM_RES, None],
"template_aatype": [NUM_TEMPLATES, NUM_RES],
"template_all_atom_mask": [NUM_TEMPLATES, NUM_RES, None],
"template_all_atom_positions": [
NUM_TEMPLATES,
NUM_RES,
None,
None,
],
"true_msa": [NUM_MSA_SEQ, NUM_RES],
},
"max_recycling_iters": 20, # For training, value is 3
"unsupervised_features": [
"aatype",
"residue_index",
"msa",
"num_alignments",
"seq_length",
"between_segment_residues",
"deletion_matrix",
"no_recycling_iters",
# Additional multimer features
"msa_mask",
"seq_mask",
"asym_id",
"entity_id",
"sym_id",
],
},
"supervised": {"clamp_prob": 1.0},
# TODO: Change max_msa_clusters and max_extra_msa to multimer feats within model:
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | true |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_rigids.py | src/cheap/openfold_utils/_rigids.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from functools import lru_cache
from typing import Tuple, Any, Sequence, Callable, Optional
import numpy as np
import torch
def rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Multiplies two [*, 3, 3] rotation-matrix tensors.

    The product is spelled out element-wise (rather than via torch.matmul)
    so that AMP cannot downcast the accumulation.

    Args:
        a: [*, 3, 3] left multiplicand
        b: [*, 3, 3] right multiplicand
    Returns:
        The product ab
    """
    rows = []
    for i in range(3):
        entries = [
            a[..., i, 0] * b[..., 0, j]
            + a[..., i, 1] * b[..., 1, j]
            + a[..., i, 2] * b[..., 2, j]
            for j in range(3)
        ]
        rows.append(torch.stack(entries, dim=-1))
    return torch.stack(rows, dim=-2)
def rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """
    Rotates [*, 3] points by [*, 3, 3] rotation matrices.

    Written out component-wise so that AMP cannot downcast the computation.

    Args:
        r: [*, 3, 3] rotation matrices
        t: [*, 3] coordinate tensors
    Returns:
        [*, 3] rotated coordinates
    """
    x, y, z = torch.unbind(t, dim=-1)
    rotated = [
        r[..., i, 0] * x + r[..., i, 1] * y + r[..., i, 2] * z for i in range(3)
    ]
    return torch.stack(rotated, dim=-1)
@lru_cache(maxsize=None)
def identity_rot_mats(
    batch_dims: Tuple[int],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """
    Builds a [*batch_dims, 3, 3] tensor of identity rotation matrices.

    Results are memoized per (batch_dims, dtype, device, requires_grad),
    so callers share one cached tensor and must not mutate it in place.
    """
    eye = torch.eye(3, dtype=dtype, device=device, requires_grad=requires_grad)
    expanded = eye.view(*((1,) * len(batch_dims)), 3, 3).expand(*batch_dims, -1, -1)
    return expanded.contiguous()
@lru_cache(maxsize=None)
def identity_trans(
    batch_dims: Tuple[int],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """
    Builds a [*batch_dims, 3] tensor of zero (identity) translations.

    Memoized per argument tuple; the returned tensor is shared and should
    be treated as read-only by callers.
    """
    return torch.zeros(
        (*batch_dims, 3), dtype=dtype, device=device, requires_grad=requires_grad
    )
@lru_cache(maxsize=None)
def identity_quats(
    batch_dims: Tuple[int],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """
    Builds a [*batch_dims, 4] tensor of identity quaternions (1, 0, 0, 0).

    Memoized per (batch_dims, dtype, device, requires_grad); callers share
    the cached tensor and must not mutate it in place.
    """
    quat = torch.zeros(
        (*batch_dims, 4), dtype=dtype, device=device, requires_grad=requires_grad
    )
    # Set the scalar (w) component in-place. Done under no_grad so the write
    # on a potential requires_grad leaf is not recorded by autograd.
    with torch.no_grad():
        quat[..., 0] = 1
    return quat
# Quaternion components (w, x, y, z) are named a-d. _qtr_keys lists all 16
# ordered component pairs ("aa", "ab", ...); _qtr_ind_dict maps each pair to
# its flat index in a 4x4 coefficient matrix.
_quat_elements = ["a", "b", "c", "d"]
_qtr_keys = [l1 + l2 for l1 in _quat_elements for l2 in _quat_elements]
_qtr_ind_dict = {key: ind for ind, key in enumerate(_qtr_keys)}
def _to_mat(pairs):
    # Builds a 4x4 coefficient matrix from (component-pair, value) entries,
    # e.g. ("bc", 2) places 2 at the row/column of the b*c product.
    mat = np.zeros((4, 4))
    for pair in pairs:
        key, value = pair
        ind = _qtr_ind_dict[key]
        mat[ind // 4][ind % 4] = value
    return mat
# _QTR_MAT[..., i, j] holds the 4x4 coefficient matrix C such that entry
# (i, j) of the rotation matrix equals q^T C q for a quaternion
# q = (a, b, c, d); see quat_to_rot below.
_QTR_MAT = np.zeros((4, 4, 3, 3))
_QTR_MAT[..., 0, 0] = _to_mat([("aa", 1), ("bb", 1), ("cc", -1), ("dd", -1)])
_QTR_MAT[..., 0, 1] = _to_mat([("bc", 2), ("ad", -2)])
_QTR_MAT[..., 0, 2] = _to_mat([("bd", 2), ("ac", 2)])
_QTR_MAT[..., 1, 0] = _to_mat([("bc", 2), ("ad", 2)])
_QTR_MAT[..., 1, 1] = _to_mat([("aa", 1), ("bb", -1), ("cc", 1), ("dd", -1)])
_QTR_MAT[..., 1, 2] = _to_mat([("cd", 2), ("ab", -2)])
_QTR_MAT[..., 2, 0] = _to_mat([("bd", 2), ("ac", -2)])
_QTR_MAT[..., 2, 1] = _to_mat([("cd", 2), ("ab", 2)])
_QTR_MAT[..., 2, 2] = _to_mat([("aa", 1), ("bb", -1), ("cc", -1), ("dd", 1)])
def quat_to_rot(quat: torch.Tensor) -> torch.Tensor:
    """
    Converts a quaternion to a rotation matrix.

    Args:
        quat: [*, 4] quaternions
    Returns:
        [*, 3, 3] rotation matrices
    """
    # Outer product q q^T: [*, 4, 4]
    outer = quat[..., None] * quat[..., None, :]
    # Coefficient table: [4, 4, 3, 3]
    coeffs = _get_quat("_QTR_MAT", dtype=quat.dtype, device=quat.device)
    # Broadcast the table over the batch dims, then contract the two
    # quaternion axes: [*, 3, 3]
    coeffs = coeffs.view((1,) * (outer.ndim - 2) + coeffs.shape)
    return torch.sum(outer[..., None, None] * coeffs, dim=(-3, -4))
def rot_to_quat(rot: torch.Tensor):
    """
    Converts [*, 3, 3] rotation matrices to [*, 4] quaternions.

    Uses the eigenvector (belonging to the largest eigenvalue) of the
    symmetric 4x4 matrix K assembled from the rotation entries. The overall
    sign of the returned quaternion is arbitrary (eigenvector sign).
    """
    if rot.shape[-2:] != (3, 3):
        raise ValueError("Input rotation is incorrectly shaped")
    xx, xy, xz = rot[..., 0, 0], rot[..., 0, 1], rot[..., 0, 2]
    yx, yy, yz = rot[..., 1, 0], rot[..., 1, 1], rot[..., 1, 2]
    zx, zy, zz = rot[..., 2, 0], rot[..., 2, 1], rot[..., 2, 2]
    k_rows = [
        [xx + yy + zz, zy - yz, xz - zx, yx - xy],
        [zy - yz, xx - yy - zz, xy + yx, xz + zx],
        [xz - zx, xy + yx, yy - xx - zz, yz + zy],
        [yx - xy, xz + zx, yz + zy, zz - xx - yy],
    ]
    k = (1.0 / 3.0) * torch.stack(
        [torch.stack(row, dim=-1) for row in k_rows], dim=-2
    )
    # eigh returns eigenvalues in ascending order; the last eigenvector
    # corresponds to the largest eigenvalue and is the quaternion.
    _, eigvecs = torch.linalg.eigh(k)
    return eigvecs[..., -1]
# Coefficient tensor for the Hamilton quaternion product: the product
# (q1 q2)[k] equals sum over i, j of _QUAT_MULTIPLY[i, j, k] * q1[i] * q2[j].
_QUAT_MULTIPLY = np.zeros((4, 4, 4))
_QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]
_QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]]
_QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, 1, 0, 0]]
_QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0]]
# Same table with the second operand restricted to its vector (x, y, z) part.
_QUAT_MULTIPLY_BY_VEC = _QUAT_MULTIPLY[:, 1:, :]
# Registry of NumPy coefficient tables, converted lazily (and cached) to
# torch tensors by _get_quat.
_CACHED_QUATS = {
    "_QTR_MAT": _QTR_MAT,
    "_QUAT_MULTIPLY": _QUAT_MULTIPLY,
    "_QUAT_MULTIPLY_BY_VEC": _QUAT_MULTIPLY_BY_VEC,
}
@lru_cache(maxsize=None)
def _get_quat(quat_key, dtype, device):
    # Converts a cached NumPy coefficient table to a torch tensor, memoized
    # once per (table name, dtype, device) combination.
    return torch.tensor(_CACHED_QUATS[quat_key], dtype=dtype, device=device)
def quat_multiply(quat1, quat2):
    """Multiply a quaternion by another quaternion (Hamilton product)."""
    mat = _get_quat("_QUAT_MULTIPLY", dtype=quat1.dtype, device=quat1.device)
    # out[..., k] = sum_{i, j} mat[i, j, k] * quat1[..., i] * quat2[..., j]
    coeffs = mat.view((1,) * (quat1.ndim - 1) + mat.shape)
    terms = coeffs * quat1[..., :, None, None] * quat2[..., None, :, None]
    return torch.sum(terms, dim=(-3, -2))
def quat_multiply_by_vec(quat, vec):
    """Multiply a quaternion by a pure-vector quaternion (0, x, y, z)."""
    mat = _get_quat("_QUAT_MULTIPLY_BY_VEC", dtype=quat.dtype, device=quat.device)
    # out[..., k] = sum_{i, j} mat[i, j, k] * quat[..., i] * vec[..., j]
    coeffs = mat.view((1,) * (quat.ndim - 1) + mat.shape)
    terms = coeffs * quat[..., :, None, None] * vec[..., None, :, None]
    return torch.sum(terms, dim=(-3, -2))
def invert_rot_mat(rot_mat: torch.Tensor):
    """Invert a rotation matrix via its transpose (rotations are orthogonal)."""
    return torch.transpose(rot_mat, -1, -2)
def invert_quat(quat: torch.Tensor):
    """Invert a quaternion: its conjugate divided by its squared norm."""
    conj = torch.cat([quat[..., :1], -quat[..., 1:]], dim=-1)
    return conj / torch.sum(quat**2, dim=-1, keepdim=True)
class Rotation:
    """
    A 3D rotation. Depending on how the object is initialized, the
    rotation is represented by either a [*, 3, 3] rotation matrix or a
    [*, 4] quaternion, though both formats are made available by helper
    functions.

    To simplify gradient computation, the underlying format of the
    rotation cannot be changed in-place. Like Rigid, the class is designed
    to mimic the behavior of a torch Tensor, almost as if each Rotation
    object were a tensor of rotations, in one format or another.
    """

    def __init__(
        self,
        rot_mats: Optional[torch.Tensor] = None,
        quats: Optional[torch.Tensor] = None,
        normalize_quats: bool = True,
    ):
        """
        Args:
            rot_mats:
                A [*, 3, 3] rotation matrix tensor. Mutually exclusive with
                quats
            quats:
                A [*, 4] quaternion. Mutually exclusive with rot_mats. If
                normalize_quats is not True, must be a unit quaternion
            normalize_quats:
                If quats is specified, whether to normalize quats
        """
        if (rot_mats is None and quats is None) or (
            rot_mats is not None and quats is not None
        ):
            raise ValueError("Exactly one input argument must be specified")

        if (rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or (
            quats is not None and quats.shape[-1] != 4
        ):
            raise ValueError("Incorrectly shaped rotation matrix or quaternion")

        # Force full precision so AMP cannot degrade rotation accuracy.
        if quats is not None:
            quats = quats.to(dtype=torch.float32)
        if rot_mats is not None:
            rot_mats = rot_mats.to(dtype=torch.float32)

        if quats is not None and normalize_quats:
            quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)

        self._rot_mats = rot_mats
        self._quats = quats

    @staticmethod
    def identity(
        shape,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        requires_grad: bool = True,
        fmt: str = "quat",
    ) -> Rotation:
        """
        Returns an identity Rotation.

        Args:
            shape: The batch "shape" of the resulting Rotation object (see
                the shape property)
            dtype: The torch dtype for the rotation
            device: The torch device for the new rotation
            requires_grad: Whether the underlying tensors should require
                gradient computation
            fmt: One of "quat" or "rot_mat"; the underlying storage format
        Returns:
            A new identity rotation
        """
        if fmt == "rot_mat":
            rot_mats = identity_rot_mats(
                shape,
                dtype,
                device,
                requires_grad,
            )
            return Rotation(rot_mats=rot_mats, quats=None)
        elif fmt == "quat":
            quats = identity_quats(shape, dtype, device, requires_grad)
            return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
        else:
            # BUGFIX: the message previously rendered the literal "f{fmt}".
            raise ValueError(f"Invalid format: {fmt}")

    # Magic methods

    def __getitem__(self, index: Any) -> Rotation:
        """
        Allows torch-style indexing over the virtual shape of the rotation
        object (see the shape property).

        Args:
            index: A torch index. E.g. (1, 3, 2), or (slice(None,))
        Returns:
            The indexed rotation
        """
        if type(index) != tuple:
            index = (index,)

        if self._rot_mats is not None:
            # Preserve the trailing 3x3 matrix dimensions.
            rot_mats = self._rot_mats[index + (slice(None), slice(None))]
            return Rotation(rot_mats=rot_mats)
        elif self._quats is not None:
            # Preserve the trailing quaternion dimension; no renormalization.
            quats = self._quats[index + (slice(None),)]
            return Rotation(quats=quats, normalize_quats=False)
        else:
            raise ValueError("Both rotations are None")

    def __mul__(self, right: torch.Tensor) -> Rotation:
        """
        Pointwise left multiplication of the rotation with a tensor. Can be
        used to e.g. mask the Rotation.

        Args:
            right: The tensor multiplicand
        Returns:
            The product
        """
        if not (isinstance(right, torch.Tensor)):
            raise TypeError("The other multiplicand must be a Tensor")

        if self._rot_mats is not None:
            rot_mats = self._rot_mats * right[..., None, None]
            return Rotation(rot_mats=rot_mats, quats=None)
        elif self._quats is not None:
            quats = self._quats * right[..., None]
            return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
        else:
            raise ValueError("Both rotations are None")

    def __rmul__(self, left: torch.Tensor) -> Rotation:
        """
        Reverse pointwise multiplication of the rotation with a tensor.

        Args:
            left: The left multiplicand
        Returns:
            The product
        """
        return self.__mul__(left)

    # Properties

    @property
    def shape(self) -> torch.Size:
        """
        The virtual shape of the rotation object: the batch dimensions of
        the underlying rotation matrix or quaternion (e.g. [10] for a
        [10, 3, 3] rotation matrix tensor).
        """
        if self._quats is not None:
            return self._quats.shape[:-1]

        return self._rot_mats.shape[:-2]

    @property
    def dtype(self) -> torch.dtype:
        """The dtype of the underlying rotation tensor."""
        if self._rot_mats is not None:
            return self._rot_mats.dtype
        elif self._quats is not None:
            return self._quats.dtype
        else:
            raise ValueError("Both rotations are None")

    @property
    def device(self) -> torch.device:
        """The device of the underlying rotation tensor."""
        if self._rot_mats is not None:
            return self._rot_mats.device
        elif self._quats is not None:
            return self._quats.device
        else:
            raise ValueError("Both rotations are None")

    @property
    def requires_grad(self) -> bool:
        """The requires_grad property of the underlying rotation tensor."""
        if self._rot_mats is not None:
            return self._rot_mats.requires_grad
        elif self._quats is not None:
            return self._quats.requires_grad
        else:
            raise ValueError("Both rotations are None")

    def get_rot_mats(self) -> torch.Tensor:
        """
        Returns the underlying rotation as a [*, 3, 3] rotation matrix
        tensor, converting from quaternions if necessary.
        """
        rot_mats = self._rot_mats
        if rot_mats is None:
            if self._quats is None:
                raise ValueError("Both rotations are None")
            rot_mats = quat_to_rot(self._quats)

        return rot_mats

    def get_quats(self) -> torch.Tensor:
        """
        Returns the underlying rotation as a [*, 4] quaternion tensor.

        If the rotation is stored as matrices this calls rot_to_quat and
        hence torch.linalg.eigh.
        """
        quats = self._quats
        if quats is None:
            if self._rot_mats is None:
                raise ValueError("Both rotations are None")
            quats = rot_to_quat(self._rot_mats)

        return quats

    def get_cur_rot(self) -> torch.Tensor:
        """Returns the rotation in its current (stored) format."""
        if self._rot_mats is not None:
            return self._rot_mats
        elif self._quats is not None:
            return self._quats
        else:
            raise ValueError("Both rotations are None")

    # Rotation functions

    def compose_q_update_vec(
        self, q_update_vec: torch.Tensor, normalize_quats: bool = True
    ) -> Rotation:
        """
        Returns a new quaternion Rotation after updating the current
        object's underlying rotation with a quaternion update, formatted
        as a [*, 3] tensor whose three columns represent x, y, z such
        that (1, x, y, z) is the desired (not necessarily unit) quaternion
        update.

        Args:
            q_update_vec: A [*, 3] quaternion update tensor
            normalize_quats: Whether to normalize the output quaternion
        Returns:
            An updated Rotation
        """
        quats = self.get_quats()
        new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)
        return Rotation(
            rot_mats=None,
            quats=new_quats,
            normalize_quats=normalize_quats,
        )

    def compose_r(self, r: Rotation) -> Rotation:
        """
        Compose the rotation matrices of the current Rotation object with
        those of another.

        Args:
            r: An update rotation object
        Returns:
            An updated rotation object (matrix format)
        """
        new_rot_mats = rot_matmul(self.get_rot_mats(), r.get_rot_mats())
        return Rotation(rot_mats=new_rot_mats, quats=None)

    def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:
        """
        Compose the quaternions of the current Rotation object with those
        of another. May call torch.linalg.eigh if either rotation is
        stored as matrices.

        Args:
            r: An update rotation object
            normalize_quats: Whether to normalize the output quaternion
        Returns:
            An updated rotation object (quaternion format)
        """
        new_quats = quat_multiply(self.get_quats(), r.get_quats())
        return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)

    def apply(self, pts: torch.Tensor) -> torch.Tensor:
        """
        Apply the current Rotation as a rotation matrix to a set of 3D
        coordinates.

        Args:
            pts: A [*, 3] set of points
        Returns:
            [*, 3] rotated points
        """
        return rot_vec_mul(self.get_rot_mats(), pts)

    def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
        """
        The inverse of the apply() method.

        Args:
            pts: A [*, 3] set of points
        Returns:
            [*, 3] inverse-rotated points
        """
        inv_rot_mats = invert_rot_mat(self.get_rot_mats())
        return rot_vec_mul(inv_rot_mats, pts)

    def invert(self) -> Rotation:
        """Returns the inverse of the current Rotation."""
        if self._rot_mats is not None:
            return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)
        elif self._quats is not None:
            return Rotation(
                rot_mats=None,
                quats=invert_quat(self._quats),
                normalize_quats=False,
            )
        else:
            raise ValueError("Both rotations are None")

    # "Tensor" stuff

    def unsqueeze(self, dim: int) -> Rotation:
        """
        Analogous to torch.unsqueeze. The dimension is relative to the
        shape of the Rotation object.

        (BUGFIX: the return annotation previously read Rigid; this method
        returns a Rotation.)

        Args:
            dim: A positive or negative dimension index.
        Returns:
            The unsqueezed Rotation.
        """
        if dim >= len(self.shape):
            raise ValueError("Invalid dimension")

        if self._rot_mats is not None:
            # Shift negative dims past the trailing 3x3 matrix axes.
            rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)
            return Rotation(rot_mats=rot_mats, quats=None)
        elif self._quats is not None:
            # Shift negative dims past the trailing quaternion axis.
            quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)
            return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
        else:
            raise ValueError("Both rotations are None")

    @staticmethod
    def cat(rs: Sequence[Rotation], dim: int) -> Rotation:
        """
        Concatenates rotations along one of the batch dimensions. Analogous
        to torch.cat().

        Note that the output of this operation is always a rotation matrix,
        regardless of the format of input rotations.

        (BUGFIX: the return annotation previously read Rigid; this method
        returns a Rotation.)

        Args:
            rs: A list of rotation objects
            dim: The dimension along which the rotations should be
                concatenated
        Returns:
            A concatenated Rotation object in rotation matrix format
        """
        rot_mats = [r.get_rot_mats() for r in rs]
        rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)
        return Rotation(rot_mats=rot_mats, quats=None)

    def map_tensor_fn(self, fn) -> Rotation:
        """
        Apply a Tensor -> Tensor function to underlying rotation tensors,
        mapping over the rotation dimension(s). Can be used e.g. to sum out
        a one-hot batch dimension.

        Args:
            fn: A Tensor -> Tensor function to be mapped over the Rotation
        Returns:
            The transformed Rotation object
        """
        if self._rot_mats is not None:
            # Flatten the 3x3 matrices so fn only sees batch-shaped slices.
            rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))
            rot_mats = torch.stack(
                list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1
            )
            rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))
            return Rotation(rot_mats=rot_mats, quats=None)
        elif self._quats is not None:
            quats = torch.stack(
                list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1
            )
            return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
        else:
            raise ValueError("Both rotations are None")

    def to(
        self, device: Optional[torch.device], dtype: Optional[torch.dtype]
    ) -> Rotation:
        """
        Analogous to the to() method of torch Tensors.

        Args:
            device: A torch device
            dtype: A torch dtype
        Returns:
            A copy of the Rotation using the new device and dtype
        """
        if self._rot_mats is not None:
            return Rotation(
                rot_mats=self._rot_mats.to(device=device, dtype=dtype),
                quats=None,
            )
        elif self._quats is not None:
            return Rotation(
                rot_mats=None,
                quats=self._quats.to(device=device, dtype=dtype),
                normalize_quats=False,
            )
        else:
            raise ValueError("Both rotations are None")

    def detach(self) -> Rotation:
        """
        Returns a copy of the Rotation whose underlying tensor has been
        detached from its torch graph.
        """
        if self._rot_mats is not None:
            return Rotation(rot_mats=self._rot_mats.detach(), quats=None)
        elif self._quats is not None:
            return Rotation(
                rot_mats=None,
                quats=self._quats.detach(),
                normalize_quats=False,
            )
        else:
            raise ValueError("Both rotations are None")
class Rigid:
"""
A class representing a rigid transformation. Little more than a wrapper
around two objects: a Rotation object and a [*, 3] translation
Designed to behave approximately like a single torch tensor with the
shape of the shared batch dimensions of its component parts.
"""
    def __init__(
        self,
        rots: Optional[Rotation],
        trans: Optional[torch.Tensor],
    ):
        """
        Args:
            rots: A Rotation object with batch shape [*] (None for identity)
            trans: A corresponding [*, 3] translation tensor (None for zero)

        At least one of the two must be provided; the missing one is filled
        in with the identity of matching batch shape, dtype and device.
        """
        # (we need device, dtype, etc. from at least one input)
        batch_dims, dtype, device, requires_grad = None, None, None, None
        if trans is not None:
            batch_dims = trans.shape[:-1]
            dtype = trans.dtype
            device = trans.device
            requires_grad = trans.requires_grad
        elif rots is not None:
            batch_dims = rots.shape
            dtype = rots.dtype
            device = rots.device
            requires_grad = rots.requires_grad
        else:
            raise ValueError("At least one input argument must be specified")
        if rots is None:
            rots = Rotation.identity(
                batch_dims,
                dtype,
                device,
                requires_grad,
            )
        elif trans is None:
            trans = identity_trans(
                batch_dims,
                dtype,
                device,
                requires_grad,
            )
        if (rots.shape != trans.shape[:-1]) or (rots.device != trans.device):
            raise ValueError("Rots and trans incompatible")
        # Force full precision. Happens to the rotations automatically.
        trans = trans.to(dtype=torch.float32)
        self._rots = rots
        self._trans = trans
    @staticmethod
    def identity(
        shape: Tuple[int],
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        requires_grad: bool = True,
        fmt: str = "quat",
    ) -> Rigid:
        """
        Constructs an identity transformation.

        Args:
            shape:
                The desired shape
            dtype:
                The dtype of both internal tensors
            device:
                The device of both internal tensors
            requires_grad:
                Whether grad should be enabled for the internal tensors
            fmt:
                Internal rotation storage format, "quat" or "rot_mat"
        Returns:
            The identity transformation
        """
        return Rigid(
            Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),
            identity_trans(shape, dtype, device, requires_grad),
        )
    def __getitem__(
        self,
        index: Any,
    ) -> Rigid:
        """
        Indexes the affine transformation with PyTorch-style indices.
        The index is applied to the shared dimensions of both the rotation
        and the translation.

        E.g.::

            r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)
            t = Rigid(r, torch.rand(10, 10, 3))
            indexed = t[3, 4:6]
            assert(indexed.shape == (2,))
            assert(indexed.get_rots().shape == (2,))
            assert(indexed.get_trans().shape == (2, 3))

        Args:
            index: A standard torch tensor index. E.g. 8, (10, None, 3),
                or (3, slice(0, 1, None))
        Returns:
            The indexed tensor
        """
        if type(index) != tuple:
            index = (index,)
        # The extra slice(None) keeps the trailing coordinate dim intact.
        return Rigid(
            self._rots[index],
            self._trans[index + (slice(None),)],
        )
    def __mul__(
        self,
        right: torch.Tensor,
    ) -> Rigid:
        """
        Pointwise left multiplication of the transformation with a tensor.
        Can be used to e.g. mask the Rigid.

        Args:
            right:
                The tensor multiplicand
        Returns:
            The product
        """
        if not (isinstance(right, torch.Tensor)):
            raise TypeError("The other multiplicand must be a Tensor")
        new_rots = self._rots * right
        # Broadcast the multiplier over the trailing coordinate dimension.
        new_trans = self._trans * right[..., None]
        return Rigid(new_rots, new_trans)
def __rmul__(
self,
left: torch.Tensor,
) -> Rigid:
"""
Reverse pointwise multiplication of the transformation with a
tensor.
Args:
left:
The left multiplicand
Returns:
The product
"""
return self.__mul__(left)
@property
def shape(self) -> torch.Size:
"""
Returns the shape of the shared dimensions of the rotation and
the translation.
Returns:
The shape of the transformation
"""
s = self._trans.shape[:-1]
return s
@property
def device(self) -> torch.device:
"""
Returns the device on which the Rigid's tensors are located.
Returns:
The device on which the Rigid's tensors are located
"""
return self._trans.device
    def get_rots(self) -> Rotation:
        """
        Getter for the rotation component.

        Returns:
            The stored Rotation object (not a copy)
        """
        return self._rots
    def get_trans(self) -> torch.Tensor:
        """
        Getter for the translation component.

        Returns:
            The stored [*, 3] translation tensor (not a copy)
        """
        return self._trans
    def compose_q_update_vec(
        self,
        q_update_vec: torch.Tensor,
    ) -> Rigid:
        """
        Composes the transformation with a [*, 6] update vector: the first
        three columns are the x, y, and z values of a quaternion update of
        form (1, x, y, z); the last three are a translation expressed in
        the local frame.

        Args:
            q_update_vec: The quaternion/translation update vector.
        Returns:
            The composed transformation.
        """
        q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]
        new_rots = self._rots.compose_q_update_vec(q_vec)
        # Rotate the translation update into the global frame before adding.
        trans_update = self._rots.apply(t_vec)
        new_translation = self._trans + trans_update
        return Rigid(new_rots, new_translation)
    def compose(
        self,
        r: Rigid,
    ) -> Rigid:
        """
        Composes the current rigid object with another: the result maps x
        to self(r(x)). Rotations are multiplied, and r's translation is
        rotated into this frame before being added.

        Args:
            r:
                Another Rigid object
        Returns:
            The composition of the two transformations
        """
        new_rot = self._rots.compose_r(r._rots)
        new_trans = self._rots.apply(r._trans) + self._trans
        return Rigid(new_rot, new_trans)
def apply(
self,
pts: torch.Tensor,
) -> torch.Tensor:
"""
Applies the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor.
Returns:
The transformed points.
"""
rotated = self._rots.apply(pts)
return rotated + self._trans
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
"""
Applies the inverse of the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor
Returns:
The transformed points.
"""
pts = pts - self._trans
return self._rots.invert_apply(pts)
    def invert(self) -> Rigid:
        """
        Inverts the transformation.

        The inverse of x -> R x + t is x -> R^-1 (x - t), i.e. rotation
        R^-1 paired with translation -(R^-1 t).

        Returns:
            The inverse transformation.
        """
        rot_inv = self._rots.invert()
        trn_inv = rot_inv.apply(self._trans)
        return Rigid(rot_inv, -1 * trn_inv)
def map_tensor_fn(self, fn) -> Rigid:
"""
Apply a Tensor -> Tensor function to underlying translation and
rotation tensors, mapping over the translation/rotation dimensions
respectively.
Args:
fn:
A Tensor -> Tensor function to be mapped over the Rigid
Returns:
The transformed Rigid object
"""
new_rots = self._rots.map_tensor_fn(fn)
new_trans = torch.stack(
list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1
)
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | true |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_protein.py | src/cheap/openfold_utils/_protein.py | import dataclasses
import numpy as np
from typing import Optional, Sequence
import io
from Bio.PDB import PDBParser
import warnings
import string
from transformers.models.esm.openfold_utils import residue_constants
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    An immutable, NumPy-backed container for per-residue atom coordinates,
    occupancy masks and metadata, following AlphaFold/OpenFold conventions.
    """
    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, CB.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def _parse_parents(pdb_str: str):
    """Extracts template parents and per-chain indices from PARENT records.

    Returns a (parents, parents_chain_index) pair; both are None when no
    PARENT records are present. A PARENT line reading "N/A" still advances
    the chain counter but contributes no parents.
    """
    if "PARENT" not in pdb_str:
        return None, None
    parents = []
    parents_chain_index = []
    chain_counter = 0
    for line in pdb_str.split("\n"):
        if "PARENT" in line:
            if "N/A" not in line:
                parent_names = line.split()[1:]
                parents.extend(parent_names)
                parents_chain_index.extend([chain_counter for _ in parent_names])
            chain_counter += 1
    return parents, parents_chain_index


def from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:
    """Takes a PDB string and constructs a Protein object.

    WARNING: All non-standard residue types will be converted into UNK. All
    non-standard atoms will be ignored.

    Args:
        pdb_str: The contents of the pdb file
        chain_id: If None, then the whole pdb file is parsed. If chain_id is
            specified (e.g. A), then only that chain is parsed.

    Returns:
        A new `Protein` parsed from the pdb contents.

    Raises:
        ValueError: If the PDB contains more than one model.
        KeyError: If a chain id is not a letter, digit, or whitespace char.
    """
    pdb_fh = io.StringIO(pdb_str)
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("none", pdb_fh)
    models = list(structure.get_models())
    if len(models) != 1:
        raise ValueError(
            f"Only single model PDBs are supported. Found {len(models)} models."
        )
    model = models[0]
    atom_positions = []
    aatype = []
    atom_mask = []
    residue_index = []
    chain_ids = []
    b_factors = []
    for chain in model:
        if chain_id is not None and chain.id != chain_id:
            continue
        for res in chain:
            # Insertion codes (res.id[2]) are tolerated; residues keep their
            # author-assigned numbering.
            res_shortname = residue_constants.restype_3to1.get(res.resname, "X")
            restype_idx = residue_constants.restype_order.get(
                res_shortname, residue_constants.restype_num
            )
            pos = np.zeros((residue_constants.atom_type_num, 3))
            mask = np.zeros((residue_constants.atom_type_num,))
            res_b_factors = np.zeros((residue_constants.atom_type_num,))
            for atom in res:
                if atom.name not in residue_constants.atom_types:
                    continue
                atom_idx = residue_constants.atom_order[atom.name]
                pos[atom_idx] = atom.coord
                mask[atom_idx] = 1.0
                res_b_factors[atom_idx] = atom.bfactor
            if np.sum(mask) < 0.5:
                # If no known atom positions are reported for the residue then skip it.
                continue
            aatype.append(restype_idx)
            atom_positions.append(pos)
            atom_mask.append(mask)
            residue_index.append(res.id[1])
            chain_ids.append(chain.id)
            b_factors.append(res_b_factors)
    # FIX: previously the chain_id *parameter* was reused as the PARENT-record
    # chain counter; the parsing now lives in a dedicated helper.
    parents, parents_chain_index = _parse_parents(pdb_str)
    # Map author chain ids (letters, digits, whitespace) to integer indices.
    chain_id_mapping = {
        cid: n
        for n, cid in enumerate(
            string.ascii_uppercase
            + string.ascii_lowercase
            + string.digits
            + string.whitespace
        )
    }
    chain_index = np.array([chain_id_mapping[cid] for cid in chain_ids])
    return Protein(
        atom_positions=np.array(atom_positions),
        atom_mask=np.array(atom_mask),
        aatype=np.array(aatype),
        residue_index=np.array(residue_index),
        chain_index=chain_index,
        b_factors=np.array(b_factors),
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> Sequence[str]:
    """Builds REMARK/PARENT header lines for the given chain of a Protein.

    When per-chain parent indices are present, only parents belonging to
    chain_id are listed; an empty or missing parent list yields "N/A".
    """
    headers = []
    if prot.remark is not None:
        headers.append(f"REMARK {prot.remark}")
    parents = prot.parents
    if prot.parents_chain_index is not None:
        parents = [
            p for i, p in zip(prot.parents_chain_index, parents) if i == chain_id
        ]
    if not parents:
        parents = ["N/A"]
    headers.append(f"PARENT {' '.join(parents)}")
    return headers
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.

    Args:
        prot: The protein to convert to PDB.

    Returns:
        PDB string.

    Raises:
        ValueError: If any aatype index is out of range.
    """
    restypes = residue_constants.restypes + ["X"]
    # Map an integer residue type to its 3-letter PDB name ("UNK" for X).
    res_1to3 = lambda r: residue_constants.restype_1to3.get(restypes[r], "UNK")
    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")
    # REMARK / PARENT headers for the first chain.
    headers = get_pdb_headers(prot)
    if(len(headers) > 0):
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(
            atom_types, atom_positions[i], atom_mask[i], b_factors[i]
        ):
            # Skip atom slots that are not present for this residue.
            if mask < 0.5:
                continue
            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[
                0
            ]  # Protein supports only C, N, O, S, this works.
            charge = ""
            chain_tag = "A"
            if(chain_index is not None):
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        # Emit a TER record at the end of the protein and between chains.
        should_terminate = (i == n - 1)
        if(chain_index is not None):
            if(i != n - 1 and chain_index[i + 1] != prev_chain_index):
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if(should_terminate):
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      "
                f"{res_1to3(aatype[i]):>3} "
                f"{chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if(i != n - 1):
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_data_transforms.py | src/cheap/openfold_utils/_data_transforms.py | # Adapted from https://github.com/aqlaboratory/openfold/blob/main/openfold/data/data_transforms.py
# to remove non-MSA and non-template dependencies.
# To enable multimer mode, add necessary functions from
# https://github.com/aqlaboratory/openfold/tree/main/openfold/utils/geometry
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from functools import reduce, wraps
from operator import add
import numpy as np
import torch
from . import _residue_constants as rc
from ._rigids import Rigid, Rotation
from ._tensor_utils import batched_gather, tensor_tree_map, tree_map
# Names of the MSA-derived feature keys.
MSA_FEATURE_NAMES = [
    "msa",
    "deletion_matrix",
    "msa_mask",
    "msa_row_mask",
    "bert_mask",
    "true_msa",
]
def cast_to_64bit_ints(protein):
    """Promote every int32 tensor in *protein* to int64, in place.

    Returns the same dict for chaining.
    """
    # int64 is the canonical integer width for downstream tensor indexing.
    for key, value in list(protein.items()):
        if value.dtype == torch.int32:
            protein[key] = value.type(torch.int64)
    return protein
def curry1(f):
    """Supply all arguments but the first.

    The decorated function takes everything except *f*'s first argument
    and returns a one-argument callable expecting that first argument.
    """
    @wraps(f)
    def partially_applied(*args, **kwargs):
        def apply_first(x):
            return f(x, *args, **kwargs)

        return apply_first

    return partially_applied
def make_all_atom_aatype(protein):
    """Alias the residue-type feature under the "all_atom_aatype" key."""
    aatype = protein["aatype"]
    protein["all_atom_aatype"] = aatype
    return protein
def make_one_hot(x, num_classes):
    """One-hot encode integer tensor *x* into a trailing float dim of size
    *num_classes*, on the same device as *x*."""
    encoded = torch.zeros(x.shape + (num_classes,), device=x.device)
    encoded.scatter_(-1, x.unsqueeze(-1), 1)
    return encoded
def make_seq_mask(protein):
    """Add an all-ones float32 residue mask shaped like "aatype"."""
    mask = torch.ones(protein["aatype"].shape, dtype=torch.float32)
    protein["seq_mask"] = mask
    return protein
# def squeeze_features(protein):
# """Remove singleton and repeated dimensions in protein features."""
# protein["aatype"] = torch.argmax(protein["aatype"], dim=-1)
# for k in [
# "domain_name",
# "msa",
# "num_alignments",
# "seq_length",
# "sequence",
# "superfamily",
# "deletion_matrix",
# "resolution",
# "between_segment_residues",
# "residue_index",
# "template_all_atom_mask",
# ]:
# if k in protein:
# final_dim = protein[k].shape[-1]
# if isinstance(final_dim, int) and final_dim == 1:
# if torch.is_tensor(protein[k]):
# protein[k] = torch.squeeze(protein[k], dim=-1)
# else:
# protein[k] = np.squeeze(protein[k], axis=-1)
# for k in ["seq_length", "num_alignments"]:
# if k in protein:
# protein[k] = protein[k][0]
# return protein
# def unsorted_segment_sum(data, segment_ids, num_segments):
# """
# Computes the sum along segments of a tensor. Similar to
# tf.unsorted_segment_sum, but only supports 1-D indices.
# :param data: A tensor whose segments are to be summed.
# :param segment_ids: The 1-D segment indices tensor.
# :param num_segments: The number of segments.
# :return: A tensor of same data type as the data argument.
# """
# assert (
# len(segment_ids.shape) == 1 and
# segment_ids.shape[0] == data.shape[0]
# )
# segment_ids = segment_ids.view(
# segment_ids.shape[0], *((1,) * len(data.shape[1:]))
# )
# segment_ids = segment_ids.expand(data.shape)
# shape = [num_segments] + list(data.shape[1:])
# tensor = (
# torch.zeros(*shape, device=segment_ids.device)
# .scatter_add_(0, segment_ids, data.float())
# )
# tensor = tensor.type(data.dtype)
# return tensor
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_mask):
    """Compute pseudo-beta positions: CB, or CA for glycine (which has no CB).

    Args:
        aatype: [*, N_res] residue type indices.
        all_atom_positions: [*, N_res, 37, 3] atom37 coordinates.
        all_atom_mask: [*, N_res, 37] atom mask, or None.

    Returns:
        Pseudo-beta positions, plus the matching mask when a mask was given.
    """
    ca_idx = rc.atom_order["CA"]
    cb_idx = rc.atom_order["CB"]
    is_gly = aatype == rc.restype_order["G"]
    # Broadcast the glycine flag across the xyz dimension.
    gly_tiled = torch.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3])
    pseudo_beta = torch.where(
        gly_tiled,
        all_atom_positions[..., ca_idx, :],
        all_atom_positions[..., cb_idx, :],
    )
    if all_atom_mask is None:
        return pseudo_beta
    pseudo_beta_mask = torch.where(
        is_gly, all_atom_mask[..., ca_idx], all_atom_mask[..., cb_idx]
    )
    return pseudo_beta, pseudo_beta_mask
@curry1
def make_pseudo_beta(protein, prefix=""):
    """Attach pseudo-beta (CB, CA for glycine) position and mask features,
    optionally under the "template_" prefix."""
    assert prefix in ["", "template_"]
    aatype_key = "template_aatype" if prefix else "aatype"
    mask_key = "template_all_atom_mask" if prefix else "all_atom_mask"
    positions, mask = pseudo_beta_fn(
        protein[aatype_key],
        protein[prefix + "all_atom_positions"],
        protein[mask_key],
    )
    protein[prefix + "pseudo_beta"] = positions
    protein[prefix + "pseudo_beta_mask"] = mask
    return protein
@curry1
def add_constant_field(protein, key, value):
    """Store *value* as a tensor under *key*, on the same device as the MSA."""
    device = protein["msa"].device
    protein[key] = torch.tensor(value, device=device)
    return protein
def shaped_categorical(probs, epsilon=1e-10):
    """Sample from a categorical distribution along the last axis of *probs*.

    Returns an integer tensor shaped like *probs* without its final dim.
    """
    out_shape = probs.shape[:-1]
    num_classes = probs.shape[-1]
    # epsilon keeps all-zero rows valid for Categorical.
    flat = torch.reshape(probs + epsilon, [-1, num_classes])
    samples = torch.distributions.categorical.Categorical(flat).sample()
    return torch.reshape(samples, out_shape)
def make_atom14_masks(protein):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    # Per-residue-type lookup tables built from residue constants:
    #   atom14 slot -> atom37 index, atom37 index -> atom14 slot,
    #   and a mask over which of the 14 slots each residue type uses.
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        # Empty atom names pad unused atom14 slots; they map to index 0.
        restype_atom14_to_atom37.append(
            [(rc.atom_order[name] if name else 0) for name in atom_names]
        )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [
                (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0)
                for name in rc.atom_types
            ]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros(
        [21, 37], dtype=torch.float32, device=protein["aatype"].device
    )
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch):
    """Numpy wrapper around make_atom14_masks: ndarray in, ndarray out."""
    as_tensors = tree_map(lambda n: torch.tensor(n, device="cpu"), batch, np.ndarray)
    result = make_atom14_masks(as_tensors)
    return tensor_tree_map(lambda t: np.array(t), result)
def make_atom14_positions(protein):
    """Constructs denser atom positions (14 dimensions instead of 37).

    Requires the index/mask features produced by make_atom14_masks. Adds
    ground-truth atom14 positions/masks, their "alternative" versions with
    ambiguous atom names swapped, and a per-atom ambiguity indicator.
    """
    residx_atom14_mask = protein["atom14_atom_exists"]
    residx_atom14_to_atom37 = protein["residx_atom14_to_atom37"]
    # Create a mask for known ground truth positions.
    residx_atom14_gt_mask = residx_atom14_mask * batched_gather(
        protein["all_atom_mask"],
        residx_atom14_to_atom37,
        dim=-1,
        no_batch_dims=len(protein["all_atom_mask"].shape[:-1]),
    )
    # Gather the ground truth positions.
    residx_atom14_gt_positions = residx_atom14_gt_mask[..., None] * (
        batched_gather(
            protein["all_atom_positions"],
            residx_atom14_to_atom37,
            dim=-2,
            no_batch_dims=len(protein["all_atom_positions"].shape[:-2]),
        )
    )
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["atom14_gt_exists"] = residx_atom14_gt_mask
    protein["atom14_gt_positions"] = residx_atom14_gt_positions
    # As the atom naming is ambiguous for 7 of the 20 amino acids, provide
    # alternative ground truth coordinates where the naming is swapped
    restype_3 = [rc.restype_1to3[res] for res in rc.restypes]
    restype_3 += ["UNK"]
    # Matrices for renaming ambiguous atoms.
    # Default for every residue type is the 14x14 identity (no renaming).
    all_matrices = {
        res: torch.eye(
            14,
            dtype=protein["all_atom_mask"].dtype,
            device=protein["all_atom_mask"].device,
        )
        for res in restype_3
    }
    for resname, swap in rc.residue_atom_renaming_swaps.items():
        correspondences = torch.arange(14, device=protein["all_atom_mask"].device)
        for source_atom_swap, target_atom_swap in swap.items():
            source_index = rc.restype_name_to_atom14_names[resname].index(
                source_atom_swap
            )
            target_index = rc.restype_name_to_atom14_names[resname].index(
                target_atom_swap
            )
            correspondences[source_index] = target_index
            correspondences[target_index] = source_index
        # Build the 14x14 permutation matrix realizing the swap.
        renaming_matrix = protein["all_atom_mask"].new_zeros((14, 14))
        for index, correspondence in enumerate(correspondences):
            renaming_matrix[index, correspondence] = 1.0
        all_matrices[resname] = renaming_matrix
    renaming_matrices = torch.stack([all_matrices[restype] for restype in restype_3])
    # Pick the transformation matrices for the given residue sequence
    # shape (num_res, 14, 14).
    renaming_transform = renaming_matrices[protein["aatype"]]
    # Apply it to the ground truth positions. shape (num_res, 14, 3).
    alternative_gt_positions = torch.einsum(
        "...rac,...rab->...rbc", residx_atom14_gt_positions, renaming_transform
    )
    protein["atom14_alt_gt_positions"] = alternative_gt_positions
    # Create the mask for the alternative ground truth (differs from the
    # ground truth mask, if only one of the atoms in an ambiguous pair has a
    # ground truth position).
    alternative_gt_mask = torch.einsum(
        "...ra,...rab->...rb", residx_atom14_gt_mask, renaming_transform
    )
    protein["atom14_alt_gt_exists"] = alternative_gt_mask
    # Create an ambiguous atoms mask. shape: (21, 14).
    restype_atom14_is_ambiguous = protein["all_atom_mask"].new_zeros((21, 14))
    for resname, swap in rc.residue_atom_renaming_swaps.items():
        for atom_name1, atom_name2 in swap.items():
            restype = rc.restype_order[rc.restype_3to1[resname]]
            atom_idx1 = rc.restype_name_to_atom14_names[resname].index(atom_name1)
            atom_idx2 = rc.restype_name_to_atom14_names[resname].index(atom_name2)
            restype_atom14_is_ambiguous[restype, atom_idx1] = 1
            restype_atom14_is_ambiguous[restype, atom_idx2] = 1
    # From this create an ambiguous_mask for the given sequence.
    protein["atom14_atom_is_ambiguous"] = restype_atom14_is_ambiguous[protein["aatype"]]
    return protein
def atom37_to_frames(protein, eps=1e-8):
    """Compute per-residue rigid-group frames from atom37 coordinates.

    Builds 8 frames per residue (backbone at group 0, psi at group 3, and up
    to 4 chi groups at 4..7; groups 1-2 are left masked out here), and adds:
    ground-truth frames as 4x4 tensors, existence masks, ambiguity flags, and
    alternative frames with ambiguous sidechain groups flipped.

    Note: assumes groups 1-2 are unused in this port — TODO confirm against
    the original AlphaFold/OpenFold implementation.
    """
    is_multimer = "asym_id" in protein
    aatype = protein["aatype"]
    all_atom_positions = protein["all_atom_positions"]
    all_atom_mask = protein["all_atom_mask"]
    if is_multimer:
        # Multimer geometry support was not ported.
        raise NotImplementedError
        # all_atom_positions = Vec3Array.from_array(all_atom_positions)
    batch_dims = len(aatype.shape[:-1])
    # For each (residue type, rigid group): the three atoms defining the frame.
    restype_rigidgroup_base_atom_names = np.full([21, 8, 3], "", dtype=object)
    restype_rigidgroup_base_atom_names[:, 0, :] = ["C", "CA", "N"]
    restype_rigidgroup_base_atom_names[:, 3, :] = ["CA", "C", "O"]
    for restype, restype_letter in enumerate(rc.restypes):
        resname = rc.restype_1to3[restype_letter]
        for chi_idx in range(4):
            if rc.chi_angles_mask[restype][chi_idx]:
                names = rc.chi_angles_atoms[resname][chi_idx]
                restype_rigidgroup_base_atom_names[restype, chi_idx + 4, :] = names[1:]
    # Which of the 8 groups exist for each residue type.
    restype_rigidgroup_mask = all_atom_mask.new_zeros(
        (*aatype.shape[:-1], 21, 8),
    )
    restype_rigidgroup_mask[..., 0] = 1
    restype_rigidgroup_mask[..., 3] = 1
    restype_rigidgroup_mask[..., :20, 4:] = all_atom_mask.new_tensor(rc.chi_angles_mask)
    # Translate atom names to atom37 indices ("" maps to 0).
    lookuptable = rc.atom_order.copy()
    lookuptable[""] = 0
    lookup = np.vectorize(lambda x: lookuptable[x])
    restype_rigidgroup_base_atom37_idx = lookup(
        restype_rigidgroup_base_atom_names,
    )
    restype_rigidgroup_base_atom37_idx = aatype.new_tensor(
        restype_rigidgroup_base_atom37_idx,
    )
    restype_rigidgroup_base_atom37_idx = restype_rigidgroup_base_atom37_idx.view(
        *((1,) * batch_dims), *restype_rigidgroup_base_atom37_idx.shape
    )
    # Per-residue atom indices for the frame-defining atoms.
    residx_rigidgroup_base_atom37_idx = batched_gather(
        restype_rigidgroup_base_atom37_idx,
        aatype,
        dim=-3,
        no_batch_dims=batch_dims,
    )
    if is_multimer:
        raise NotImplementedError
        # base_atom_pos = [batched_gather(
        #     pos,
        #     residx_rigidgroup_base_atom37_idx,
        #     dim=-1,
        #     no_batch_dims=len(all_atom_positions.shape[:-1]),
        # ) for pos in all_atom_positions]
        # base_atom_pos = Vec3Array.from_array(torch.stack(base_atom_pos, dim=-1))
    else:
        base_atom_pos = batched_gather(
            all_atom_positions,
            residx_rigidgroup_base_atom37_idx,
            dim=-2,
            no_batch_dims=len(all_atom_positions.shape[:-2]),
        )
    if is_multimer:
        raise NotImplementedError
        # point_on_neg_x_axis = base_atom_pos[:, :, 0]
        # origin = base_atom_pos[:, :, 1]
        # point_on_xy_plane = base_atom_pos[:, :, 2]
        # gt_rotation = Rot3Array.from_two_vectors(
        #     origin - point_on_neg_x_axis, point_on_xy_plane - origin)
        # gt_frames = Rigid3Array(gt_rotation, origin)
    else:
        # Frame from three points: atom 1 is the origin, atom 0 sits on the
        # negative x-axis, atom 2 lies in the xy-plane.
        gt_frames = Rigid.from_3_points(
            p_neg_x_axis=base_atom_pos[..., 0, :],
            origin=base_atom_pos[..., 1, :],
            p_xy_plane=base_atom_pos[..., 2, :],
            eps=eps,
        )
    group_exists = batched_gather(
        restype_rigidgroup_mask,
        aatype,
        dim=-2,
        no_batch_dims=batch_dims,
    )
    gt_atoms_exist = batched_gather(
        all_atom_mask,
        residx_rigidgroup_base_atom37_idx,
        dim=-1,
        no_batch_dims=len(all_atom_mask.shape[:-1]),
    )
    # A frame exists only if the group is defined AND all three atoms are present.
    gt_exists = torch.min(gt_atoms_exist, dim=-1)[0] * group_exists
    # Fixed rotation composed onto group 0: negate the x- and z-axes.
    rots = torch.eye(3, dtype=all_atom_mask.dtype, device=aatype.device)
    rots = torch.tile(rots, (*((1,) * batch_dims), 8, 1, 1))
    rots[..., 0, 0, 0] = -1
    rots[..., 0, 2, 2] = -1
    if is_multimer:
        raise NotImplementedError
        # gt_frames = gt_frames.compose_rotation(
        #     Rot3Array.from_array(rots))
    else:
        rots = Rotation(rot_mats=rots)
        gt_frames = gt_frames.compose(Rigid(rots, None))
    # Mark the last chi group of residues with renaming-ambiguous atoms, and
    # prepare the 180-degree flip (negate y and z) for their alternative frame.
    restype_rigidgroup_is_ambiguous = all_atom_mask.new_zeros(
        *((1,) * batch_dims), 21, 8
    )
    restype_rigidgroup_rots = torch.eye(
        3, dtype=all_atom_mask.dtype, device=aatype.device
    )
    restype_rigidgroup_rots = torch.tile(
        restype_rigidgroup_rots,
        (*((1,) * batch_dims), 21, 8, 1, 1),
    )
    for resname, _ in rc.residue_atom_renaming_swaps.items():
        restype = rc.restype_order[rc.restype_3to1[resname]]
        chi_idx = int(sum(rc.chi_angles_mask[restype]) - 1)
        restype_rigidgroup_is_ambiguous[..., restype, chi_idx + 4] = 1
        restype_rigidgroup_rots[..., restype, chi_idx + 4, 1, 1] = -1
        restype_rigidgroup_rots[..., restype, chi_idx + 4, 2, 2] = -1
    residx_rigidgroup_is_ambiguous = batched_gather(
        restype_rigidgroup_is_ambiguous,
        aatype,
        dim=-2,
        no_batch_dims=batch_dims,
    )
    residx_rigidgroup_ambiguity_rot = batched_gather(
        restype_rigidgroup_rots,
        aatype,
        dim=-4,
        no_batch_dims=batch_dims,
    )
    if is_multimer:
        raise NotImplementedError
        # ambiguity_rot = Rot3Array.from_array(residx_rigidgroup_ambiguity_rot)
        # # Create the alternative ground truth frames.
        # alt_gt_frames = gt_frames.compose_rotation(ambiguity_rot)
    else:
        residx_rigidgroup_ambiguity_rot = Rotation(
            rot_mats=residx_rigidgroup_ambiguity_rot
        )
        alt_gt_frames = gt_frames.compose(Rigid(residx_rigidgroup_ambiguity_rot, None))
    gt_frames_tensor = gt_frames.to_tensor_4x4()
    alt_gt_frames_tensor = alt_gt_frames.to_tensor_4x4()
    protein["rigidgroups_gt_frames"] = gt_frames_tensor
    protein["rigidgroups_gt_exists"] = gt_exists
    protein["rigidgroups_group_exists"] = group_exists
    protein["rigidgroups_group_is_ambiguous"] = residx_rigidgroup_is_ambiguous
    protein["rigidgroups_alt_gt_frames"] = alt_gt_frames_tensor
    return protein
def get_chi_atom_indices():
    """Returns atom indices needed to compute chi angles for all residue types.

    Returns:
        A nested list of shape [residue_types=21, chis=4, atoms=4]. Residue
        types follow rc.restypes plus a trailing UNKNOWN type. Chi angles not
        defined for a residue are filled with zero indices.
    """
    chi_atom_indices = []
    for one_letter in rc.restypes:
        three_letter = rc.restype_1to3[one_letter]
        per_chi = [
            [rc.atom_order[atom] for atom in chi_angle]
            for chi_angle in rc.chi_angles_atoms[three_letter]
        ]
        # Pad undefined chi angles so every residue has exactly 4 rows.
        while len(per_chi) < 4:
            per_chi.append([0, 0, 0, 0])
        chi_atom_indices.append(per_chi)
    # UNKNOWN residue: all chis undefined.
    chi_atom_indices.append([[0, 0, 0, 0]] * 4)
    return chi_atom_indices
@curry1
def atom37_to_torsion_angles(
    protein,
    prefix="",
):
    """
    Convert coordinates to torsion angles.
    This function is extremely sensitive to floating point imprecisions
    and should be run with double precision whenever possible.
    Args:
        Dict containing:
            * (prefix)aatype:
                [*, N_res] residue indices
            * (prefix)all_atom_positions:
                [*, N_res, 37, 3] atom positions (in atom37
                format)
            * (prefix)all_atom_mask:
                [*, N_res, 37] atom position mask
    Returns:
        The same dictionary updated with the following features:
        "(prefix)torsion_angles_sin_cos" ([*, N_res, 7, 2])
            Torsion angles
        "(prefix)alt_torsion_angles_sin_cos" ([*, N_res, 7, 2])
            Alternate torsion angles (accounting for 180-degree symmetry)
        "(prefix)torsion_angles_mask" ([*, N_res, 7])
            Torsion angles mask
    """
    aatype = protein[prefix + "aatype"]
    all_atom_positions = protein[prefix + "all_atom_positions"]
    all_atom_mask = protein[prefix + "all_atom_mask"]
    # Map any out-of-range residue index to UNK (20).
    aatype = torch.clamp(aatype, max=20)
    # Previous residue's atoms/masks, zero-padded at the N-terminus.
    pad = all_atom_positions.new_zeros([*all_atom_positions.shape[:-3], 1, 37, 3])
    prev_all_atom_positions = torch.cat(
        [pad, all_atom_positions[..., :-1, :, :]], dim=-3
    )
    pad = all_atom_mask.new_zeros([*all_atom_mask.shape[:-2], 1, 37])
    prev_all_atom_mask = torch.cat([pad, all_atom_mask[..., :-1, :]], dim=-2)
    # Atom quadruples for the backbone torsions; slices index the leading
    # atom37 positions (presumably N, CA, C, CB, O order — see rc.atom_types).
    pre_omega_atom_pos = torch.cat(
        [prev_all_atom_positions[..., 1:3, :], all_atom_positions[..., :2, :]],
        dim=-2,
    )
    phi_atom_pos = torch.cat(
        [prev_all_atom_positions[..., 2:3, :], all_atom_positions[..., :3, :]],
        dim=-2,
    )
    psi_atom_pos = torch.cat(
        [all_atom_positions[..., :3, :], all_atom_positions[..., 4:5, :]],
        dim=-2,
    )
    # A torsion is valid only if all four contributing atoms are present.
    pre_omega_mask = torch.prod(prev_all_atom_mask[..., 1:3], dim=-1) * torch.prod(
        all_atom_mask[..., :2], dim=-1
    )
    phi_mask = prev_all_atom_mask[..., 2] * torch.prod(
        all_atom_mask[..., :3], dim=-1, dtype=all_atom_mask.dtype
    )
    psi_mask = (
        torch.prod(all_atom_mask[..., :3], dim=-1, dtype=all_atom_mask.dtype)
        * all_atom_mask[..., 4]
    )
    # Chi-angle atom quadruples, looked up per residue type.
    chi_atom_indices = torch.as_tensor(get_chi_atom_indices(), device=aatype.device)
    atom_indices = chi_atom_indices[..., aatype, :, :]
    chis_atom_pos = batched_gather(
        all_atom_positions, atom_indices, -2, len(atom_indices.shape[:-2])
    )
    chi_angles_mask = list(rc.chi_angles_mask)
    chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])
    chi_angles_mask = all_atom_mask.new_tensor(chi_angles_mask)
    chis_mask = chi_angles_mask[aatype, :]
    chi_angle_atoms_mask = batched_gather(
        all_atom_mask,
        atom_indices,
        dim=-1,
        no_batch_dims=len(atom_indices.shape[:-2]),
    )
    chi_angle_atoms_mask = torch.prod(
        chi_angle_atoms_mask, dim=-1, dtype=chi_angle_atoms_mask.dtype
    )
    chis_mask = chis_mask * chi_angle_atoms_mask
    # Stack the 7 torsions: pre-omega, phi, psi, then the 4 chis.
    torsions_atom_pos = torch.cat(
        [
            pre_omega_atom_pos[..., None, :, :],
            phi_atom_pos[..., None, :, :],
            psi_atom_pos[..., None, :, :],
            chis_atom_pos,
        ],
        dim=-3,
    )
    torsion_angles_mask = torch.cat(
        [
            pre_omega_mask[..., None],
            phi_mask[..., None],
            psi_mask[..., None],
            chis_mask,
        ],
        dim=-1,
    )
    # Frame from atoms 1 (x-ref), 2 (origin), 0 (plane); the torsion angle is
    # then the angular position of atom 3 expressed in that frame.
    torsion_frames = Rigid.from_3_points(
        torsions_atom_pos[..., 1, :],
        torsions_atom_pos[..., 2, :],
        torsions_atom_pos[..., 0, :],
        eps=1e-8,
    )
    fourth_atom_rel_pos = torsion_frames.invert().apply(torsions_atom_pos[..., 3, :])
    torsion_angles_sin_cos = torch.stack(
        [fourth_atom_rel_pos[..., 2], fourth_atom_rel_pos[..., 1]], dim=-1
    )
    # Normalize (sin, cos) pairs to unit length.
    denom = torch.sqrt(
        torch.sum(
            torch.square(torsion_angles_sin_cos),
            dim=-1,
            dtype=torsion_angles_sin_cos.dtype,
            keepdims=True,
        )
        + 1e-8
    )
    torsion_angles_sin_cos = torsion_angles_sin_cos / denom
    # Shift the third (psi) torsion by pi: negating both sin and cos rotates
    # the angle by 180 degrees.
    torsion_angles_sin_cos = (
        torsion_angles_sin_cos
        * all_atom_mask.new_tensor(
            [1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0],
        )[((None,) * len(torsion_angles_sin_cos.shape[:-2])) + (slice(None), None)]
    )
    # For pi-periodic chis the alternative angle is the same angle rotated by
    # pi (multiply both sin and cos by -1).
    chi_is_ambiguous = torsion_angles_sin_cos.new_tensor(
        rc.chi_pi_periodic,
    )[aatype, ...]
    mirror_torsion_angles = torch.cat(
        [
            all_atom_mask.new_ones(*aatype.shape, 3),
            1.0 - 2.0 * chi_is_ambiguous,
        ],
        dim=-1,
    )
    alt_torsion_angles_sin_cos = (
        torsion_angles_sin_cos * mirror_torsion_angles[..., None]
    )
    protein[prefix + "torsion_angles_sin_cos"] = torsion_angles_sin_cos
    protein[prefix + "alt_torsion_angles_sin_cos"] = alt_torsion_angles_sin_cos
    protein[prefix + "torsion_angles_mask"] = torsion_angles_mask
    return protein
def get_backbone_frames(protein):
    """Extract the backbone (rigid group 0) frame tensor and its mask."""
    # DISCREPANCY: AlphaFold uses tensor_7s here. I don't know why.
    frames = protein["rigidgroups_gt_frames"]
    exists = protein["rigidgroups_gt_exists"]
    protein["backbone_rigid_tensor"] = frames[..., 0, :, :]
    protein["backbone_rigid_mask"] = exists[..., 0]
    return protein
def get_chi_angles(protein):
    """Slice the four chi angles (and their mask) out of the 7 torsions,
    cast to the atom-mask dtype."""
    dtype = protein["all_atom_mask"].dtype
    sin_cos = protein["torsion_angles_sin_cos"]
    protein["chi_angles_sin_cos"] = sin_cos[..., 3:, :].to(dtype)
    protein["chi_mask"] = protein["torsion_angles_mask"][..., 3:].to(dtype)
    return protein
# TODO: Important: rewrite this function to fit the rest of the codebase
# @curry1
# def random_crop_to_size(
# protein,
# crop_size,
# max_templates,
# shape_schema,
# subsample_templates=False,
# seed=None,
# ):
# """Crop randomly to `crop_size`, or keep as is if shorter than that."""
# # We want each ensemble to be cropped the same way
# g = None
# if seed is not None:
# g = torch.Generator(device=protein["seq_length"].device)
# g.manual_seed(seed)
# seq_length = protein["seq_length"]
# if "template_mask" in protein:
# num_templates = protein["template_mask"].shape[-1]
# else:
# num_templates = 0
# # No need to subsample templates if there aren't any
# subsample_templates = subsample_templates and num_templates
# num_res_crop_size = min(int(seq_length), crop_size)
# def _randint(lower, upper):
# return int(torch.randint(
# lower,
# upper + 1,
# (1,),
# device=protein["seq_length"].device,
# generator=g,
# )[0])
# if subsample_templates:
# templates_crop_start = _randint(0, num_templates)
# templates_select_indices = torch.randperm(
# num_templates, device=protein["seq_length"].device, generator=g
# )
# else:
# templates_crop_start = 0
# num_templates_crop_size = min(
# num_templates - templates_crop_start, max_templates
# )
# n = seq_length - num_res_crop_size
# if "use_clamped_fape" in protein and protein["use_clamped_fape"] == 1.:
# right_anchor = n
# else:
# x = _randint(0, n)
# right_anchor = n - x
# num_res_crop_start = _randint(0, right_anchor)
# for k, v in protein.items():
# if k not in shape_schema or (
# "template" not in k and NUM_RES not in shape_schema[k]
# ):
# continue
# # randomly permute the templates before cropping them.
# if k.startswith("template") and subsample_templates:
# v = v[templates_select_indices]
# slices = []
# for i, (dim_size, dim) in enumerate(zip(shape_schema[k], v.shape)):
# is_num_res = dim_size == NUM_RES
# if i == 0 and k.startswith("template"):
# crop_size = num_templates_crop_size
# crop_start = templates_crop_start
# else:
# crop_start = num_res_crop_start if is_num_res else 0
# crop_size = num_res_crop_size if is_num_res else dim
# slices.append(slice(crop_start, crop_start + crop_size))
# protein[k] = v[slices]
# protein["seq_length"] = protein["seq_length"].new_tensor(num_res_crop_size)
# return protein
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_losses.py | src/cheap/openfold_utils/_losses.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ml_collections
import numpy as np
import torch
import torch.nn as nn
from typing import Dict, Optional, Tuple
from . import _residue_constants as residue_constants
from ._rigids import Rotation, Rigid
# from openfold.utils.geometry.vector import Vec3Array, euclidean_distance
# from openfold.utils.all_atom_multimer import get_rc_tensor
from ._tensor_utils import tree_map, tensor_tree_map, masked_mean, permute_final_dims
import logging
logger = logging.getLogger(__name__)
def softmax_cross_entropy(logits, labels):
    """Cross entropy between softmax(logits) and soft *labels* over the last
    dimension; returns the loss with that dimension reduced."""
    log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
    return -torch.sum(labels * log_probs, dim=-1)
def sigmoid_cross_entropy(logits, labels):
    """Elementwise binary cross entropy on raw logits.

    The log-sigmoid arithmetic is done in float64 for numerical stability and
    the result is cast back to the input dtype.
    """
    in_dtype = logits.dtype
    logits64 = logits.double()
    labels64 = labels.double()
    log_p = torch.nn.functional.logsigmoid(logits64)
    log_not_p = torch.nn.functional.logsigmoid(-logits64)
    loss = -labels64 * log_p - (1.0 - labels64) * log_not_p
    return loss.to(dtype=in_dtype)
def torsion_angle_loss(
    a,  # [*, N, 7, 2]
    a_gt,  # [*, N, 7, 2]
    a_alt_gt,  # [*, N, 7, 2]
):
    """Torsion-angle loss: squared distance from the unit-normalized
    prediction to the closer of the two ground truths, plus a small penalty
    pushing raw predictions toward unit norm."""
    # [*, N, 7] norm of each predicted (sin, cos) pair.
    pred_norm = torch.norm(a, dim=-1)
    # Project predictions onto the unit circle before comparing.
    unit_a = a / pred_norm.unsqueeze(-1)
    sq_diff_gt = torch.norm(unit_a - a_gt, dim=-1) ** 2
    sq_diff_alt = torch.norm(unit_a - a_alt_gt, dim=-1) ** 2
    # [*]
    l_torsion = torch.mean(torch.minimum(sq_diff_gt, sq_diff_alt), dim=(-1, -2))
    l_angle_norm = torch.mean(torch.abs(pred_norm - 1), dim=(-1, -2))
    # Small fixed weight on the norm penalty.
    return l_torsion + 0.02 * l_angle_norm
def compute_fape(
    pred_frames: Rigid,
    target_frames: Rigid,
    frames_mask: torch.Tensor,
    pred_positions: torch.Tensor,
    target_positions: torch.Tensor,
    positions_mask: torch.Tensor,
    length_scale: float,
    pair_mask: Optional[torch.Tensor] = None,
    l1_clamp_distance: Optional[float] = None,
    eps=1e-8,
) -> torch.Tensor:
    """
    Computes FAPE loss.
    Args:
        pred_frames:
            [*, N_frames] Rigid object of predicted frames
        target_frames:
            [*, N_frames] Rigid object of ground truth frames
        frames_mask:
            [*, N_frames] binary mask for the frames
        pred_positions:
            [*, N_pts, 3] predicted atom positions
        target_positions:
            [*, N_pts, 3] ground truth positions
        positions_mask:
            [*, N_pts] positions mask
        length_scale:
            Length scale by which the loss is divided
        pair_mask:
            [*, N_frames, N_pts] mask to use for
            separating intra- from inter-chain losses.
        l1_clamp_distance:
            Cutoff above which distance errors are disregarded
        eps:
            Small value used to regularize denominators
    Returns:
        [*] loss tensor
    """
    # Express every atom position in every local frame.
    # [*, N_frames, N_pts, 3]
    local_pred_pos = pred_frames.invert()[..., None].apply(
        pred_positions[..., None, :, :],
    )
    local_target_pos = target_frames.invert()[..., None].apply(
        target_positions[..., None, :, :],
    )
    # [*, N_frames, N_pts] per-(frame, atom) distance error.
    error_dist = torch.sqrt(
        torch.sum((local_pred_pos - local_target_pos) ** 2, dim=-1) + eps
    )
    if l1_clamp_distance is not None:
        error_dist = torch.clamp(error_dist, min=0, max=l1_clamp_distance)
    normed_error = error_dist / length_scale
    normed_error = normed_error * frames_mask[..., None]
    normed_error = normed_error * positions_mask[..., None, :]
    if pair_mask is not None:
        # Mask-weighted mean restricted to the selected (frame, atom) pairs.
        normed_error = normed_error * pair_mask
        normed_error = torch.sum(normed_error, dim=(-1, -2))
        mask = frames_mask[..., None] * positions_mask[..., None, :] * pair_mask
        norm_factor = torch.sum(mask, dim=(-2, -1))
        normed_error = normed_error / (eps + norm_factor)
    else:
        # FP16-friendly averaging. Roughly equivalent to:
        #
        # norm_factor = (
        #     torch.sum(frames_mask, dim=-1) *
        #     torch.sum(positions_mask, dim=-1)
        # )
        # normed_error = torch.sum(normed_error, dim=(-1, -2)) / (eps + norm_factor)
        #
        # ("roughly" because eps is necessarily duplicated in the latter)
        normed_error = torch.sum(normed_error, dim=-1)
        normed_error = normed_error / (eps + torch.sum(frames_mask, dim=-1))[..., None]
        normed_error = torch.sum(normed_error, dim=-1)
        normed_error = normed_error / (eps + torch.sum(positions_mask, dim=-1))
    return normed_error
def backbone_loss(
    backbone_rigid_tensor: torch.Tensor,
    backbone_rigid_mask: torch.Tensor,
    traj: torch.Tensor,
    pair_mask: Optional[torch.Tensor] = None,
    use_clamped_fape: Optional[torch.Tensor] = None,
    clamp_distance: float = 10.0,
    loss_unit_distance: float = 10.0,
    eps: float = 1e-4,
    **kwargs,
) -> torch.Tensor:
    """Backbone FAPE loss over a trajectory of predicted frames.

    Args:
        backbone_rigid_tensor: [*, N, 4, 4] ground-truth backbone frames.
        backbone_rigid_mask: [*, N] mask for the ground-truth frames.
        traj: predicted frames; the trailing dim decides the format — 7 for
            tensor_7 (quaternion + translation) or 4 for 4x4 matrices. The
            leading dim broadcasts against the [None]-expanded ground truth.
        pair_mask: optional mask separating intra- from inter-chain terms,
            forwarded to compute_fape.
        use_clamped_fape: optional weight blending clamped and unclamped FAPE.
        clamp_distance: cutoff above which distance errors are clamped.
        loss_unit_distance: length scale dividing the loss.
        eps: small constant regularizing denominators.

    Returns:
        Scalar loss, averaged over the batch dimension.

    Raises:
        ValueError: if the trailing dim of *traj* is neither 7 nor 4.
    """
    # Accept either a tensor_7 or a 4x4-matrix representation of the frames.
    # Previously an unsupported trailing dim fell through to a confusing
    # NameError on pred_aff; fail fast with an explicit error instead.
    if traj.shape[-1] == 7:
        pred_aff = Rigid.from_tensor_7(traj)
    elif traj.shape[-1] == 4:
        pred_aff = Rigid.from_tensor_4x4(traj)
    else:
        raise ValueError(
            f"Unsupported frame tensor with trailing dim {traj.shape[-1]}; "
            "expected 7 (tensor_7) or 4 (4x4 matrices)"
        )
    # Re-wrap with an explicit rotation-matrix representation.
    pred_aff = Rigid(
        Rotation(rot_mats=pred_aff.get_rots().get_rot_mats(), quats=None),
        pred_aff.get_trans(),
    )
    # DISCREPANCY: DeepMind somehow gets a hold of a tensor_7 version of
    # backbone tensor, normalizes it, and then turns it back to a rotation
    # matrix. To avoid a potentially numerically unstable rotation matrix
    # to quaternion conversion, we just use the original rotation matrix
    # outright. This one hasn't been composed a bunch of times, though, so
    # it might be fine.
    gt_aff = Rigid.from_tensor_4x4(backbone_rigid_tensor)
    fape_loss = compute_fape(
        pred_aff,
        gt_aff[None],
        backbone_rigid_mask[None],
        pred_aff.get_trans(),
        gt_aff[None].get_trans(),
        backbone_rigid_mask[None],
        pair_mask=pair_mask,
        l1_clamp_distance=clamp_distance,
        length_scale=loss_unit_distance,
        eps=eps,
    )
    if use_clamped_fape is not None:
        unclamped_fape_loss = compute_fape(
            pred_aff,
            gt_aff[None],
            backbone_rigid_mask[None],
            pred_aff.get_trans(),
            gt_aff[None].get_trans(),
            backbone_rigid_mask[None],
            pair_mask=pair_mask,
            l1_clamp_distance=None,
            length_scale=loss_unit_distance,
            eps=eps,
        )
        # Blend clamped and unclamped variants by the given weight.
        fape_loss = fape_loss * use_clamped_fape + unclamped_fape_loss * (
            1 - use_clamped_fape
        )
    # Average over the batch dimension
    fape_loss = torch.mean(fape_loss)
    return fape_loss
def sidechain_loss(
    sidechain_frames: torch.Tensor,
    sidechain_atom_pos: torch.Tensor,
    rigidgroups_gt_frames: torch.Tensor,
    rigidgroups_alt_gt_frames: torch.Tensor,
    rigidgroups_gt_exists: torch.Tensor,
    renamed_atom14_gt_positions: torch.Tensor,
    renamed_atom14_gt_exists: torch.Tensor,
    alt_naming_is_better: torch.Tensor,
    clamp_distance: float = 10.0,
    length_scale: float = 10.0,
    eps: float = 1e-4,
    **kwargs,
) -> torch.Tensor:
    """Sidechain FAPE loss.

    Only the final entry along the leading dimension of `sidechain_frames` /
    `sidechain_atom_pos` (presumably the structure-module iterations — TODO
    confirm) is scored. Ground-truth frames are blended per-residue between
    the standard and alternative (renamed) naming via `alt_naming_is_better`.
    """
    # Per-residue blend between standard and swapped-name ground truth frames.
    renamed_gt_frames = (
        1.0 - alt_naming_is_better[..., None, None, None]
    ) * rigidgroups_gt_frames + alt_naming_is_better[
        ..., None, None, None
    ] * rigidgroups_alt_gt_frames
    # Steamroll the inputs
    sidechain_frames = sidechain_frames[-1]
    batch_dims = sidechain_frames.shape[:-4]
    sidechain_frames = sidechain_frames.view(*batch_dims, -1, 4, 4)
    sidechain_frames = Rigid.from_tensor_4x4(sidechain_frames)
    renamed_gt_frames = renamed_gt_frames.view(*batch_dims, -1, 4, 4)
    renamed_gt_frames = Rigid.from_tensor_4x4(renamed_gt_frames)
    rigidgroups_gt_exists = rigidgroups_gt_exists.reshape(*batch_dims, -1)
    sidechain_atom_pos = sidechain_atom_pos[-1]
    sidechain_atom_pos = sidechain_atom_pos.view(*batch_dims, -1, 3)
    renamed_atom14_gt_positions = renamed_atom14_gt_positions.view(*batch_dims, -1, 3)
    renamed_atom14_gt_exists = renamed_atom14_gt_exists.view(*batch_dims, -1)
    fape = compute_fape(
        sidechain_frames,
        renamed_gt_frames,
        rigidgroups_gt_exists,
        sidechain_atom_pos,
        renamed_atom14_gt_positions,
        renamed_atom14_gt_exists,
        pair_mask=None,
        l1_clamp_distance=clamp_distance,
        length_scale=length_scale,
        eps=eps,
    )
    return fape
def fape_loss(
    out: Dict[str, torch.Tensor],
    batch: Dict[str, torch.Tensor],
    config: ml_collections.ConfigDict,
) -> torch.Tensor:
    """Combined backbone + sidechain FAPE loss.

    When the batch carries an "asym_id" (multimer case), the backbone term is
    split into intra-chain and interface contributions with separate configs
    and weights; otherwise a single backbone term is used.
    """
    traj = out["sm"]["frames"]
    asym_id = batch.get("asym_id")
    if asym_id is not None:
        # 1 where the two residues belong to the same chain, else 0.
        intra_chain_mask = (asym_id[..., None] == asym_id[..., None, :]).to(
            dtype=traj.dtype
        )
        intra_chain_bb_loss = backbone_loss(
            traj=traj,
            pair_mask=intra_chain_mask,
            **{**batch, **config.intra_chain_backbone},
        )
        interface_bb_loss = backbone_loss(
            traj=traj,
            pair_mask=1.0 - intra_chain_mask,
            **{**batch, **config.interface_backbone},
        )
        weighted_bb_loss = (
            intra_chain_bb_loss * config.intra_chain_backbone.weight
            + interface_bb_loss * config.interface_backbone.weight
        )
    else:
        bb_loss = backbone_loss(
            traj=traj,
            **{**batch, **config.backbone},
        )
        weighted_bb_loss = bb_loss * config.backbone.weight
    sc_loss = sidechain_loss(
        out["sm"]["sidechain_frames"],
        out["sm"]["positions"],
        **{**batch, **config.sidechain},
    )
    loss = weighted_bb_loss + config.sidechain.weight * sc_loss
    # Average over the batch dimension
    loss = torch.mean(loss)
    return loss
def supervised_chi_loss(
angles_sin_cos: torch.Tensor,
unnormalized_angles_sin_cos: torch.Tensor,
aatype: torch.Tensor,
seq_mask: torch.Tensor,
chi_mask: torch.Tensor,
chi_angles_sin_cos: torch.Tensor,
chi_weight: float,
angle_norm_weight: float,
eps=1e-6,
**kwargs,
) -> torch.Tensor:
"""
Implements Algorithm 27 (torsionAngleLoss)
Args:
angles_sin_cos:
[*, N, 7, 2] predicted angles
unnormalized_angles_sin_cos:
The same angles, but unnormalized
aatype:
[*, N] residue indices
seq_mask:
[*, N] sequence mask
chi_mask:
[*, N, 7] angle mask
chi_angles_sin_cos:
[*, N, 7, 2] ground truth angles
chi_weight:
Weight for the angle component of the loss
angle_norm_weight:
Weight for the normalization component of the loss
Returns:
[*] loss tensor
"""
pred_angles = angles_sin_cos[..., 3:, :]
residue_type_one_hot = torch.nn.functional.one_hot(
aatype,
residue_constants.restype_num + 1,
)
chi_pi_periodic = torch.einsum(
"...ij,jk->ik",
residue_type_one_hot.type(angles_sin_cos.dtype),
angles_sin_cos.new_tensor(residue_constants.chi_pi_periodic),
)
true_chi = chi_angles_sin_cos[None]
shifted_mask = (1 - 2 * chi_pi_periodic).unsqueeze(-1)
true_chi_shifted = shifted_mask * true_chi
sq_chi_error = torch.sum((true_chi - pred_angles) ** 2, dim=-1)
sq_chi_error_shifted = torch.sum((true_chi_shifted - pred_angles) ** 2, dim=-1)
sq_chi_error = torch.minimum(sq_chi_error, sq_chi_error_shifted)
# The ol' switcheroo
sq_chi_error = sq_chi_error.permute(
*range(len(sq_chi_error.shape))[1:-2], 0, -2, -1
)
sq_chi_loss = masked_mean(chi_mask[..., None, :, :], sq_chi_error, dim=(-1, -2, -3))
loss = chi_weight * sq_chi_loss
angle_norm = torch.sqrt(torch.sum(unnormalized_angles_sin_cos**2, dim=-1) + eps)
norm_error = torch.abs(angle_norm - 1.0)
norm_error = norm_error.permute(*range(len(norm_error.shape))[1:-2], 0, -2, -1)
angle_norm_loss = masked_mean(
seq_mask[..., None, :, None], norm_error, dim=(-1, -2, -3)
)
loss = loss + angle_norm_weight * angle_norm_loss
# Average over the batch dimension
loss = torch.mean(loss)
return loss
def compute_plddt(logits: torch.Tensor) -> torch.Tensor:
num_bins = logits.shape[-1]
bin_width = 1.0 / num_bins
bounds = torch.arange(
start=0.5 * bin_width, end=1.0, step=bin_width, device=logits.device
)
probs = torch.nn.functional.softmax(logits, dim=-1)
pred_lddt_ca = torch.sum(
probs * bounds.view(*((1,) * len(probs.shape[:-1])), *bounds.shape),
dim=-1,
)
return pred_lddt_ca * 100
def lddt(
all_atom_pred_pos: torch.Tensor,
all_atom_positions: torch.Tensor,
all_atom_mask: torch.Tensor,
cutoff: float = 15.0,
eps: float = 1e-10,
per_residue: bool = True,
) -> torch.Tensor:
n = all_atom_mask.shape[-2]
dmat_true = torch.sqrt(
eps
+ torch.sum(
(all_atom_positions[..., None, :] - all_atom_positions[..., None, :, :])
** 2,
dim=-1,
)
)
dmat_pred = torch.sqrt(
eps
+ torch.sum(
(all_atom_pred_pos[..., None, :] - all_atom_pred_pos[..., None, :, :]) ** 2,
dim=-1,
)
)
dists_to_score = (
(dmat_true < cutoff)
* all_atom_mask
* permute_final_dims(all_atom_mask, (1, 0))
* (1.0 - torch.eye(n, device=all_atom_mask.device))
)
dist_l1 = torch.abs(dmat_true - dmat_pred)
score = (
(dist_l1 < 0.5).type(dist_l1.dtype)
+ (dist_l1 < 1.0).type(dist_l1.dtype)
+ (dist_l1 < 2.0).type(dist_l1.dtype)
+ (dist_l1 < 4.0).type(dist_l1.dtype)
)
score = score * 0.25
dims = (-1,) if per_residue else (-2, -1)
norm = 1.0 / (eps + torch.sum(dists_to_score, dim=dims))
score = norm * (eps + torch.sum(dists_to_score * score, dim=dims))
return score
def lddt_ca(
all_atom_pred_pos: torch.Tensor,
all_atom_positions: torch.Tensor,
all_atom_mask: torch.Tensor,
cutoff: float = 15.0,
eps: float = 1e-10,
per_residue: bool = True,
) -> torch.Tensor:
ca_pos = residue_constants.atom_order["CA"]
all_atom_pred_pos = all_atom_pred_pos[..., ca_pos, :]
all_atom_positions = all_atom_positions[..., ca_pos, :]
all_atom_mask = all_atom_mask[..., ca_pos : (ca_pos + 1)] # keep dim
return lddt(
all_atom_pred_pos,
all_atom_positions,
all_atom_mask,
cutoff=cutoff,
eps=eps,
per_residue=per_residue,
)
def lddt_loss(
logits: torch.Tensor,
all_atom_pred_pos: torch.Tensor,
all_atom_positions: torch.Tensor,
all_atom_mask: torch.Tensor,
resolution: torch.Tensor,
cutoff: float = 15.0,
no_bins: int = 50,
min_resolution: float = 0.1,
max_resolution: float = 3.0,
eps: float = 1e-10,
**kwargs,
) -> torch.Tensor:
n = all_atom_mask.shape[-2]
ca_pos = residue_constants.atom_order["CA"]
all_atom_pred_pos = all_atom_pred_pos[..., ca_pos, :]
all_atom_positions = all_atom_positions[..., ca_pos, :]
all_atom_mask = all_atom_mask[..., ca_pos : (ca_pos + 1)] # keep dim
score = lddt(
all_atom_pred_pos, all_atom_positions, all_atom_mask, cutoff=cutoff, eps=eps
)
# TODO: Remove after initial pipeline testing
score = torch.nan_to_num(score, nan=torch.nanmean(score))
score[score < 0] = 0
score = score.detach()
bin_index = torch.floor(score * no_bins).long()
bin_index = torch.clamp(bin_index, max=(no_bins - 1))
lddt_ca_one_hot = torch.nn.functional.one_hot(bin_index, num_classes=no_bins)
errors = softmax_cross_entropy(logits, lddt_ca_one_hot)
all_atom_mask = all_atom_mask.squeeze(-1)
loss = torch.sum(errors * all_atom_mask, dim=-1) / (
eps + torch.sum(all_atom_mask, dim=-1)
)
loss = loss * ((resolution >= min_resolution) & (resolution <= max_resolution))
# Average over the batch dimension
loss = torch.mean(loss)
return loss
def distogram_loss(
logits,
pseudo_beta,
pseudo_beta_mask,
min_bin=2.3125,
max_bin=21.6875,
no_bins=64,
eps=1e-6,
**kwargs,
):
boundaries = torch.linspace(
min_bin,
max_bin,
no_bins - 1,
device=logits.device,
)
boundaries = boundaries**2
dists = torch.sum(
(pseudo_beta[..., None, :] - pseudo_beta[..., None, :, :]) ** 2,
dim=-1,
keepdims=True,
)
true_bins = torch.sum(dists > boundaries, dim=-1)
errors = softmax_cross_entropy(
logits,
torch.nn.functional.one_hot(true_bins, no_bins),
)
square_mask = pseudo_beta_mask[..., None] * pseudo_beta_mask[..., None, :]
# FP16-friendly sum. Equivalent to:
# mean = (torch.sum(errors * square_mask, dim=(-1, -2)) /
# (eps + torch.sum(square_mask, dim=(-1, -2))))
denom = eps + torch.sum(square_mask, dim=(-1, -2))
mean = errors * square_mask
mean = torch.sum(mean, dim=-1)
mean = mean / denom[..., None]
mean = torch.sum(mean, dim=-1)
# Average over the batch dimensions
mean = torch.mean(mean)
return mean
def _calculate_bin_centers(boundaries: torch.Tensor):
step = boundaries[1] - boundaries[0]
bin_centers = boundaries + step / 2
bin_centers = torch.cat(
[bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0
)
return bin_centers
def _calculate_expected_aligned_error(
alignment_confidence_breaks: torch.Tensor,
aligned_distance_error_probs: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
return (
torch.sum(aligned_distance_error_probs * bin_centers, dim=-1),
bin_centers[-1],
)
def compute_predicted_aligned_error(
logits: torch.Tensor,
max_bin: int = 31,
no_bins: int = 64,
**kwargs,
) -> Dict[str, torch.Tensor]:
"""Computes aligned confidence metrics from logits.
Args:
logits: [*, num_res, num_res, num_bins] the logits output from
PredictedAlignedErrorHead.
max_bin: Maximum bin value
no_bins: Number of bins
Returns:
aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted
aligned error probabilities over bins for each residue pair.
predicted_aligned_error: [*, num_res, num_res] the expected aligned distance
error for each pair of residues.
max_predicted_aligned_error: [*] the maximum predicted error possible.
"""
boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)
(
predicted_aligned_error,
max_predicted_aligned_error,
) = _calculate_expected_aligned_error(
alignment_confidence_breaks=boundaries,
aligned_distance_error_probs=aligned_confidence_probs,
)
return {
"aligned_confidence_probs": aligned_confidence_probs,
"predicted_aligned_error": predicted_aligned_error,
"max_predicted_aligned_error": max_predicted_aligned_error,
}
def compute_tm(
logits: torch.Tensor,
residue_weights: Optional[torch.Tensor] = None,
asym_id: Optional[torch.Tensor] = None,
interface: bool = False,
max_bin: int = 31,
no_bins: int = 64,
eps: float = 1e-8,
**kwargs,
) -> torch.Tensor:
if residue_weights is None:
residue_weights = logits.new_ones(logits.shape[-2])
boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
bin_centers = _calculate_bin_centers(boundaries)
clipped_n = max(torch.sum(residue_weights), 19)
d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8
probs = torch.nn.functional.softmax(logits, dim=-1)
tm_per_bin = 1.0 / (1 + (bin_centers**2) / (d0**2))
predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)
n = residue_weights.shape[-1]
pair_mask = residue_weights.new_ones((n, n), dtype=torch.int32)
if interface and (asym_id is not None):
if len(asym_id.shape) > 1:
assert len(asym_id.shape) <= 2
batch_size = asym_id.shape[0]
pair_mask = residue_weights.new_ones((batch_size, n, n), dtype=torch.int32)
pair_mask *= (asym_id[..., None] != asym_id[..., None, :]).to(
dtype=pair_mask.dtype
)
predicted_tm_term *= pair_mask
pair_residue_weights = pair_mask * (
residue_weights[..., None, :] * residue_weights[..., :, None]
)
denom = eps + torch.sum(pair_residue_weights, dim=-1, keepdims=True)
normed_residue_mask = pair_residue_weights / denom
per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)
weighted = per_alignment * residue_weights
argmax = (weighted == torch.max(weighted)).nonzero()[0]
return per_alignment[tuple(argmax)]
def tm_loss(
logits,
final_affine_tensor,
backbone_rigid_tensor,
backbone_rigid_mask,
resolution,
max_bin=31,
no_bins=64,
min_resolution: float = 0.1,
max_resolution: float = 3.0,
eps=1e-8,
**kwargs,
):
# first check whether this is a tensor_7 or tensor_4*4
if final_affine_tensor.shape[-1] == 7:
pred_affine = Rigid.from_tensor_7(final_affine_tensor)
elif final_affine_tensor.shape[-1] == 4:
pred_affine = Rigid.from_tensor_4x4(final_affine_tensor)
backbone_rigid = Rigid.from_tensor_4x4(backbone_rigid_tensor)
def _points(affine):
pts = affine.get_trans()[..., None, :, :]
return affine.invert()[..., None].apply(pts)
sq_diff = torch.sum((_points(pred_affine) - _points(backbone_rigid)) ** 2, dim=-1)
sq_diff = sq_diff.detach()
boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
boundaries = boundaries**2
true_bins = torch.sum(sq_diff[..., None] > boundaries, dim=-1)
errors = softmax_cross_entropy(
logits, torch.nn.functional.one_hot(true_bins, no_bins)
)
square_mask = backbone_rigid_mask[..., None] * backbone_rigid_mask[..., None, :]
loss = torch.sum(errors * square_mask, dim=-1)
scale = 0.5 # hack to help FP16 training along
denom = eps + torch.sum(scale * square_mask, dim=(-1, -2))
loss = loss / denom[..., None]
loss = torch.sum(loss, dim=-1)
loss = loss * scale
loss = loss * ((resolution >= min_resolution) & (resolution <= max_resolution))
# Average over the batch dimension
loss = torch.mean(loss)
return loss
def between_residue_bond_loss(
pred_atom_positions: torch.Tensor, # (*, N, 37/14, 3)
pred_atom_mask: torch.Tensor, # (*, N, 37/14)
residue_index: torch.Tensor, # (*, N)
aatype: torch.Tensor, # (*, N)
tolerance_factor_soft=12.0,
tolerance_factor_hard=12.0,
eps=1e-6,
) -> Dict[str, torch.Tensor]:
"""Flat-bottom loss to penalize structural violations between residues.
This is a loss penalizing any violation of the geometry around the peptide
bond between consecutive amino acids. This loss corresponds to
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 44, 45.
Args:
pred_atom_positions: Atom positions in atom37/14 representation
pred_atom_mask: Atom mask in atom37/14 representation
residue_index: Residue index for given amino acid, this is assumed to be
monotonically increasing.
aatype: Amino acid type of given residue
tolerance_factor_soft: soft tolerance factor measured in standard deviations
of pdb distributions
tolerance_factor_hard: hard tolerance factor measured in standard deviations
of pdb distributions
Returns:
Dict containing:
* 'c_n_loss_mean': Loss for peptide bond length violations
* 'ca_c_n_loss_mean': Loss for violations of bond angle around C spanned
by CA, C, N
* 'c_n_ca_loss_mean': Loss for violations of bond angle around N spanned
by C, N, CA
* 'per_residue_loss_sum': sum of all losses for each residue
* 'per_residue_violation_mask': mask denoting all residues with violation
present.
"""
# Get the positions of the relevant backbone atoms.
this_ca_pos = pred_atom_positions[..., :-1, 1, :]
this_ca_mask = pred_atom_mask[..., :-1, 1]
this_c_pos = pred_atom_positions[..., :-1, 2, :]
this_c_mask = pred_atom_mask[..., :-1, 2]
next_n_pos = pred_atom_positions[..., 1:, 0, :]
next_n_mask = pred_atom_mask[..., 1:, 0]
next_ca_pos = pred_atom_positions[..., 1:, 1, :]
next_ca_mask = pred_atom_mask[..., 1:, 1]
has_no_gap_mask = (residue_index[..., 1:] - residue_index[..., :-1]) == 1.0
# Compute loss for the C--N bond.
c_n_bond_length = torch.sqrt(
eps + torch.sum((this_c_pos - next_n_pos) ** 2, dim=-1)
)
# The C-N bond to proline has slightly different length because of the ring.
next_is_proline = aatype[..., 1:] == residue_constants.resname_to_idx["PRO"]
gt_length = (~next_is_proline) * residue_constants.between_res_bond_length_c_n[
0
] + next_is_proline * residue_constants.between_res_bond_length_c_n[1]
gt_stddev = (
~next_is_proline
) * residue_constants.between_res_bond_length_stddev_c_n[
0
] + next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[
1
]
c_n_bond_length_error = torch.sqrt(eps + (c_n_bond_length - gt_length) ** 2)
c_n_loss_per_residue = torch.nn.functional.relu(
c_n_bond_length_error - tolerance_factor_soft * gt_stddev
)
mask = this_c_mask * next_n_mask * has_no_gap_mask
c_n_loss = torch.sum(mask * c_n_loss_per_residue, dim=-1) / (
torch.sum(mask, dim=-1) + eps
)
c_n_violation_mask = mask * (
c_n_bond_length_error > (tolerance_factor_hard * gt_stddev)
)
# Compute loss for the angles.
ca_c_bond_length = torch.sqrt(
eps + torch.sum((this_ca_pos - this_c_pos) ** 2, dim=-1)
)
n_ca_bond_length = torch.sqrt(
eps + torch.sum((next_n_pos - next_ca_pos) ** 2, dim=-1)
)
c_ca_unit_vec = (this_ca_pos - this_c_pos) / ca_c_bond_length[..., None]
c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length[..., None]
n_ca_unit_vec = (next_ca_pos - next_n_pos) / n_ca_bond_length[..., None]
ca_c_n_cos_angle = torch.sum(c_ca_unit_vec * c_n_unit_vec, dim=-1)
gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0]
gt_stddev = residue_constants.between_res_bond_length_stddev_c_n[0]
ca_c_n_cos_angle_error = torch.sqrt(eps + (ca_c_n_cos_angle - gt_angle) ** 2)
ca_c_n_loss_per_residue = torch.nn.functional.relu(
ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev
)
mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask
ca_c_n_loss = torch.sum(mask * ca_c_n_loss_per_residue, dim=-1) / (
torch.sum(mask, dim=-1) + eps
)
ca_c_n_violation_mask = mask * (
ca_c_n_cos_angle_error > (tolerance_factor_hard * gt_stddev)
)
c_n_ca_cos_angle = torch.sum((-c_n_unit_vec) * n_ca_unit_vec, dim=-1)
gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0]
gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1]
c_n_ca_cos_angle_error = torch.sqrt(eps + torch.square(c_n_ca_cos_angle - gt_angle))
c_n_ca_loss_per_residue = torch.nn.functional.relu(
c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev
)
mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask
c_n_ca_loss = torch.sum(mask * c_n_ca_loss_per_residue, dim=-1) / (
torch.sum(mask, dim=-1) + eps
)
c_n_ca_violation_mask = mask * (
c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev)
)
# Compute a per residue loss (equally distribute the loss to both
# neighbouring residues).
per_residue_loss_sum = (
c_n_loss_per_residue + ca_c_n_loss_per_residue + c_n_ca_loss_per_residue
)
per_residue_loss_sum = 0.5 * (
torch.nn.functional.pad(per_residue_loss_sum, (0, 1))
+ torch.nn.functional.pad(per_residue_loss_sum, (1, 0))
)
# Compute hard violations.
violation_mask = torch.max(
torch.stack(
[c_n_violation_mask, ca_c_n_violation_mask, c_n_ca_violation_mask],
dim=-2,
),
dim=-2,
)[0]
violation_mask = torch.maximum(
torch.nn.functional.pad(violation_mask, (0, 1)),
torch.nn.functional.pad(violation_mask, (1, 0)),
)
return {
"c_n_loss_mean": c_n_loss,
"ca_c_n_loss_mean": ca_c_n_loss,
"c_n_ca_loss_mean": c_n_ca_loss,
"per_residue_loss_sum": per_residue_loss_sum,
"per_residue_violation_mask": violation_mask,
}
def between_residue_clash_loss(
atom14_pred_positions: torch.Tensor,
atom14_atom_exists: torch.Tensor,
atom14_atom_radius: torch.Tensor,
residue_index: torch.Tensor,
asym_id: Optional[torch.Tensor] = None,
overlap_tolerance_soft=1.5,
overlap_tolerance_hard=1.5,
eps=1e-10,
) -> Dict[str, torch.Tensor]:
"""Loss to penalize steric clashes between residues.
This is a loss penalizing any steric clashes due to non bonded atoms in
different peptides coming too close. This loss corresponds to the part with
different residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
atom14_atom_radius: Van der Waals radius for each atom.
residue_index: Residue index for given amino acid.
overlap_tolerance_soft: Soft tolerance factor.
overlap_tolerance_hard: Hard tolerance factor.
Returns:
Dict containing:
* 'mean_loss': average clash loss
* 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 14)
* 'per_atom_clash_mask': mask whether atom clashes with any other atom
shape (N, 14)
"""
fp_type = atom14_pred_positions.dtype
# Create the distance matrix.
# (N, N, 14, 14)
dists = torch.sqrt(
eps
+ torch.sum(
(
atom14_pred_positions[..., :, None, :, None, :]
- atom14_pred_positions[..., None, :, None, :, :]
)
** 2,
dim=-1,
)
)
# Create the mask for valid distances.
# shape (N, N, 14, 14)
dists_mask = (
atom14_atom_exists[..., :, None, :, None]
* atom14_atom_exists[..., None, :, None, :]
).type(fp_type)
# Mask out all the duplicate entries in the lower triangular matrix.
# Also mask out the diagonal (atom-pairs from the same residue) -- these atoms
# are handled separately.
dists_mask = dists_mask * (
residue_index[..., :, None, None, None]
< residue_index[..., None, :, None, None]
)
# Backbone C--N bond between subsequent residues is no clash.
c_one_hot = torch.nn.functional.one_hot(residue_index.new_tensor(2), num_classes=14)
c_one_hot = c_one_hot.reshape(
*((1,) * len(residue_index.shape[:-1])), *c_one_hot.shape
)
c_one_hot = c_one_hot.type(fp_type)
n_one_hot = torch.nn.functional.one_hot(residue_index.new_tensor(0), num_classes=14)
n_one_hot = n_one_hot.reshape(
*((1,) * len(residue_index.shape[:-1])), *n_one_hot.shape
)
n_one_hot = n_one_hot.type(fp_type)
neighbour_mask = (residue_index[..., :, None] + 1) == residue_index[..., None, :]
if asym_id is not None:
neighbour_mask = neighbour_mask & (
asym_id[..., :, None] == asym_id[..., None, :]
)
neighbour_mask = neighbour_mask[..., None, None]
c_n_bonds = (
neighbour_mask
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | true |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_feats.py | src/cheap/openfold_utils/_feats.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import torch
import torch.nn as nn
from typing import Any
from . import _residue_constants as rc
from ._rigids import Rotation, Rigid
from ._tensor_utils import batched_gather
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
is_gly = aatype == rc.restype_order["G"]
ca_idx = rc.atom_order["CA"]
cb_idx = rc.atom_order["CB"]
pseudo_beta = torch.where(
is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
all_atom_positions[..., ca_idx, :],
all_atom_positions[..., cb_idx, :],
)
if all_atom_masks is not None:
pseudo_beta_mask = torch.where(
is_gly,
all_atom_masks[..., ca_idx],
all_atom_masks[..., cb_idx],
)
return pseudo_beta, pseudo_beta_mask
else:
return pseudo_beta
def atom14_to_atom37(atom14, batch):
atom37_data = batched_gather(
atom14,
batch["residx_atom37_to_atom14"],
dim=-2,
no_batch_dims=len(atom14.shape[:-2]),
)
atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]
return atom37_data
def dgram_from_positions(
pos: torch.Tensor,
min_bin: float = 3.25,
max_bin: float = 50.75,
no_bins: float = 39,
inf: float = 1e8,
):
dgram = torch.sum(
(pos[..., None, :] - pos[..., None, :, :]) ** 2, dim=-1, keepdim=True
)
lower = torch.linspace(min_bin, max_bin, no_bins, device=pos.device) ** 2
upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
return dgram
def build_template_pair_feat(
batch, min_bin, max_bin, no_bins, use_unit_vector=False, eps=1e-20, inf=1e8
):
template_mask = batch["template_pseudo_beta_mask"]
template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
# Compute distogram (this seems to differ slightly from Alg. 5)
tpb = batch["template_pseudo_beta"]
dgram = dgram_from_positions(tpb, min_bin, max_bin, no_bins, inf)
to_concat = [dgram, template_mask_2d[..., None]]
aatype_one_hot = nn.functional.one_hot(
batch["template_aatype"],
rc.restype_num + 2,
)
n_res = batch["template_aatype"].shape[-1]
to_concat.append(
aatype_one_hot[..., None, :, :].expand(
*aatype_one_hot.shape[:-2], n_res, -1, -1
)
)
to_concat.append(
aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1)
)
n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
rigids = Rigid.make_transform_from_reference(
n_xyz=batch["template_all_atom_positions"][..., n, :],
ca_xyz=batch["template_all_atom_positions"][..., ca, :],
c_xyz=batch["template_all_atom_positions"][..., c, :],
eps=eps,
)
points = rigids.get_trans()[..., None, :, :]
rigid_vec = rigids[..., None].invert_apply(points)
inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec**2, dim=-1))
t_aa_masks = batch["template_all_atom_mask"]
template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c]
template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
inv_distance_scalar = inv_distance_scalar * template_mask_2d
unit_vector = rigid_vec * inv_distance_scalar[..., None]
if not use_unit_vector:
unit_vector = unit_vector * 0.0
to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1))
to_concat.append(template_mask_2d[..., None])
act = torch.cat(to_concat, dim=-1)
act = act * template_mask_2d[..., None]
return act
def build_extra_msa_feat(batch):
msa_1hot = nn.functional.one_hot(batch["extra_msa"], 23)
msa_feat = [
msa_1hot,
batch["extra_has_deletion"].unsqueeze(-1),
batch["extra_deletion_value"].unsqueeze(-1),
]
return torch.cat(msa_feat, dim=-1)
def torsion_angles_to_frames(
r: Any, # Union[Rigid, rigid_matrix_vector.Rigid3Array],
alpha: torch.Tensor,
aatype: torch.Tensor,
rrgdf: torch.Tensor,
):
rigid_type = type(r)
# [*, N, 8, 4, 4]
default_4x4 = rrgdf[aatype, ...]
# [*, N, 8] transformations, i.e.
# One [*, N, 8, 3, 3] rotation matrix and
# One [*, N, 8, 3] translation matrix
default_r = rigid_type.from_tensor_4x4(default_4x4)
bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))
bb_rot[..., 1] = 1
# [*, N, 8, 2]
alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2)
# [*, N, 8, 3, 3]
# Produces rotation matrices of the form:
# [
# [1, 0 , 0 ],
# [0, a_2,-a_1],
# [0, a_1, a_2]
# ]
# This follows the original code rather than the supplement, which uses
# different indices.
all_rots = alpha.new_zeros(default_r.shape + (4, 4))
all_rots[..., 0, 0] = 1
all_rots[..., 1, 1] = alpha[..., 1]
all_rots[..., 1, 2] = -alpha[..., 0]
all_rots[..., 2, 1:3] = alpha
all_rots = rigid_type.from_tensor_4x4(all_rots)
all_frames = default_r.compose(all_rots)
chi2_frame_to_frame = all_frames[..., 5]
chi3_frame_to_frame = all_frames[..., 6]
chi4_frame_to_frame = all_frames[..., 7]
chi1_frame_to_bb = all_frames[..., 4]
chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)
chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)
chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)
all_frames_to_bb = rigid_type.cat(
[
all_frames[..., :5],
chi2_frame_to_bb.unsqueeze(-1),
chi3_frame_to_bb.unsqueeze(-1),
chi4_frame_to_bb.unsqueeze(-1),
],
dim=-1,
)
all_frames_to_global = r[..., None].compose(all_frames_to_bb)
return all_frames_to_global
def frames_and_literature_positions_to_atom14_pos(
r: Any, # Union[Rigid, rigid_matrix_vector.Rigid3Array],
aatype: torch.Tensor,
default_frames,
group_idx,
atom_mask,
lit_positions,
):
# [*, N, 14, 4, 4]
default_4x4 = default_frames[aatype, ...]
# [*, N, 14]
group_mask = group_idx[aatype, ...]
# [*, N, 14, 8]
group_mask = nn.functional.one_hot(
group_mask,
num_classes=default_frames.shape[-3],
)
# [*, N, 14, 8]
t_atoms_to_global = r[..., None, :] * group_mask
# [*, N, 14]
t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1))
# [*, N, 14]
atom_mask = atom_mask[aatype, ...].unsqueeze(-1)
# [*, N, 14, 3]
lit_positions = lit_positions[aatype, ...]
pred_positions = t_atoms_to_global.apply(lit_positions)
pred_positions = pred_positions * atom_mask
return pred_positions
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_tensor_utils.py | src/cheap/openfold_utils/_tensor_utils.py | import torch
from typing import List
from functools import partial
def batched_gather(data, inds, dim=0, no_batch_dims=0):
ranges = []
for i, s in enumerate(data.shape[:no_batch_dims]):
r = torch.arange(s)
r = r.view(*(*((1,) * i), -1, *((1,) * (len(inds.shape) - i - 1))))
ranges.append(r)
remaining_dims = [slice(None) for _ in range(len(data.shape) - no_batch_dims)]
remaining_dims[dim - no_batch_dims if dim >= 0 else dim] = inds
ranges.extend(remaining_dims)
return data[ranges]
def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
zero_index = -1 * len(inds)
first_inds = list(range(len(tensor.shape[:zero_index])))
return tensor.permute(first_inds + [zero_index + i for i in inds])
def masked_mean(mask, value, dim, eps=1e-4):
mask = mask.expand(*value.shape)
return torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim))
# With tree_map, a poor man's JAX tree_map
def dict_map(fn, dic, leaf_type):
new_dict = {}
for k, v in dic.items():
if type(v) is dict:
new_dict[k] = dict_map(fn, v, leaf_type)
else:
new_dict[k] = tree_map(fn, v, leaf_type)
return new_dict
def tree_map(fn, tree, leaf_type):
if isinstance(tree, dict):
return dict_map(fn, tree, leaf_type)
elif isinstance(tree, list):
return [tree_map(fn, x, leaf_type) for x in tree]
elif isinstance(tree, tuple):
return tuple([tree_map(fn, x, leaf_type) for x in tree])
elif isinstance(tree, leaf_type):
return fn(tree)
else:
print(type(tree))
raise ValueError("Not supported")
tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/__init__.py | src/cheap/openfold_utils/__init__.py | from ._protein import Protein as OFProtein
from ._protein import from_pdb_string as protein_from_pdb_string
from ._protein import to_pdb as protein_to_pdb
from ._rigids import Rigid, Rotation
from ._tensor_utils import (
batched_gather,
permute_final_dims,
masked_mean,
tree_map,
tensor_tree_map,
)
from ._residue_constants import make_atom14_dists_bounds
from ._data_pipeline import make_pdb_features
from ._data_transforms import (
make_pseudo_beta,
make_seq_mask,
make_atom14_masks,
make_atom14_masks_np,
make_atom14_positions,
make_all_atom_aatype,
atom37_to_frames,
get_chi_atom_indices,
atom37_to_torsion_angles,
get_backbone_frames,
get_chi_angles,
)
from ._feats import atom14_to_atom37
from ._fape import (
compute_fape,
backbone_loss,
sidechain_loss,
make_default_alphafold_loss,
)
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_data_pipeline.py | src/cheap/openfold_utils/_data_pipeline.py | from typing import Mapping
import numpy as np
from transformers.models.esm.openfold_utils import residue_constants
from . import OFProtein
FeatureDict = Mapping[str, np.ndarray]
def _aatype_to_str_sequence(aatype):
return "".join(
[residue_constants.restypes_with_x[aatype[i]] for i in range(len(aatype))]
)
def make_sequence_features(
sequence: str, description: str, num_res: int
) -> FeatureDict:
"""Construct a feature dict of sequence features."""
features = {}
features["aatype"] = residue_constants.sequence_to_onehot(
sequence=sequence,
mapping=residue_constants.restype_order_with_x,
map_unknown_to_x=True,
)
features["between_segment_residues"] = np.zeros((num_res,), dtype=np.int32)
features["domain_name"] = np.array([description.encode("utf-8")], dtype=np.object_)
features["residue_index"] = np.array(range(num_res), dtype=np.int32)
features["seq_length"] = np.array([num_res] * num_res, dtype=np.int32)
features["sequence"] = np.array([sequence.encode("utf-8")], dtype=np.object_)
return features
def make_protein_features(
protein_object: OFProtein,
description: str,
_is_distillation: bool = False,
) -> FeatureDict:
pdb_feats = {}
aatype = protein_object.aatype
sequence = _aatype_to_str_sequence(aatype)
pdb_feats.update(
make_sequence_features(
sequence=sequence,
description=description,
num_res=len(protein_object.aatype),
)
)
all_atom_positions = protein_object.atom_positions
all_atom_mask = protein_object.atom_mask
pdb_feats["all_atom_positions"] = all_atom_positions.astype(np.float32)
pdb_feats["all_atom_mask"] = all_atom_mask.astype(np.float32)
pdb_feats["resolution"] = np.array([0.0]).astype(np.float32)
pdb_feats["is_distillation"] = np.array(1.0 if _is_distillation else 0.0).astype(
np.float32
)
return pdb_feats
def make_pdb_features(
protein_object: OFProtein,
description: str,
is_distillation: bool = True,
confidence_threshold: float = 50.0,
) -> FeatureDict:
pdb_feats = make_protein_features(
protein_object, description, _is_distillation=True
)
if is_distillation:
high_confidence = protein_object.b_factors > confidence_threshold
high_confidence = np.any(high_confidence, axis=-1)
pdb_feats["all_atom_mask"] *= high_confidence[..., None]
return pdb_feats
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_fape.py | src/cheap/openfold_utils/_fape.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import logging
from dataclasses import dataclass, field, asdict
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from torch.distributions.bernoulli import Bernoulli
from transformers.models.esm.openfold_utils import residue_constants
from . import (
Rigid,
Rotation,
tree_map,
tensor_tree_map,
masked_mean,
permute_final_dims,
make_atom14_dists_bounds,
)
from dataclasses import dataclass
"""
Configurations from OpenFold.
TODO: port over to Hydra.
"""
@dataclass
class DistogramConfig:
    """Parameters for the distogram loss (see `distogram_loss`)."""
    min_bin: float = 2.3125
    max_bin: float = 21.6875
    no_bins: int = 64
    eps: float = 1e-6
    weight: float = 0.3
@dataclass
class ExperimentallyResolvedConfig:
    """Parameters for the experimentally-resolved-head loss."""
    eps: float = 1e-8
    min_resolution: float = 0.1
    max_resolution: float = 3.0
    weight: float = 0.0
@dataclass
class FapeBackboneConfig:
    """Backbone FAPE settings (consumed by `backbone_loss`)."""
    clamp_distance: float = 10.0
    loss_unit_distance: float = 10.0
    weight: float = 0.5
@dataclass
class FapeSidechainConfig:
    """Sidechain FAPE settings (consumed by `sidechain_loss`)."""
    clamp_distance: float = 10.0
    length_scale: float = 10.0
    weight: float = 0.5
@dataclass
class FapeConfig:
    """FAPE loss settings: backbone and sidechain terms plus overall weight."""
    eps: float = 1e-4
    weight: float = 1.0
    backbone: FapeBackboneConfig = field(default_factory=FapeBackboneConfig)
    sidechain: FapeSidechainConfig = field(default_factory=FapeSidechainConfig)
@dataclass
class PlddtLossConfig:
    """Settings for the pLDDT confidence loss (see `lddt_loss`)."""
    min_resolution: float = 0.1
    max_resolution: float = 3.0
    cutoff: float = 15.0
    no_bins: int = 50
    eps: float = 1e-10
    weight: float = 0.01
@dataclass
class MaskedMSAConfig:
    """Settings for the masked-MSA loss."""
    eps: float = 1e-8
    weight: float = 2.0
@dataclass
class SupervisedChiConfig:
    """Settings for the supervised chi-angle loss (see `supervised_chi_loss`)."""
    chi_weight: float = 0.5
    angle_norm_weight: float = 0.01
    eps: float = 1e-8
    weight: float = 1.0
@dataclass
class ViolationConfig:
    """Settings for the structural-violation losses (bond/angle/clash terms)."""
    violation_tolerance_factor: float = 12.0
    clash_overlap_tolerance: float = 1.5
    eps: float = 1e-6
    weight: float = 0.0
@dataclass
class TMConfig:
    """Settings for the predicted-aligned-error / TM loss (see `tm_loss`)."""
    max_bin: int = 31
    no_bins: int = 64
    min_resolution: float = 0.1
    max_resolution: float = 3.0
    eps: float = 1e-8
    weight: float = 0.0
    enabled: bool = False
@dataclass
class LossConfig:
    """Top-level container bundling the per-term loss configurations."""
    distogram: DistogramConfig = field(default_factory=DistogramConfig)
    experimentally_resolved: ExperimentallyResolvedConfig = field(
        default_factory=ExperimentallyResolvedConfig
    )
    fape: FapeConfig = field(default_factory=FapeConfig)
    plddt_loss: PlddtLossConfig = field(default_factory=PlddtLossConfig)
    masked_msa: MaskedMSAConfig = field(default_factory=MaskedMSAConfig)
    supervised_chi: SupervisedChiConfig = field(default_factory=SupervisedChiConfig)
    violation: ViolationConfig = field(default_factory=ViolationConfig)
    tm: TMConfig = field(default_factory=TMConfig)
"""
Loss definitions
"""
def softmax_cross_entropy(logits, labels):
    """Cross entropy between a target distribution `labels` and raw `logits`.

    Reduces over the final (class) dimension.
    """
    log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
    return -torch.sum(labels * log_probs, dim=-1)
def sigmoid_cross_entropy(logits, labels):
    """Elementwise binary cross entropy on raw logits.

    Computed in float64 for numerical stability, then cast back to the
    logits' original dtype.
    """
    in_dtype = logits.dtype
    logits64 = logits.double()
    labels64 = labels.double()
    # log(sigmoid(x)) and log(1 - sigmoid(x)) via the stable logsigmoid.
    log_p = torch.nn.functional.logsigmoid(logits64)
    log_not_p = torch.nn.functional.logsigmoid(-1 * logits64)
    loss = (-1.0 * labels64) * log_p - (1.0 - labels64) * log_not_p
    return loss.to(dtype=in_dtype)
def torsion_angle_loss(
    a,  # [*, N, 7, 2] predicted (possibly unnormalized) sin/cos pairs
    a_gt,  # [*, N, 7, 2] ground-truth sin/cos pairs
    a_alt_gt,  # [*, N, 7, 2] alternate (shifted) ground truth
):
    """Torsion-angle loss: chord-distance error plus a unit-norm penalty."""
    # [*, N, 7] length of each predicted sin/cos pair
    pred_norm = torch.norm(a, dim=-1)

    # Project predictions onto the unit circle before comparing angles.
    unit_pred = a / pred_norm.unsqueeze(-1)

    # Squared distance to each of the two equivalent ground truths; take
    # the closer one per angle.
    sq_err_gt = torch.norm(unit_pred - a_gt, dim=-1) ** 2
    sq_err_alt = torch.norm(unit_pred - a_alt_gt, dim=-1) ** 2
    best_sq_err = torch.minimum(sq_err_gt, sq_err_alt)

    # [*] mean over residues and the 7 torsions
    l_torsion = torch.mean(best_sq_err, dim=(-1, -2))
    # Penalize raw predictions drifting off the unit circle.
    l_angle_norm = torch.mean(torch.abs(pred_norm - 1), dim=(-1, -2))

    an_weight = 0.02
    return l_torsion + an_weight * l_angle_norm
def compute_fape(
    pred_frames: Rigid,
    target_frames: Rigid,
    frames_mask: torch.Tensor,
    pred_positions: torch.Tensor,
    target_positions: torch.Tensor,
    positions_mask: torch.Tensor,
    length_scale: float,
    l1_clamp_distance: Optional[float] = None,
    eps=1e-8,
) -> torch.Tensor:
    """
    Computes FAPE loss.
    Args:
        pred_frames:
            [*, N_frames] Rigid object of predicted frames
        target_frames:
            [*, N_frames] Rigid object of ground truth frames
        frames_mask:
            [*, N_frames] binary mask for the frames
        pred_positions:
            [*, N_pts, 3] predicted atom positions
        target_positions:
            [*, N_pts, 3] ground truth positions
        positions_mask:
            [*, N_pts] positions mask
        length_scale:
            Length scale by which the loss is divided
        l1_clamp_distance:
            Cutoff above which distance errors are disregarded
        eps:
            Small value used to regularize denominators
    Returns:
        [*] loss tensor
    """
    # [*, N_frames, N_pts, 3]
    # Express every point in the local coordinate system of every frame,
    # for both prediction and ground truth.
    local_pred_pos = pred_frames.invert()[..., None].apply(
        pred_positions[..., None, :, :],
    )
    local_target_pos = target_frames.invert()[..., None].apply(
        target_positions[..., None, :, :],
    )
    # eps inside the sqrt keeps the gradient finite at zero error.
    error_dist = torch.sqrt(
        torch.sum((local_pred_pos - local_target_pos) ** 2, dim=-1) + eps
    )
    # Clamped FAPE: truncate errors beyond l1_clamp_distance.
    if l1_clamp_distance is not None:
        error_dist = torch.clamp(error_dist, min=0, max=l1_clamp_distance)
    normed_error = error_dist / length_scale
    normed_error = normed_error * frames_mask[..., None]
    normed_error = normed_error * positions_mask[..., None, :]
    # FP16-friendly averaging. Roughly equivalent to:
    #
    # norm_factor = (
    #     torch.sum(frames_mask, dim=-1) *
    #     torch.sum(positions_mask, dim=-1)
    # )
    # normed_error = torch.sum(normed_error, dim=(-1, -2)) / (eps + norm_factor)
    #
    # ("roughly" because eps is necessarily duplicated in the latter)
    normed_error = torch.sum(normed_error, dim=-1)
    normed_error = normed_error / (eps + torch.sum(frames_mask, dim=-1))[..., None]
    normed_error = torch.sum(normed_error, dim=-1)
    normed_error = normed_error / (eps + torch.sum(positions_mask, dim=-1))
    return normed_error
def backbone_loss(
    backbone_rigid_tensor: torch.Tensor,
    backbone_rigid_mask: torch.Tensor,
    traj: torch.Tensor,
    use_clamped_fape: Optional[torch.Tensor] = None,
    clamp_distance: float = 10.0,
    loss_unit_distance: float = 10.0,
    eps: float = 1e-4,
    **kwargs,
) -> torch.Tensor:
    """
    Backbone FAPE between a trajectory of predicted frames and the ground
    truth.

    Args:
        backbone_rigid_tensor: ground-truth backbone frames as 4x4
            transforms (consumed by Rigid.from_tensor_4x4).
        backbone_rigid_mask: mask over backbone frames.
        traj: predicted frames in 7-tensor (quaternion + translation)
            form; the ground truth is broadcast over its leading axis so
            FAPE is scored per trajectory step.
        use_clamped_fape: optional blend factor between the clamped (1)
            and unclamped (0) FAPE variants.
        clamp_distance: clamping cutoff forwarded to compute_fape.
        loss_unit_distance: length scale used to normalize errors.
        **kwargs: ignored (allows passing a merged batch/config dict).
    Returns:
        Scalar FAPE loss (mean over all remaining dimensions).
    """
    pred_aff = Rigid.from_tensor_7(traj)
    pred_aff = Rigid(
        Rotation(rot_mats=pred_aff.get_rots().get_rot_mats(), quats=None),
        pred_aff.get_trans(),
    )

    # DISCREPANCY: DeepMind somehow gets a hold of a tensor_7 version of
    # backbone tensor, normalizes it, and then turns it back to a rotation
    # matrix. To avoid a potentially numerically unstable rotation matrix
    # to quaternion conversion, we just use the original rotation matrix
    # outright. This one hasn't been composed a bunch of times, though, so
    # it might be fine.
    gt_aff = Rigid.from_tensor_4x4(backbone_rigid_tensor)

    # The scored "points" are the frames' own translations (CA positions
    # by upstream convention — confirm against the structure module).
    fape_loss = compute_fape(
        pred_aff,
        gt_aff[None],
        backbone_rigid_mask[None],
        pred_aff.get_trans(),
        gt_aff[None].get_trans(),
        backbone_rigid_mask[None],
        l1_clamp_distance=clamp_distance,
        length_scale=loss_unit_distance,
        eps=eps,
    )
    if use_clamped_fape is not None:
        unclamped_fape_loss = compute_fape(
            pred_aff,
            gt_aff[None],
            backbone_rigid_mask[None],
            pred_aff.get_trans(),
            gt_aff[None].get_trans(),
            backbone_rigid_mask[None],
            l1_clamp_distance=None,
            length_scale=loss_unit_distance,
            eps=eps,
        )
        fape_loss = fape_loss * use_clamped_fape + unclamped_fape_loss * (
            1 - use_clamped_fape
        )

    # Average over the batch dimension
    fape_loss = torch.mean(fape_loss)

    return fape_loss
def sidechain_loss(
    sidechain_frames: torch.Tensor,
    sidechain_atom_pos: torch.Tensor,
    rigidgroups_gt_frames: torch.Tensor,
    rigidgroups_alt_gt_frames: torch.Tensor,
    rigidgroups_gt_exists: torch.Tensor,
    renamed_atom14_gt_positions: torch.Tensor,
    renamed_atom14_gt_exists: torch.Tensor,
    alt_naming_is_better: torch.Tensor,
    clamp_distance: float = 10.0,
    length_scale: float = 10.0,
    eps: float = 1e-4,
    **kwargs,
) -> torch.Tensor:
    """
    Sidechain FAPE on the final entry of the predicted frame stack.

    alt_naming_is_better selects, per example, between the original and
    the symmetry-renamed ground-truth frames; frames and atom positions
    are then flattened before being handed to compute_fape.
    """
    # Blend original vs. alternative ground-truth frames per the
    # renaming decision (0 -> original, 1 -> alternative).
    renamed_gt_frames = (
        1.0 - alt_naming_is_better[..., None, None, None]
    ) * rigidgroups_gt_frames + alt_naming_is_better[
        ..., None, None, None
    ] * rigidgroups_alt_gt_frames

    # Steamroll the inputs
    # [-1]: only the last element of the leading stack is supervised.
    sidechain_frames = sidechain_frames[-1]
    batch_dims = sidechain_frames.shape[:-4]
    sidechain_frames = sidechain_frames.view(*batch_dims, -1, 4, 4)
    sidechain_frames = Rigid.from_tensor_4x4(sidechain_frames)
    renamed_gt_frames = renamed_gt_frames.view(*batch_dims, -1, 4, 4)
    renamed_gt_frames = Rigid.from_tensor_4x4(renamed_gt_frames)
    rigidgroups_gt_exists = rigidgroups_gt_exists.reshape(*batch_dims, -1)
    sidechain_atom_pos = sidechain_atom_pos[-1]
    sidechain_atom_pos = sidechain_atom_pos.view(*batch_dims, -1, 3)
    renamed_atom14_gt_positions = renamed_atom14_gt_positions.view(*batch_dims, -1, 3)
    renamed_atom14_gt_exists = renamed_atom14_gt_exists.view(*batch_dims, -1)

    fape = compute_fape(
        sidechain_frames,
        renamed_gt_frames,
        rigidgroups_gt_exists,
        sidechain_atom_pos,
        renamed_atom14_gt_positions,
        renamed_atom14_gt_exists,
        l1_clamp_distance=clamp_distance,
        length_scale=length_scale,
        eps=eps,
    )

    return fape
def fape_loss(
    out: Dict[str, torch.Tensor],
    batch: Dict[str, torch.Tensor],
    config: LossConfig,
) -> torch.Tensor:
    """
    Total FAPE: weighted sum of the backbone and sidechain terms, using
    the structure-module outputs in `out["sm"]` and the weights/settings
    from `config`, averaged over the batch.
    """
    bb_loss = backbone_loss(
        traj=out["sm"]["frames"],
        **{**batch, **asdict(config.backbone)},
    )

    sc_loss = sidechain_loss(
        out["sm"]["sidechain_frames"],
        out["sm"]["positions"],
        **{**batch, **asdict(config.sidechain)},
    )

    loss = config.backbone.weight * bb_loss + config.sidechain.weight * sc_loss

    # Average over the batch dimension
    loss = torch.mean(loss)

    return loss
def supervised_chi_loss(
    angles_sin_cos: torch.Tensor,
    unnormalized_angles_sin_cos: torch.Tensor,
    aatype: torch.Tensor,
    seq_mask: torch.Tensor,
    chi_mask: torch.Tensor,
    chi_angles_sin_cos: torch.Tensor,
    chi_weight: float,
    angle_norm_weight: float,
    eps=1e-6,
    **kwargs,
) -> torch.Tensor:
    """
    Implements Algorithm 27 (torsionAngleLoss)
    Args:
        angles_sin_cos:
            [*, N, 7, 2] predicted angles
        unnormalized_angles_sin_cos:
            The same angles, but unnormalized
        aatype:
            [*, N] residue indices
        seq_mask:
            [*, N] sequence mask
        chi_mask:
            [*, N, 7] angle mask
        chi_angles_sin_cos:
            [*, N, 7, 2] ground truth angles
        chi_weight:
            Weight for the angle component of the loss
        angle_norm_weight:
            Weight for the normalization component of the loss
    Returns:
        [*] loss tensor
    """
    # Only the chi angles (the last 4 of the 7 torsions) are supervised here.
    pred_angles = angles_sin_cos[..., 3:, :]
    residue_type_one_hot = torch.nn.functional.one_hot(
        aatype,
        residue_constants.restype_num + 1,
    )
    # Per-residue flags for chi angles that are pi-periodic (symmetric
    # sidechains), looked up from the residue-type table.
    chi_pi_periodic = torch.einsum(
        "...ij,jk->ik",
        residue_type_one_hot.type(angles_sin_cos.dtype),
        angles_sin_cos.new_tensor(residue_constants.chi_pi_periodic),
    )

    # [None] adds a leading dim so the target broadcasts over the
    # predictions' extra leading axis.
    true_chi = chi_angles_sin_cos[None]

    # For pi-periodic angles, the sign-flipped target is equally valid.
    shifted_mask = (1 - 2 * chi_pi_periodic).unsqueeze(-1)
    true_chi_shifted = shifted_mask * true_chi
    sq_chi_error = torch.sum((true_chi - pred_angles) ** 2, dim=-1)
    sq_chi_error_shifted = torch.sum((true_chi_shifted - pred_angles) ** 2, dim=-1)
    sq_chi_error = torch.minimum(sq_chi_error, sq_chi_error_shifted)

    # The ol' switcheroo
    sq_chi_error = sq_chi_error.permute(
        *range(len(sq_chi_error.shape))[1:-2], 0, -2, -1
    )

    sq_chi_loss = masked_mean(chi_mask[..., None, :, :], sq_chi_error, dim=(-1, -2, -3))
    loss = chi_weight * sq_chi_loss

    # Penalize unnormalized angle predictions that stray from unit norm.
    angle_norm = torch.sqrt(torch.sum(unnormalized_angles_sin_cos**2, dim=-1) + eps)
    norm_error = torch.abs(angle_norm - 1.0)
    norm_error = norm_error.permute(*range(len(norm_error.shape))[1:-2], 0, -2, -1)
    angle_norm_loss = masked_mean(
        seq_mask[..., None, :, None], norm_error, dim=(-1, -2, -3)
    )

    loss = loss + angle_norm_weight * angle_norm_loss

    # Average over the batch dimension
    loss = torch.mean(loss)

    return loss
def compute_plddt(logits: torch.Tensor) -> torch.Tensor:
    """Convert per-residue lDDT-bin logits into pLDDT scores on [0, 100].

    The last logits dimension indexes equally spaced probability bins on
    [0, 1]; the score is the probability-weighted mean of the bin centers,
    scaled by 100.
    """
    no_bins = logits.shape[-1]
    width = 1.0 / no_bins
    # Centers of the equally spaced bins on [0, 1].
    centers = torch.arange(
        start=0.5 * width, end=1.0, step=width, device=logits.device
    )
    probs = torch.nn.functional.softmax(logits, dim=-1)
    view_shape = (1,) * (probs.ndim - 1) + tuple(centers.shape)
    expected = torch.sum(probs * centers.view(*view_shape), dim=-1)
    return expected * 100
def lddt(
    all_atom_pred_pos: torch.Tensor,
    all_atom_positions: torch.Tensor,
    all_atom_mask: torch.Tensor,
    cutoff: float = 15.0,
    eps: float = 1e-10,
    per_residue: bool = True,
) -> torch.Tensor:
    """
    lDDT between predicted and true positions.

    Pairs whose TRUE distance is under `cutoff` (self-pairs excluded) are
    scored; each pair contributes the fraction of the four tolerance
    thresholds (0.5, 1.0, 2.0, 4.0) that its |d_true - d_pred| stays
    within. Returns a per-residue score if `per_residue`, otherwise one
    score over the whole distance matrix.
    """
    n = all_atom_mask.shape[-2]
    # Pairwise distance matrices for ground truth and prediction.
    dmat_true = torch.sqrt(
        eps
        + torch.sum(
            (all_atom_positions[..., None, :] - all_atom_positions[..., None, :, :])
            ** 2,
            dim=-1,
        )
    )
    dmat_pred = torch.sqrt(
        eps
        + torch.sum(
            (all_atom_pred_pos[..., None, :] - all_atom_pred_pos[..., None, :, :]) ** 2,
            dim=-1,
        )
    )
    # Score pairs that are within the cutoff in the true structure, exist
    # on both sides of the mask, and are not the diagonal (self-pairs).
    dists_to_score = (
        (dmat_true < cutoff)
        * all_atom_mask
        * permute_final_dims(all_atom_mask, (1, 0))
        * (1.0 - torch.eye(n, device=all_atom_mask.device))
    )

    dist_l1 = torch.abs(dmat_true - dmat_pred)

    # Fraction of the four lDDT thresholds satisfied by each pair.
    score = (
        (dist_l1 < 0.5).type(dist_l1.dtype)
        + (dist_l1 < 1.0).type(dist_l1.dtype)
        + (dist_l1 < 2.0).type(dist_l1.dtype)
        + (dist_l1 < 4.0).type(dist_l1.dtype)
    )
    score = score * 0.25

    dims = (-1,) if per_residue else (-2, -1)
    norm = 1.0 / (eps + torch.sum(dists_to_score, dim=dims))
    score = norm * (eps + torch.sum(dists_to_score * score, dim=dims))

    return score
def lddt_ca(
    all_atom_pred_pos: torch.Tensor,
    all_atom_positions: torch.Tensor,
    all_atom_mask: torch.Tensor,
    cutoff: float = 15.0,
    eps: float = 1e-10,
    per_residue: bool = True,
) -> torch.Tensor:
    """lDDT computed on the CA atoms only."""
    ca_idx = residue_constants.atom_order["CA"]

    # Slice out the CA channel; keep the mask's last dim so `lddt`
    # broadcasting still works.
    ca_pred = all_atom_pred_pos[..., ca_idx, :]
    ca_true = all_atom_positions[..., ca_idx, :]
    ca_mask = all_atom_mask[..., ca_idx : (ca_idx + 1)]  # keep dim

    return lddt(
        ca_pred,
        ca_true,
        ca_mask,
        cutoff=cutoff,
        eps=eps,
        per_residue=per_residue,
    )
def lddt_loss(
    logits: torch.Tensor,
    all_atom_pred_pos: torch.Tensor,
    all_atom_positions: torch.Tensor,
    all_atom_mask: torch.Tensor,
    resolution: torch.Tensor,
    cutoff: float = 15.0,
    no_bins: int = 50,
    min_resolution: float = 0.1,
    max_resolution: float = 3.0,
    eps: float = 1e-10,
    **kwargs,
) -> torch.Tensor:
    """
    Confidence-head loss: cross entropy between predicted lDDT bins and
    the (detached) CA-lDDT of the prediction.

    Args:
        logits: per-residue lDDT bin logits, last dim of size `no_bins`.
        all_atom_pred_pos: predicted atom positions (CA extracted via
            residue_constants.atom_order).
        all_atom_positions: ground-truth atom positions.
        all_atom_mask: atom existence mask.
        resolution: per-example resolution; examples outside
            [min_resolution, max_resolution] contribute zero loss.
        cutoff: lDDT inclusion radius, forwarded to `lddt`.
        no_bins: number of bins used to discretize the lDDT target.
        eps: numerical stabilizer for the masked mean.
    Returns:
        Scalar loss averaged over the batch.
    """
    # Fix: a stray `n = all_atom_mask.shape[-2]` was computed here but
    # never used; removed.
    ca_pos = residue_constants.atom_order["CA"]
    all_atom_pred_pos = all_atom_pred_pos[..., ca_pos, :]
    all_atom_positions = all_atom_positions[..., ca_pos, :]
    all_atom_mask = all_atom_mask[..., ca_pos : (ca_pos + 1)]  # keep dim

    score = lddt(
        all_atom_pred_pos, all_atom_positions, all_atom_mask, cutoff=cutoff, eps=eps
    )

    # The lDDT target is a constant label: no gradient through it.
    score = score.detach()

    # Discretize the [0, 1] score into no_bins one-hot classes.
    bin_index = torch.floor(score * no_bins).long()
    bin_index = torch.clamp(bin_index, max=(no_bins - 1))
    lddt_ca_one_hot = torch.nn.functional.one_hot(bin_index, num_classes=no_bins)

    errors = softmax_cross_entropy(logits, lddt_ca_one_hot)
    all_atom_mask = all_atom_mask.squeeze(-1)
    loss = torch.sum(errors * all_atom_mask, dim=-1) / (
        eps + torch.sum(all_atom_mask, dim=-1)
    )

    # Zero out examples outside the usable resolution window.
    loss = loss * ((resolution >= min_resolution) & (resolution <= max_resolution))

    # Average over the batch dimension
    loss = torch.mean(loss)

    return loss
def distogram_loss(
    logits,
    pseudo_beta,
    pseudo_beta_mask,
    min_bin=2.3125,
    max_bin=21.6875,
    no_bins=64,
    eps=1e-6,
    **kwargs,
):
    """
    Cross entropy between pairwise distance-bin logits and the binned
    pairwise distances of the pseudo-beta atoms.

    Bin boundaries are squared so the squared pairwise distances can be
    binned directly, without a sqrt.
    """
    boundaries = torch.linspace(
        min_bin,
        max_bin,
        no_bins - 1,
        device=logits.device,
    )
    boundaries = boundaries**2

    # Squared pairwise distances, kept with a trailing singleton dim for
    # the comparison against `boundaries` below.
    dists = torch.sum(
        (pseudo_beta[..., None, :] - pseudo_beta[..., None, :, :]) ** 2,
        dim=-1,
        keepdims=True,
    )

    # Index of the bin each distance falls into (count of boundaries passed).
    true_bins = torch.sum(dists > boundaries, dim=-1)

    errors = softmax_cross_entropy(
        logits,
        torch.nn.functional.one_hot(true_bins, no_bins),
    )

    square_mask = pseudo_beta_mask[..., None] * pseudo_beta_mask[..., None, :]

    # FP16-friendly sum. Equivalent to:
    # mean = (torch.sum(errors * square_mask, dim=(-1, -2)) /
    #         (eps + torch.sum(square_mask, dim=(-1, -2))))
    denom = eps + torch.sum(square_mask, dim=(-1, -2))
    mean = errors * square_mask
    mean = torch.sum(mean, dim=-1)
    mean = mean / denom[..., None]
    mean = torch.sum(mean, dim=-1)

    # Average over the batch dimensions
    mean = torch.mean(mean)

    return mean
def _calculate_bin_centers(boundaries: torch.Tensor):
step = boundaries[1] - boundaries[0]
bin_centers = boundaries + step / 2
bin_centers = torch.cat(
[bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0
)
return bin_centers
def _calculate_expected_aligned_error(
    alignment_confidence_breaks: torch.Tensor,
    aligned_distance_error_probs: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Expected aligned error under the per-bin probabilities, plus the
    largest representable error (the last bin center)."""
    centers = _calculate_bin_centers(alignment_confidence_breaks)
    expected_error = torch.sum(aligned_distance_error_probs * centers, dim=-1)
    return expected_error, centers[-1]
def compute_predicted_aligned_error(
    logits: torch.Tensor,
    max_bin: int = 31,
    no_bins: int = 64,
    **kwargs,
) -> Dict[str, torch.Tensor]:
    """Computes aligned confidence metrics from logits.
    Args:
        logits: [*, num_res, num_res, num_bins] the logits output from
            PredictedAlignedErrorHead.
        max_bin: Maximum bin value
        no_bins: Number of bins
    Returns:
        aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted
            aligned error probabilities over bins for each residue pair.
        predicted_aligned_error: [*, num_res, num_res] the expected aligned distance
            error for each pair of residues.
        max_predicted_aligned_error: [*] the maximum predicted error possible.
    """
    # [no_bins - 1] equally spaced bin edges on [0, max_bin].
    boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)

    aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)
    (
        predicted_aligned_error,
        max_predicted_aligned_error,
    ) = _calculate_expected_aligned_error(
        alignment_confidence_breaks=boundaries,
        aligned_distance_error_probs=aligned_confidence_probs,
    )

    return {
        "aligned_confidence_probs": aligned_confidence_probs,
        "predicted_aligned_error": predicted_aligned_error,
        "max_predicted_aligned_error": max_predicted_aligned_error,
    }
def compute_tm(
    logits: torch.Tensor,
    residue_weights: Optional[torch.Tensor] = None,
    max_bin: int = 31,
    no_bins: int = 64,
    eps: float = 1e-8,
    **kwargs,
) -> torch.Tensor:
    """
    Predicted TM-score from pairwise aligned-error logits.

    `residue_weights` (all ones by default) weight both the per-residue
    average and the choice of the best alignment frame.
    """
    if residue_weights is None:
        residue_weights = logits.new_ones(logits.shape[-2])

    boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)

    bin_centers = _calculate_bin_centers(boundaries)
    # TM-score d0 normalization; flooring the length at 19 keeps d0 > 0.
    clipped_n = max(torch.sum(residue_weights), 19)
    d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8

    probs = torch.nn.functional.softmax(logits, dim=-1)

    # Expected TM contribution per pair under the bin distribution.
    tm_per_bin = 1.0 / (1 + (bin_centers**2) / (d0**2))
    predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)

    normed_residue_mask = residue_weights / (eps + residue_weights.sum())
    per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)

    # Report the score for the best (weighted) alignment frame.
    weighted = per_alignment * residue_weights

    argmax = (weighted == torch.max(weighted)).nonzero()[0]
    return per_alignment[tuple(argmax)]
def tm_loss(
    logits,
    final_affine_tensor,
    backbone_rigid_tensor,
    backbone_rigid_mask,
    resolution,
    max_bin=31,
    no_bins=64,
    min_resolution: float = 0.1,
    max_resolution: float = 3.0,
    eps=1e-8,
    **kwargs,
):
    """
    Cross-entropy loss for the predicted-aligned-error head.

    Targets are the (detached) squared frame-aligned point errors between
    the final predicted affines and the ground-truth backbone frames,
    binned on squared boundaries over [0, max_bin]. Examples outside the
    resolution window contribute zero.
    """
    pred_affine = Rigid.from_tensor_7(final_affine_tensor)
    backbone_rigid = Rigid.from_tensor_4x4(backbone_rigid_tensor)

    # Each frame's translation expressed in every frame's local coordinates.
    def _points(affine):
        pts = affine.get_trans()[..., None, :, :]
        return affine.invert()[..., None].apply(pts)

    sq_diff = torch.sum((_points(pred_affine) - _points(backbone_rigid)) ** 2, dim=-1)

    # Targets are constants: no gradient through the binning.
    sq_diff = sq_diff.detach()

    boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
    boundaries = boundaries**2
    true_bins = torch.sum(sq_diff[..., None] > boundaries, dim=-1)

    errors = softmax_cross_entropy(
        logits, torch.nn.functional.one_hot(true_bins, no_bins)
    )

    square_mask = backbone_rigid_mask[..., None] * backbone_rigid_mask[..., None, :]

    loss = torch.sum(errors * square_mask, dim=-1)
    scale = 0.5  # hack to help FP16 training along
    denom = eps + torch.sum(scale * square_mask, dim=(-1, -2))
    loss = loss / denom[..., None]
    loss = torch.sum(loss, dim=-1)
    loss = loss * scale

    loss = loss * ((resolution >= min_resolution) & (resolution <= max_resolution))

    # Average over the loss dimension
    loss = torch.mean(loss)

    return loss
def between_residue_bond_loss(
    pred_atom_positions: torch.Tensor,  # (*, N, 37/14, 3)
    pred_atom_mask: torch.Tensor,  # (*, N, 37/14)
    residue_index: torch.Tensor,  # (*, N)
    aatype: torch.Tensor,  # (*, N)
    tolerance_factor_soft=12.0,
    tolerance_factor_hard=12.0,
    eps=1e-6,
) -> Dict[str, torch.Tensor]:
    """Flat-bottom loss to penalize structural violations between residues.
    This is a loss penalizing any violation of the geometry around the peptide
    bond between consecutive amino acids. This loss corresponds to
    Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 44, 45.
    Args:
      pred_atom_positions: Atom positions in atom37/14 representation
      pred_atom_mask: Atom mask in atom37/14 representation
      residue_index: Residue index for given amino acid, this is assumed to be
        monotonically increasing.
      aatype: Amino acid type of given residue
      tolerance_factor_soft: soft tolerance factor measured in standard deviations
        of pdb distributions
      tolerance_factor_hard: hard tolerance factor measured in standard deviations
        of pdb distributions
    Returns:
      Dict containing:
        * 'c_n_loss_mean': Loss for peptide bond length violations
        * 'ca_c_n_loss_mean': Loss for violations of bond angle around C spanned
            by CA, C, N
        * 'c_n_ca_loss_mean': Loss for violations of bond angle around N spanned
            by C, N, CA
        * 'per_residue_loss_sum': sum of all losses for each residue
        * 'per_residue_violation_mask': mask denoting all residues with violation
            present.
    """
    # Get the positions of the relevant backbone atoms.
    # Indices 0/1/2 select N/CA/C in both atom37 and atom14 layouts.
    this_ca_pos = pred_atom_positions[..., :-1, 1, :]
    this_ca_mask = pred_atom_mask[..., :-1, 1]
    this_c_pos = pred_atom_positions[..., :-1, 2, :]
    this_c_mask = pred_atom_mask[..., :-1, 2]
    next_n_pos = pred_atom_positions[..., 1:, 0, :]
    next_n_mask = pred_atom_mask[..., 1:, 0]
    next_ca_pos = pred_atom_positions[..., 1:, 1, :]
    next_ca_mask = pred_atom_mask[..., 1:, 1]
    # Only consecutive residue indices form a real peptide bond.
    has_no_gap_mask = (residue_index[..., 1:] - residue_index[..., :-1]) == 1.0

    # Compute loss for the C--N bond.
    c_n_bond_length = torch.sqrt(
        eps + torch.sum((this_c_pos - next_n_pos) ** 2, dim=-1)
    )

    # The C-N bond to proline has slightly different length because of the ring.
    next_is_proline = aatype[..., 1:] == residue_constants.resname_to_idx["PRO"]
    gt_length = (~next_is_proline) * residue_constants.between_res_bond_length_c_n[
        0
    ] + next_is_proline * residue_constants.between_res_bond_length_c_n[1]
    gt_stddev = (
        ~next_is_proline
    ) * residue_constants.between_res_bond_length_stddev_c_n[
        0
    ] + next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[
        1
    ]
    c_n_bond_length_error = torch.sqrt(eps + (c_n_bond_length - gt_length) ** 2)
    # Flat-bottom: no penalty within `tolerance_factor_soft` stddevs.
    c_n_loss_per_residue = torch.nn.functional.relu(
        c_n_bond_length_error - tolerance_factor_soft * gt_stddev
    )
    mask = this_c_mask * next_n_mask * has_no_gap_mask
    c_n_loss = torch.sum(mask * c_n_loss_per_residue, dim=-1) / (
        torch.sum(mask, dim=-1) + eps
    )
    c_n_violation_mask = mask * (
        c_n_bond_length_error > (tolerance_factor_hard * gt_stddev)
    )

    # Compute loss for the angles.
    ca_c_bond_length = torch.sqrt(
        eps + torch.sum((this_ca_pos - this_c_pos) ** 2, dim=-1)
    )
    n_ca_bond_length = torch.sqrt(
        eps + torch.sum((next_n_pos - next_ca_pos) ** 2, dim=-1)
    )

    c_ca_unit_vec = (this_ca_pos - this_c_pos) / ca_c_bond_length[..., None]
    c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length[..., None]
    n_ca_unit_vec = (next_ca_pos - next_n_pos) / n_ca_bond_length[..., None]

    ca_c_n_cos_angle = torch.sum(c_ca_unit_vec * c_n_unit_vec, dim=-1)
    gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0]
    # NOTE(review): this uses the C--N bond-length stddev as the tolerance
    # for the CA-C-N *angle*, while the C-N-CA angle below uses the angle
    # stddev (between_res_cos_angles_c_n_ca[1]). Looks inconsistent —
    # confirm against the reference implementation before changing.
    gt_stddev = residue_constants.between_res_bond_length_stddev_c_n[0]
    ca_c_n_cos_angle_error = torch.sqrt(eps + (ca_c_n_cos_angle - gt_angle) ** 2)
    ca_c_n_loss_per_residue = torch.nn.functional.relu(
        ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev
    )
    mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask
    ca_c_n_loss = torch.sum(mask * ca_c_n_loss_per_residue, dim=-1) / (
        torch.sum(mask, dim=-1) + eps
    )
    ca_c_n_violation_mask = mask * (
        ca_c_n_cos_angle_error > (tolerance_factor_hard * gt_stddev)
    )

    c_n_ca_cos_angle = torch.sum((-c_n_unit_vec) * n_ca_unit_vec, dim=-1)
    gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0]
    gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1]
    c_n_ca_cos_angle_error = torch.sqrt(eps + torch.square(c_n_ca_cos_angle - gt_angle))
    c_n_ca_loss_per_residue = torch.nn.functional.relu(
        c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev
    )
    mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask
    c_n_ca_loss = torch.sum(mask * c_n_ca_loss_per_residue, dim=-1) / (
        torch.sum(mask, dim=-1) + eps
    )
    c_n_ca_violation_mask = mask * (
        c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev)
    )

    # Compute a per residue loss (equally distribute the loss to both
    # neighbouring residues).
    per_residue_loss_sum = (
        c_n_loss_per_residue + ca_c_n_loss_per_residue + c_n_ca_loss_per_residue
    )
    per_residue_loss_sum = 0.5 * (
        torch.nn.functional.pad(per_residue_loss_sum, (0, 1))
        + torch.nn.functional.pad(per_residue_loss_sum, (1, 0))
    )

    # Compute hard violations.
    violation_mask = torch.max(
        torch.stack(
            [c_n_violation_mask, ca_c_n_violation_mask, c_n_ca_violation_mask],
            dim=-2,
        ),
        dim=-2,
    )[0]
    # A residue is violating if either of its flanking bonds violates.
    violation_mask = torch.maximum(
        torch.nn.functional.pad(violation_mask, (0, 1)),
        torch.nn.functional.pad(violation_mask, (1, 0)),
    )

    return {
        "c_n_loss_mean": c_n_loss,
        "ca_c_n_loss_mean": ca_c_n_loss,
        "c_n_ca_loss_mean": c_n_ca_loss,
        "per_residue_loss_sum": per_residue_loss_sum,
        "per_residue_violation_mask": violation_mask,
    }
def between_residue_clash_loss(
atom14_pred_positions: torch.Tensor,
atom14_atom_exists: torch.Tensor,
atom14_atom_radius: torch.Tensor,
residue_index: torch.Tensor,
overlap_tolerance_soft=1.5,
overlap_tolerance_hard=1.5,
eps=1e-10,
) -> Dict[str, torch.Tensor]:
"""Loss to penalize steric clashes between residues.
This is a loss penalizing any steric clashes due to non bonded atoms in
different peptides coming too close. This loss corresponds to the part with
different residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
atom14_atom_radius: Van der Waals radius for each atom.
residue_index: Residue index for given amino acid.
overlap_tolerance_soft: Soft tolerance factor.
overlap_tolerance_hard: Hard tolerance factor.
Returns:
Dict containing:
* 'mean_loss': average clash loss
* 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 14)
* 'per_atom_clash_mask': mask whether atom clashes with any other atom
shape (N, 14)
"""
fp_type = atom14_pred_positions.dtype
# Create the distance matrix.
# (N, N, 14, 14)
dists = torch.sqrt(
eps
+ torch.sum(
(
atom14_pred_positions[..., :, None, :, None, :]
- atom14_pred_positions[..., None, :, None, :, :]
)
** 2,
dim=-1,
)
)
# Create the mask for valid distances.
# shape (N, N, 14, 14)
dists_mask = (
atom14_atom_exists[..., :, None, :, None]
* atom14_atom_exists[..., None, :, None, :]
).type(fp_type)
# Mask out all the duplicate entries in the lower triangular matrix.
# Also mask out the diagonal (atom-pairs from the same residue) -- these atoms
# are handled separately.
dists_mask = dists_mask * (
residue_index[..., :, None, None, None]
< residue_index[..., None, :, None, None]
)
# Backbone C--N bond between subsequent residues is no clash.
c_one_hot = torch.nn.functional.one_hot(residue_index.new_tensor(2), num_classes=14)
c_one_hot = c_one_hot.reshape(
*((1,) * len(residue_index.shape[:-1])), *c_one_hot.shape
)
c_one_hot = c_one_hot.type(fp_type)
n_one_hot = torch.nn.functional.one_hot(residue_index.new_tensor(0), num_classes=14)
n_one_hot = n_one_hot.reshape(
*((1,) * len(residue_index.shape[:-1])), *n_one_hot.shape
)
n_one_hot = n_one_hot.type(fp_type)
neighbour_mask = (residue_index[..., :, None, None, None] + 1) == residue_index[
..., None, :, None, None
]
c_n_bonds = (
neighbour_mask
* c_one_hot[..., None, None, :, None]
* n_one_hot[..., None, None, None, :]
)
dists_mask = dists_mask * (1.0 - c_n_bonds)
# Disulfide bridge between two cysteines is no clash.
cys = residue_constants.restype_name_to_atom14_names["CYS"]
cys_sg_idx = cys.index("SG")
cys_sg_idx = residue_index.new_tensor(cys_sg_idx)
cys_sg_idx = cys_sg_idx.reshape(*((1,) * len(residue_index.shape[:-1])), 1).squeeze(
-1
)
cys_sg_one_hot = torch.nn.functional.one_hot(cys_sg_idx, num_classes=14)
disulfide_bonds = (
cys_sg_one_hot[..., None, None, :, None]
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | true |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/openfold_utils/_residue_constants.py | src/cheap/openfold_utils/_residue_constants.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in AlphaFold."""
import collections
import functools
import os
from typing import Mapping, List, Tuple
from importlib import resources
import numpy as np
import tree
# Distance from one CA to next CA [trans configuration: omega = 180].
ca_ca = 3.80209737096
# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
# chi angles so their chi angle lists are empty.
chi_angles_atoms = {
"ALA": [],
# Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
"ARG": [
["N", "CA", "CB", "CG"],
["CA", "CB", "CG", "CD"],
["CB", "CG", "CD", "NE"],
["CG", "CD", "NE", "CZ"],
],
"ASN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
"ASP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
"CYS": [["N", "CA", "CB", "SG"]],
"GLN": [
["N", "CA", "CB", "CG"],
["CA", "CB", "CG", "CD"],
["CB", "CG", "CD", "OE1"],
],
"GLU": [
["N", "CA", "CB", "CG"],
["CA", "CB", "CG", "CD"],
["CB", "CG", "CD", "OE1"],
],
"GLY": [],
"HIS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "ND1"]],
"ILE": [["N", "CA", "CB", "CG1"], ["CA", "CB", "CG1", "CD1"]],
"LEU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"LYS": [
["N", "CA", "CB", "CG"],
["CA", "CB", "CG", "CD"],
["CB", "CG", "CD", "CE"],
["CG", "CD", "CE", "NZ"],
],
"MET": [
["N", "CA", "CB", "CG"],
["CA", "CB", "CG", "SD"],
["CB", "CG", "SD", "CE"],
],
"PHE": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"PRO": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"]],
"SER": [["N", "CA", "CB", "OG"]],
"THR": [["N", "CA", "CB", "OG1"]],
"TRP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"TYR": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"VAL": [["N", "CA", "CB", "CG1"]],
}
# If chi angles given in fixed-length array, this matrix determines how to mask
# them for each AA type. The order is as per restype_order (see below).
chi_angles_mask = [
[0.0, 0.0, 0.0, 0.0], # ALA
[1.0, 1.0, 1.0, 1.0], # ARG
[1.0, 1.0, 0.0, 0.0], # ASN
[1.0, 1.0, 0.0, 0.0], # ASP
[1.0, 0.0, 0.0, 0.0], # CYS
[1.0, 1.0, 1.0, 0.0], # GLN
[1.0, 1.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[1.0, 1.0, 0.0, 0.0], # HIS
[1.0, 1.0, 0.0, 0.0], # ILE
[1.0, 1.0, 0.0, 0.0], # LEU
[1.0, 1.0, 1.0, 1.0], # LYS
[1.0, 1.0, 1.0, 0.0], # MET
[1.0, 1.0, 0.0, 0.0], # PHE
[1.0, 1.0, 0.0, 0.0], # PRO
[1.0, 0.0, 0.0, 0.0], # SER
[1.0, 0.0, 0.0, 0.0], # THR
[1.0, 1.0, 0.0, 0.0], # TRP
[1.0, 1.0, 0.0, 0.0], # TYR
[1.0, 0.0, 0.0, 0.0], # VAL
]
# The following chi angles are pi periodic: they can be rotated by a multiple
# of pi without affecting the structure.
chi_pi_periodic = [
[0.0, 0.0, 0.0, 0.0], # ALA
[0.0, 0.0, 0.0, 0.0], # ARG
[0.0, 0.0, 0.0, 0.0], # ASN
[0.0, 1.0, 0.0, 0.0], # ASP
[0.0, 0.0, 0.0, 0.0], # CYS
[0.0, 0.0, 0.0, 0.0], # GLN
[0.0, 0.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[0.0, 0.0, 0.0, 0.0], # HIS
[0.0, 0.0, 0.0, 0.0], # ILE
[0.0, 0.0, 0.0, 0.0], # LEU
[0.0, 0.0, 0.0, 0.0], # LYS
[0.0, 0.0, 0.0, 0.0], # MET
[0.0, 1.0, 0.0, 0.0], # PHE
[0.0, 0.0, 0.0, 0.0], # PRO
[0.0, 0.0, 0.0, 0.0], # SER
[0.0, 0.0, 0.0, 0.0], # THR
[0.0, 0.0, 0.0, 0.0], # TRP
[0.0, 1.0, 0.0, 0.0], # TYR
[0.0, 0.0, 0.0, 0.0], # VAL
[0.0, 0.0, 0.0, 0.0], # UNK
]
# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
# psi and chi angles:
# 0: 'backbone group',
# 1: 'pre-omega-group', (empty)
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
# 3: 'psi-group',
# 4,5,6,7: 'chi1,2,3,4-group'
# The atom positions are relative to the axis-end-atom of the corresponding
# rotation axis. The x-axis is in direction of the rotation axis, and the y-axis
# is defined such that the dihedral-angle-defining atom (the last entry in
# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
# format: [atomname, group_idx, rel_position]
rigid_group_atom_positions = {
"ALA": [
["N", 0, (-0.525, 1.363, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.526, -0.000, -0.000)],
["CB", 0, (-0.529, -0.774, -1.205)],
["O", 3, (0.627, 1.062, 0.000)],
],
"ARG": [
["N", 0, (-0.524, 1.362, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.525, -0.000, -0.000)],
["CB", 0, (-0.524, -0.778, -1.209)],
["O", 3, (0.626, 1.062, 0.000)],
["CG", 4, (0.616, 1.390, -0.000)],
["CD", 5, (0.564, 1.414, 0.000)],
["NE", 6, (0.539, 1.357, -0.000)],
["NH1", 7, (0.206, 2.301, 0.000)],
["NH2", 7, (2.078, 0.978, -0.000)],
["CZ", 7, (0.758, 1.093, -0.000)],
],
"ASN": [
["N", 0, (-0.536, 1.357, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.526, -0.000, -0.000)],
["CB", 0, (-0.531, -0.787, -1.200)],
["O", 3, (0.625, 1.062, 0.000)],
["CG", 4, (0.584, 1.399, 0.000)],
["ND2", 5, (0.593, -1.188, 0.001)],
["OD1", 5, (0.633, 1.059, 0.000)],
],
"ASP": [
["N", 0, (-0.525, 1.362, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.527, 0.000, -0.000)],
["CB", 0, (-0.526, -0.778, -1.208)],
["O", 3, (0.626, 1.062, -0.000)],
["CG", 4, (0.593, 1.398, -0.000)],
["OD1", 5, (0.610, 1.091, 0.000)],
["OD2", 5, (0.592, -1.101, -0.003)],
],
"CYS": [
["N", 0, (-0.522, 1.362, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.524, 0.000, 0.000)],
["CB", 0, (-0.519, -0.773, -1.212)],
["O", 3, (0.625, 1.062, -0.000)],
["SG", 4, (0.728, 1.653, 0.000)],
],
"GLN": [
["N", 0, (-0.526, 1.361, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.526, 0.000, 0.000)],
["CB", 0, (-0.525, -0.779, -1.207)],
["O", 3, (0.626, 1.062, -0.000)],
["CG", 4, (0.615, 1.393, 0.000)],
["CD", 5, (0.587, 1.399, -0.000)],
["NE2", 6, (0.593, -1.189, -0.001)],
["OE1", 6, (0.634, 1.060, 0.000)],
],
"GLU": [
["N", 0, (-0.528, 1.361, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.526, -0.000, -0.000)],
["CB", 0, (-0.526, -0.781, -1.207)],
["O", 3, (0.626, 1.062, 0.000)],
["CG", 4, (0.615, 1.392, 0.000)],
["CD", 5, (0.600, 1.397, 0.000)],
["OE1", 6, (0.607, 1.095, -0.000)],
["OE2", 6, (0.589, -1.104, -0.001)],
],
"GLY": [
["N", 0, (-0.572, 1.337, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.517, -0.000, -0.000)],
["O", 3, (0.626, 1.062, -0.000)],
],
"HIS": [
["N", 0, (-0.527, 1.360, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.525, 0.000, 0.000)],
["CB", 0, (-0.525, -0.778, -1.208)],
["O", 3, (0.625, 1.063, 0.000)],
["CG", 4, (0.600, 1.370, -0.000)],
["CD2", 5, (0.889, -1.021, 0.003)],
["ND1", 5, (0.744, 1.160, -0.000)],
["CE1", 5, (2.030, 0.851, 0.002)],
["NE2", 5, (2.145, -0.466, 0.004)],
],
"ILE": [
["N", 0, (-0.493, 1.373, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.527, -0.000, -0.000)],
["CB", 0, (-0.536, -0.793, -1.213)],
["O", 3, (0.627, 1.062, -0.000)],
["CG1", 4, (0.534, 1.437, -0.000)],
["CG2", 4, (0.540, -0.785, -1.199)],
["CD1", 5, (0.619, 1.391, 0.000)],
],
"LEU": [
["N", 0, (-0.520, 1.363, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.525, -0.000, -0.000)],
["CB", 0, (-0.522, -0.773, -1.214)],
["O", 3, (0.625, 1.063, -0.000)],
["CG", 4, (0.678, 1.371, 0.000)],
["CD1", 5, (0.530, 1.430, -0.000)],
["CD2", 5, (0.535, -0.774, 1.200)],
],
"LYS": [
["N", 0, (-0.526, 1.362, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.526, 0.000, 0.000)],
["CB", 0, (-0.524, -0.778, -1.208)],
["O", 3, (0.626, 1.062, -0.000)],
["CG", 4, (0.619, 1.390, 0.000)],
["CD", 5, (0.559, 1.417, 0.000)],
["CE", 6, (0.560, 1.416, 0.000)],
["NZ", 7, (0.554, 1.387, 0.000)],
],
"MET": [
["N", 0, (-0.521, 1.364, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.525, 0.000, 0.000)],
["CB", 0, (-0.523, -0.776, -1.210)],
["O", 3, (0.625, 1.062, -0.000)],
["CG", 4, (0.613, 1.391, -0.000)],
["SD", 5, (0.703, 1.695, 0.000)],
["CE", 6, (0.320, 1.786, -0.000)],
],
"PHE": [
["N", 0, (-0.518, 1.363, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.524, 0.000, -0.000)],
["CB", 0, (-0.525, -0.776, -1.212)],
["O", 3, (0.626, 1.062, -0.000)],
["CG", 4, (0.607, 1.377, 0.000)],
["CD1", 5, (0.709, 1.195, -0.000)],
["CD2", 5, (0.706, -1.196, 0.000)],
["CE1", 5, (2.102, 1.198, -0.000)],
["CE2", 5, (2.098, -1.201, -0.000)],
["CZ", 5, (2.794, -0.003, -0.001)],
],
"PRO": [
["N", 0, (-0.566, 1.351, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.527, -0.000, 0.000)],
["CB", 0, (-0.546, -0.611, -1.293)],
["O", 3, (0.621, 1.066, 0.000)],
["CG", 4, (0.382, 1.445, 0.0)],
# ['CD', 5, (0.427, 1.440, 0.0)],
["CD", 5, (0.477, 1.424, 0.0)], # manually made angle 2 degrees larger
],
"SER": [
["N", 0, (-0.529, 1.360, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.525, -0.000, -0.000)],
["CB", 0, (-0.518, -0.777, -1.211)],
["O", 3, (0.626, 1.062, -0.000)],
["OG", 4, (0.503, 1.325, 0.000)],
],
"THR": [
["N", 0, (-0.517, 1.364, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.526, 0.000, -0.000)],
["CB", 0, (-0.516, -0.793, -1.215)],
["O", 3, (0.626, 1.062, 0.000)],
["CG2", 4, (0.550, -0.718, -1.228)],
["OG1", 4, (0.472, 1.353, 0.000)],
],
"TRP": [
["N", 0, (-0.521, 1.363, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.525, -0.000, 0.000)],
["CB", 0, (-0.523, -0.776, -1.212)],
["O", 3, (0.627, 1.062, 0.000)],
["CG", 4, (0.609, 1.370, -0.000)],
["CD1", 5, (0.824, 1.091, 0.000)],
["CD2", 5, (0.854, -1.148, -0.005)],
["CE2", 5, (2.186, -0.678, -0.007)],
["CE3", 5, (0.622, -2.530, -0.007)],
["NE1", 5, (2.140, 0.690, -0.004)],
["CH2", 5, (3.028, -2.890, -0.013)],
["CZ2", 5, (3.283, -1.543, -0.011)],
["CZ3", 5, (1.715, -3.389, -0.011)],
],
"TYR": [
["N", 0, (-0.522, 1.362, 0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.524, -0.000, -0.000)],
["CB", 0, (-0.522, -0.776, -1.213)],
["O", 3, (0.627, 1.062, -0.000)],
["CG", 4, (0.607, 1.382, -0.000)],
["CD1", 5, (0.716, 1.195, -0.000)],
["CD2", 5, (0.713, -1.194, -0.001)],
["CE1", 5, (2.107, 1.200, -0.002)],
["CE2", 5, (2.104, -1.201, -0.003)],
["OH", 5, (4.168, -0.002, -0.005)],
["CZ", 5, (2.791, -0.001, -0.003)],
],
"VAL": [
["N", 0, (-0.494, 1.373, -0.000)],
["CA", 0, (0.000, 0.000, 0.000)],
["C", 0, (1.527, -0.000, -0.000)],
["CB", 0, (-0.533, -0.795, -1.213)],
["O", 3, (0.627, 1.062, -0.000)],
["CG1", 4, (0.540, 1.429, -0.000)],
["CG2", 4, (0.533, -0.776, 1.203)],
],
}
# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
residue_atoms = {
"ALA": ["C", "CA", "CB", "N", "O"],
"ARG": ["C", "CA", "CB", "CG", "CD", "CZ", "N", "NE", "O", "NH1", "NH2"],
"ASP": ["C", "CA", "CB", "CG", "N", "O", "OD1", "OD2"],
"ASN": ["C", "CA", "CB", "CG", "N", "ND2", "O", "OD1"],
"CYS": ["C", "CA", "CB", "N", "O", "SG"],
"GLU": ["C", "CA", "CB", "CG", "CD", "N", "O", "OE1", "OE2"],
"GLN": ["C", "CA", "CB", "CG", "CD", "N", "NE2", "O", "OE1"],
"GLY": ["C", "CA", "N", "O"],
"HIS": ["C", "CA", "CB", "CG", "CD2", "CE1", "N", "ND1", "NE2", "O"],
"ILE": ["C", "CA", "CB", "CG1", "CG2", "CD1", "N", "O"],
"LEU": ["C", "CA", "CB", "CG", "CD1", "CD2", "N", "O"],
"LYS": ["C", "CA", "CB", "CG", "CD", "CE", "N", "NZ", "O"],
"MET": ["C", "CA", "CB", "CG", "CE", "N", "O", "SD"],
"PHE": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O"],
"PRO": ["C", "CA", "CB", "CG", "CD", "N", "O"],
"SER": ["C", "CA", "CB", "N", "O", "OG"],
"THR": ["C", "CA", "CB", "CG2", "N", "O", "OG1"],
"TRP": [
"C",
"CA",
"CB",
"CG",
"CD1",
"CD2",
"CE2",
"CE3",
"CZ2",
"CZ3",
"CH2",
"N",
"NE1",
"O",
],
"TYR": [
"C",
"CA",
"CB",
"CG",
"CD1",
"CD2",
"CE1",
"CE2",
"CZ",
"N",
"O",
"OH",
],
"VAL": ["C", "CA", "CB", "CG1", "CG2", "N", "O"],
}
# Naming swaps for ambiguous atom names.
# Due to symmetries in the amino acids the naming of atoms is ambiguous in
# 4 of the 20 amino acids.
# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
# in LEU, VAL and ARG can be resolved by using the 3d constellations of
# the 'ambiguous' atoms and their neighbours)
# Because for LEU, VAL and ARG, no ambiguous exist when the prediction output is chi angle instead of the location of individual atoms.
# For the rest (ASP and others), when you rotate the bond 180 degrees, you get the same configuration due to symmetry.
residue_atom_renaming_swaps = {
"ASP": {"OD1": "OD2"},
"GLU": {"OE1": "OE2"},
"PHE": {"CD1": "CD2", "CE1": "CE2"},
"TYR": {"CD1": "CD2", "CE1": "CE2"},
}
# Van der Waals radii [Angstroem] of the atoms (from Wikipedia)
van_der_waals_radius = {
"C": 1.7,
"N": 1.55,
"O": 1.52,
"S": 1.8,
}
# A covalent bond between two named atoms, with literature bond length
# (Angstrom) and its standard deviation.
Bond = collections.namedtuple("Bond", ["atom1_name", "atom2_name", "length", "stddev"])
# A bond angle defined by three named atoms (vertex at atom2), in radians.
# NOTE: the third field is spelled "atom3name" (no underscore) — code below
# accesses it by that exact name, so the spelling must not be "fixed".
BondAngle = collections.namedtuple(
    "BondAngle",
    ["atom1_name", "atom2_name", "atom3name", "angle_rad", "stddev"],
)
@functools.lru_cache(maxsize=None)
def load_stereo_chemical_props() -> Tuple[
    Mapping[str, List[Bond]],
    Mapping[str, List[Bond]],
    Mapping[str, List[BondAngle]],
]:
    """Load stereo_chemical_props.txt into lookup tables.

    Parses the literature bond-length and bond-angle tables shipped with
    openfold, and additionally converts every bond angle into the length of
    the opposite triangle edge ("virtual bonds") via the law of cosines.

    Returns:
        residue_bonds: resname -> list of Bond tuples.
        residue_virtual_bonds: resname -> list of Bond tuples (implied
            atom1-atom3 distances derived from the bond angles).
        residue_bond_angles: resname -> list of BondAngle tuples (radians).
    """
    # TODO: this file should be downloaded in a setup script
    raw_text = resources.read_text(
        "openfold.resources", "stereo_chemical_props.txt"
    )
    line_iter = iter(raw_text.splitlines())

    # ---- Section 1: bond lengths (terminated by a lone "-") ----
    residue_bonds = {}
    next(line_iter)  # skip header line
    for row in line_iter:
        if row.strip() == "-":
            break
        pair, resname, length, stddev = row.split()
        first_atom, second_atom = pair.split("-")
        residue_bonds.setdefault(resname, []).append(
            Bond(first_atom, second_atom, float(length), float(stddev))
        )
    residue_bonds["UNK"] = []

    # ---- Section 2: bond angles, converted degrees -> radians ----
    residue_bond_angles = {}
    next(line_iter)  # skip empty line
    next(line_iter)  # skip header line
    for row in line_iter:
        if row.strip() == "-":
            break
        triple, resname, angle_degree, stddev_degree = row.split()
        a1, a2, a3 = triple.split("-")
        residue_bond_angles.setdefault(resname, []).append(
            BondAngle(
                a1,
                a2,
                a3,
                float(angle_degree) / 180.0 * np.pi,
                float(stddev_degree) / 180.0 * np.pi,
            )
        )
    residue_bond_angles["UNK"] = []

    def _bond_key(name_a, name_b):
        """Order-independent lookup key for an atom pair."""
        return "-".join(sorted([name_a, name_b]))

    # ---- Virtual bonds: atom1-atom3 distance from the law of cosines ----
    residue_virtual_bonds = {}
    for resname, angles in residue_bond_angles.items():
        # Fast lookup of real bonds by unordered atom-pair key.
        lookup = {
            _bond_key(b.atom1_name, b.atom2_name): b
            for b in residue_bonds[resname]
        }
        virtuals = []
        for ba in angles:
            bond1 = lookup[_bond_key(ba.atom1_name, ba.atom2_name)]
            bond2 = lookup[_bond_key(ba.atom2_name, ba.atom3name)]
            gamma = ba.angle_rad
            # c^2 = a^2 + b^2 - 2ab*cos(gamma).
            length = np.sqrt(
                bond1.length**2
                + bond2.length**2
                - 2 * bond1.length * bond2.length * np.cos(gamma)
            )
            # Propagation of uncertainty assuming uncorrelated errors.
            dl_outer = 0.5 / length
            dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer
            dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer
            dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer
            stddev = np.sqrt(
                (dl_dgamma * ba.stddev) ** 2
                + (dl_db1 * bond1.stddev) ** 2
                + (dl_db2 * bond2.stddev) ** 2
            )
            virtuals.append(Bond(ba.atom1_name, ba.atom3name, length, stddev))
        residue_virtual_bonds[resname] = virtuals

    return (residue_bonds, residue_virtual_bonds, residue_bond_angles)
# Between-residue bond lengths for general bonds (first element) and for Proline
# (second element).
between_res_bond_length_c_n = [1.329, 1.341]
between_res_bond_length_stddev_c_n = [0.014, 0.016]
# Between-residue cos_angles.
between_res_cos_angles_c_n_ca = [-0.5203, 0.0353] # degrees: 121.352 +- 2.315
between_res_cos_angles_ca_c_n = [-0.4473, 0.0311] # degrees: 116.568 +- 1.995
# This mapping is used when we need to store atom data in a format that requires
# fixed atom data size for every residue (e.g. a numpy array).
atom_types = [
"N",
"CA",
"C",
"CB",
"O",
"CG",
"CG1",
"CG2",
"OG",
"OG1",
"SG",
"CD",
"CD1",
"CD2",
"ND1",
"ND2",
"OD1",
"OD2",
"SD",
"CE",
"CE1",
"CE2",
"CE3",
"NE",
"NE1",
"NE2",
"OE1",
"OE2",
"CH2",
"NH1",
"NH2",
"OH",
"CZ",
"CZ2",
"CZ3",
"NZ",
"OXT",
]
atom_order = {atom_type: i for i, atom_type in enumerate(atom_types)}
atom_type_num = len(atom_types) # := 37.
# A compact atom encoding with 14 columns
# pylint: disable=line-too-long
# pylint: disable=bad-whitespace
restype_name_to_atom14_names = {
"ALA": ["N", "CA", "C", "O", "CB", "", "", "", "", "", "", "", "", ""],
"ARG": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD",
"NE",
"CZ",
"NH1",
"NH2",
"",
"",
"",
],
"ASN": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"OD1",
"ND2",
"",
"",
"",
"",
"",
"",
],
"ASP": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"OD1",
"OD2",
"",
"",
"",
"",
"",
"",
],
"CYS": ["N", "CA", "C", "O", "CB", "SG", "", "", "", "", "", "", "", ""],
"GLN": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD",
"OE1",
"NE2",
"",
"",
"",
"",
"",
],
"GLU": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD",
"OE1",
"OE2",
"",
"",
"",
"",
"",
],
"GLY": ["N", "CA", "C", "O", "", "", "", "", "", "", "", "", "", ""],
"HIS": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"ND1",
"CD2",
"CE1",
"NE2",
"",
"",
"",
"",
],
"ILE": [
"N",
"CA",
"C",
"O",
"CB",
"CG1",
"CG2",
"CD1",
"",
"",
"",
"",
"",
"",
],
"LEU": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD1",
"CD2",
"",
"",
"",
"",
"",
"",
],
"LYS": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD",
"CE",
"NZ",
"",
"",
"",
"",
"",
],
"MET": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"SD",
"CE",
"",
"",
"",
"",
"",
"",
],
"PHE": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD1",
"CD2",
"CE1",
"CE2",
"CZ",
"",
"",
"",
],
"PRO": ["N", "CA", "C", "O", "CB", "CG", "CD", "", "", "", "", "", "", ""],
"SER": ["N", "CA", "C", "O", "CB", "OG", "", "", "", "", "", "", "", ""],
"THR": [
"N",
"CA",
"C",
"O",
"CB",
"OG1",
"CG2",
"",
"",
"",
"",
"",
"",
"",
],
"TRP": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD1",
"CD2",
"NE1",
"CE2",
"CE3",
"CZ2",
"CZ3",
"CH2",
],
"TYR": [
"N",
"CA",
"C",
"O",
"CB",
"CG",
"CD1",
"CD2",
"CE1",
"CE2",
"CZ",
"OH",
"",
"",
],
"VAL": [
"N",
"CA",
"C",
"O",
"CB",
"CG1",
"CG2",
"",
"",
"",
"",
"",
"",
"",
],
"UNK": ["", "", "", "", "", "", "", "", "", "", "", "", "", ""],
}
# pylint: enable=line-too-long
# pylint: enable=bad-whitespace
# This is the standard residue order when coding AA type as a number.
# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
restypes = [
"A",
"R",
"N",
"D",
"C",
"Q",
"E",
"G",
"H",
"I",
"L",
"K",
"M",
"F",
"P",
"S",
"T",
"W",
"Y",
"V",
]
restype_order = {restype: i for i, restype in enumerate(restypes)}
restype_num = len(restypes) # := 20.
unk_restype_index = restype_num # Catch-all index for unknown restypes.
restypes_with_x = restypes + ["X"]
restype_order_with_x = {restype: i for i, restype in enumerate(restypes_with_x)}
def sequence_to_onehot(
    sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False
) -> np.ndarray:
    """One-hot encode an amino-acid sequence.

    Args:
        sequence: An amino acid sequence (one-letter codes).
        mapping: Maps each amino-acid letter to a column index. Its values
            must be exactly 0..N-1 with no gaps.
        map_unknown_to_x: When True, uppercase letters missing from
            ``mapping`` fall back to the index of 'X' (which must then be in
            the mapping); any non-uppercase-letter character raises. When
            False, a letter missing from the mapping raises KeyError.

    Returns:
        int32 array of shape (len(sequence), num_unique_aas).

    Raises:
        ValueError: If the mapping's values are not a gapless 0..N-1 range,
            or (with map_unknown_to_x) the sequence contains an invalid
            character.
    """
    num_classes = max(mapping.values()) + 1
    # The output width is derived from the largest index, so the index set
    # must be dense or some columns would be silently dead.
    if sorted(set(mapping.values())) != list(range(num_classes)):
        raise ValueError(
            "The mapping must have values from 0 to num_unique_aas-1 "
            "without any gaps. Got: %s" % sorted(mapping.values())
        )

    encoded = np.zeros((len(sequence), num_classes), dtype=np.int32)
    for row, letter in enumerate(sequence):
        if not map_unknown_to_x:
            column = mapping[letter]
        elif letter.isalpha() and letter.isupper():
            column = mapping.get(letter, mapping["X"])
        else:
            raise ValueError(f"Invalid character in the sequence: {letter}")
        encoded[row, column] = 1
    return encoded
restype_1to3 = {
"A": "ALA",
"R": "ARG",
"N": "ASN",
"D": "ASP",
"C": "CYS",
"Q": "GLN",
"E": "GLU",
"G": "GLY",
"H": "HIS",
"I": "ILE",
"L": "LEU",
"K": "LYS",
"M": "MET",
"F": "PHE",
"P": "PRO",
"S": "SER",
"T": "THR",
"W": "TRP",
"Y": "TYR",
"V": "VAL",
}
# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
# 1-to-1 mapping of 3 letter names to one letter names. The latter contains
# many more, and less common, three letter names as keys and maps many of these
# to the same one letter name (including 'X' and 'U' which we don't use here).
restype_3to1 = {v: k for k, v in restype_1to3.items()}
# Define a restype name for all unknown residues.
unk_restype = "UNK"
resnames = [restype_1to3[r] for r in restypes] + [unk_restype]
resname_to_idx = {resname: i for i, resname in enumerate(resnames)}
# The mapping here uses hhblits convention, so that B is mapped to D, J and O
# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
# remaining 20 amino acids are kept in alphabetical order.
# There are 2 non-amino acid codes, X (representing any amino acid) and
# "-" representing a missing amino acid in an alignment. The id for these
# codes is put at the end (20 and 21) so that they can easily be ignored if
# desired.
HHBLITS_AA_TO_ID = {
"A": 0,
"B": 2,
"C": 1,
"D": 2,
"E": 3,
"F": 4,
"G": 5,
"H": 6,
"I": 7,
"J": 20,
"K": 8,
"L": 9,
"M": 10,
"N": 11,
"O": 20,
"P": 12,
"Q": 13,
"R": 14,
"S": 15,
"T": 16,
"U": 1,
"V": 17,
"W": 18,
"X": 20,
"Y": 19,
"Z": 3,
"-": 21,
}
# Partial inversion of HHBLITS_AA_TO_ID.
ID_TO_HHBLITS_AA = {
0: "A",
1: "C", # Also U.
2: "D", # Also B.
3: "E", # Also Z.
4: "F",
5: "G",
6: "H",
7: "I",
8: "K",
9: "L",
10: "M",
11: "N",
12: "P",
13: "Q",
14: "R",
15: "S",
16: "T",
17: "V",
18: "W",
19: "Y",
20: "X", # Includes J and O.
21: "-",
}
restypes_with_x_and_gap = restypes + ["X", "-"]
MAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(
restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])
for i in range(len(restypes_with_x_and_gap))
)
def _make_standard_atom_mask() -> np.ndarray:
    """Returns [num_res_types, num_atom_types] mask array."""
    # The extra (all-zero) row accounts for the unknown residue type.
    mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
    for res_idx, one_letter in enumerate(restypes):
        # Mark every heavy atom that this residue type actually contains.
        for atom_name in residue_atoms[restype_1to3[one_letter]]:
            mask[res_idx, atom_order[atom_name]] = 1
    return mask
STANDARD_ATOM_MASK = _make_standard_atom_mask()
# A one hot representation for the first and second atoms defining the axis
# of rotation for each chi-angle in each residue.
def chi_angle_atom(atom_index: int) -> np.ndarray:
    """Define chi-angle rigid groups via one-hot representations."""
    # For each residue, collect the atom-type index of the `atom_index`-th
    # atom of every chi group, padded with -1 up to the 4 possible chis.
    per_residue_indices = {}
    for resname, chi_groups in chi_angles_atoms.items():
        idx = [atom_types.index(group[atom_index]) for group in chi_groups]
        per_residue_indices[resname] = idx + [-1] * (4 - len(idx))

    eye = np.eye(atom_type_num)
    # -1 padding indexes the last row of `eye`, matching the original
    # behaviour of selecting a (spurious) one-hot for missing chis.
    stacked = [eye[per_residue_indices[restype_1to3[r]]] for r in restypes]
    stacked.append(np.zeros([4, atom_type_num]))  # Add zeros for residue `X`.
    one_hot = np.stack(stacked, axis=0)
    # Layout: (num_restypes, atom_type, chi_group).
    return np.transpose(one_hot, [0, 2, 1])
chi_atom_1_one_hot = chi_angle_atom(1)
chi_atom_2_one_hot = chi_angle_atom(2)
# An array like chi_angles_atoms but using indices rather than names.
chi_angles_atom_indices = [chi_angles_atoms[restype_1to3[r]] for r in restypes]
chi_angles_atom_indices = tree.map_structure(
lambda atom_name: atom_order[atom_name], chi_angles_atom_indices
)
chi_angles_atom_indices = np.array(
[
chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms)))
for chi_atoms in chi_angles_atom_indices
]
)
# Mapping from (res_name, atom_name) pairs to the atom's chi group index
# and atom index within that group.
chi_groups_for_atom = collections.defaultdict(list)
for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():
for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):
for atom_i, atom in enumerate(chi_group):
chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))
chi_groups_for_atom = dict(chi_groups_for_atom)
def _make_rigid_transformation_4x4(ex, ey, translation):
"""Create a rigid 4x4 transformation matrix from two axes and transl."""
# Normalize ex.
ex_normalized = ex / np.linalg.norm(ex)
# make ey perpendicular to ex
ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized
ey_normalized /= np.linalg.norm(ey_normalized)
# compute ez as cross product
eznorm = np.cross(ex_normalized, ey_normalized)
m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose()
m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0)
return m
# create an array with (restype, atomtype) --> rigid_group_idx
# and an array with (restype, atomtype, coord) for the atom positions
# and compute affine transformation matrices (4,4) from one rigid group to the
# previous group
restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int)
restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)
restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int)
restype_atom14_mask = np.zeros([21, 14], dtype=np.float32)
restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)
restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)
def _make_rigid_group_constants():
"""Fill the arrays above."""
for restype, restype_letter in enumerate(restypes):
resname = restype_1to3[restype_letter]
for atomname, group_idx, atom_position in rigid_group_atom_positions[resname]:
atomtype = atom_order[atomname]
restype_atom37_to_rigid_group[restype, atomtype] = group_idx
restype_atom37_mask[restype, atomtype] = 1
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | true |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_tri_self_attn_block.py | src/cheap/esmfold/_tri_self_attn_block.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from openfold.model.triangular_attention import (
TriangleAttentionEndingNode,
TriangleAttentionStartingNode,
)
from openfold.model.triangular_multiplicative_update import (
TriangleMultiplicationIncoming,
TriangleMultiplicationOutgoing,
)
from torch import nn
from ._misc import Attention, Dropout, PairToSequence, ResidueMLP, SequenceToPair
class TriangularSelfAttentionBlock(nn.Module):
    """One block of the ESMFold folding trunk.

    Updates a per-residue sequence state with biased self-attention, then
    updates the pairwise state with triangular multiplicative updates and
    triangular (axial) attention, each applied as a residual branch.
    """

    def __init__(
        self,
        sequence_state_dim,
        pairwise_state_dim,
        sequence_head_width,
        pairwise_head_width,
        dropout=0,
        **__kwargs,  # extra config keys are accepted and ignored
    ):
        super().__init__()

        # Head widths must evenly divide their state dims.
        assert sequence_state_dim % sequence_head_width == 0
        assert pairwise_state_dim % pairwise_head_width == 0
        sequence_num_heads = sequence_state_dim // sequence_head_width
        pairwise_num_heads = pairwise_state_dim // pairwise_head_width
        assert sequence_state_dim == sequence_num_heads * sequence_head_width
        assert pairwise_state_dim == pairwise_num_heads * pairwise_head_width
        # SequenceToPair splits the pairwise dim in half (see below).
        assert pairwise_state_dim % 2 == 0

        self.sequence_state_dim = sequence_state_dim
        self.pairwise_state_dim = pairwise_state_dim

        # Sequence-side attention with a bias derived from the pair state.
        self.layernorm_1 = nn.LayerNorm(sequence_state_dim)

        self.sequence_to_pair = SequenceToPair(
            sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim
        )
        self.pair_to_sequence = PairToSequence(pairwise_state_dim, sequence_num_heads)

        self.seq_attention = Attention(
            sequence_state_dim, sequence_num_heads, sequence_head_width, gated=True
        )

        # Pairwise-side triangular updates (OpenFold modules).
        self.tri_mul_out = TriangleMultiplicationOutgoing(
            pairwise_state_dim,
            pairwise_state_dim,
        )
        self.tri_mul_in = TriangleMultiplicationIncoming(
            pairwise_state_dim,
            pairwise_state_dim,
        )
        self.tri_att_start = TriangleAttentionStartingNode(
            pairwise_state_dim,
            pairwise_head_width,
            pairwise_num_heads,
            inf=1e9,
        )  # type: ignore
        self.tri_att_end = TriangleAttentionEndingNode(
            pairwise_state_dim,
            pairwise_head_width,
            pairwise_num_heads,
            inf=1e9,
        )  # type: ignore

        # Transition MLPs (4x expansion) for both states.
        self.mlp_seq = ResidueMLP(
            sequence_state_dim, 4 * sequence_state_dim, dropout=dropout
        )
        self.mlp_pair = ResidueMLP(
            pairwise_state_dim, 4 * pairwise_state_dim, dropout=dropout
        )

        # Row/column dropout runs at 2x the base rate, so cap the base rate.
        assert dropout < 0.4
        self.drop = nn.Dropout(dropout)
        self.row_drop = Dropout(dropout * 2, 2)
        self.col_drop = Dropout(dropout * 2, 1)

        # Zero-initialize every output projection so each residual branch
        # contributes nothing at initialization (the block starts as an
        # identity mapping on both states).
        torch.nn.init.zeros_(self.tri_mul_in.linear_z.weight)
        torch.nn.init.zeros_(self.tri_mul_in.linear_z.bias)
        torch.nn.init.zeros_(self.tri_mul_out.linear_z.weight)
        torch.nn.init.zeros_(self.tri_mul_out.linear_z.bias)
        torch.nn.init.zeros_(self.tri_att_start.mha.linear_o.weight)
        torch.nn.init.zeros_(self.tri_att_start.mha.linear_o.bias)
        torch.nn.init.zeros_(self.tri_att_end.mha.linear_o.weight)
        torch.nn.init.zeros_(self.tri_att_end.mha.linear_o.bias)

        torch.nn.init.zeros_(self.sequence_to_pair.o_proj.weight)
        torch.nn.init.zeros_(self.sequence_to_pair.o_proj.bias)
        torch.nn.init.zeros_(self.pair_to_sequence.linear.weight)
        torch.nn.init.zeros_(self.seq_attention.o_proj.weight)
        torch.nn.init.zeros_(self.seq_attention.o_proj.bias)
        torch.nn.init.zeros_(self.mlp_seq.mlp[-2].weight)
        torch.nn.init.zeros_(self.mlp_seq.mlp[-2].bias)
        torch.nn.init.zeros_(self.mlp_pair.mlp[-2].weight)
        torch.nn.init.zeros_(self.mlp_pair.mlp[-2].bias)

    def forward(
        self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs
    ):
        """
        Inputs:
          sequence_state: B x L x sequence_state_dim
          pairwise_state: B x L x L x pairwise_state_dim
          mask: B x L boolean tensor of valid positions
          chunk_size: optional chunking passed through to the triangular
            attention modules (memory/speed trade-off)

        Output:
          sequence_state: B x L x sequence_state_dim
          pairwise_state: B x L x L x pairwise_state_dim
        """
        # Shape sanity checks: both states must agree on batch and length.
        assert len(sequence_state.shape) == 3
        assert len(pairwise_state.shape) == 4
        if mask is not None:
            assert len(mask.shape) == 2

        batch_dim, seq_dim, sequence_state_dim = sequence_state.shape
        pairwise_state_dim = pairwise_state.shape[3]
        assert sequence_state_dim == self.sequence_state_dim
        assert pairwise_state_dim == self.pairwise_state_dim
        assert batch_dim == pairwise_state.shape[0]
        assert seq_dim == pairwise_state.shape[1]
        assert seq_dim == pairwise_state.shape[2]

        # Update sequence state: derive per-head attention bias from pairs.
        bias = self.pair_to_sequence(pairwise_state)

        # Self attention with bias + mlp.
        y = self.layernorm_1(sequence_state)
        y, _ = self.seq_attention(y, mask=mask, bias=bias)
        sequence_state = sequence_state + self.drop(y)
        sequence_state = self.mlp_seq(sequence_state)

        # Update pairwise state from the refreshed sequence state.
        pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state)

        # Axial attention with triangular bias.
        # Outer product of the 1-D mask gives the B x L x L pair mask.
        tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None
        pairwise_state = pairwise_state + self.row_drop(
            self.tri_mul_out(pairwise_state, mask=tri_mask)
        )
        pairwise_state = pairwise_state + self.col_drop(
            self.tri_mul_in(pairwise_state, mask=tri_mask)
        )
        pairwise_state = pairwise_state + self.row_drop(
            self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
        )
        pairwise_state = pairwise_state + self.col_drop(
            self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
        )

        # MLP over pairs.
        pairwise_state = self.mlp_pair(pairwise_state)

        return sequence_state, pairwise_state
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_trunk.py | src/cheap/esmfold/_trunk.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as T
from contextlib import ExitStack
from dataclasses import dataclass, field
import torch
import torch.nn as nn
# from openfold.model.structure_module import StructureModule
from ._structure_module import StructureModule
from ._tri_self_attn_block import TriangularSelfAttentionBlock
def default(x, val):
    """Return ``x`` unless it is None, in which case return the fallback ``val``."""
    if x is None:
        return val
    return x
@dataclass
class StructureModuleConfig:
    """Hyperparameters for the AlphaFold-style structure module.

    Field names mirror openfold's ``StructureModule`` constructor arguments
    (this dataclass is unpacked into it by ``FoldingTrunk.__init__``).
    """

    # Channel width of the single (per-residue) representation.
    c_s: int = 384
    # Channel width of the pair representation.
    c_z: int = 128
    # Hidden width used inside invariant point attention (IPA).
    c_ipa: int = 16
    # Hidden width of the angle resnet.
    c_resnet: int = 128
    # Number of IPA attention heads.
    no_heads_ipa: int = 12
    # Number of query/key points per IPA head.
    no_qk_points: int = 4
    # Number of value points per IPA head.
    no_v_points: int = 8
    dropout_rate: float = 0.1
    # Number of structure-module iterations.
    no_blocks: int = 8
    no_transition_layers: int = 1
    # Depth of the angle resnet.
    no_resnet_blocks: int = 2
    # Torsion angles predicted per residue.
    no_angles: int = 7
    # Scale applied to predicted translations (nanometers -> angstroms in AF2).
    trans_scale_factor: int = 10
    # Small constant for numerical stability.
    epsilon: float = 1e-8
    # Large value used as "infinity" in masked attention.
    inf: float = 1e5
@dataclass
class FoldingTrunkConfig:
    """Hyperparameters for ``FoldingTrunk`` (triangular-attention blocks + recycling)."""

    _name: str = "FoldingTrunkConfig"
    # Number of TriangularSelfAttentionBlock layers.
    num_blocks: int = 48
    # Channel width of the sequence (single) state.
    sequence_state_dim: int = 1024
    # Channel width of the pairwise state.
    pairwise_state_dim: int = 128
    # Per-head width for sequence attention (must divide sequence_state_dim).
    sequence_head_width: int = 32
    # Per-head width for pairwise attention (must divide pairwise_state_dim).
    pairwise_head_width: int = 32
    # Number of relative-position bins for the pairwise positional embedding.
    position_bins: int = 32
    dropout: float = 0
    # NOTE(review): layer_drop / cpu_grad_checkpoint are not read by FoldingTrunk
    # in this file — presumably consumed elsewhere; confirm before removing.
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    # Maximum number of recycling passes (the first pass counts as one).
    max_recycles: int = 4
    # Axial-attention chunk size; None disables chunking.
    chunk_size: T.Optional[int] = None
    # structure_module: StructureModuleConfig = StructureModuleConfig()
    structure_module: StructureModuleConfig = field(
        default_factory=StructureModuleConfig
    )
def get_axial_mask(mask):
    """Expand a per-residue validity mask into the flattened axial form used
    by row/column attention.

    Input:
        mask: B x L tensor of booleans (or None).
    Output:
        (B * L) x L tensor of booleans, or None if ``mask`` is None.
    """
    if mask is None:
        return None
    assert mask.dim() == 2

    n_batch, n_seq = mask.shape
    # Repeat each batch's row mask once per sequence position, then flatten
    # the (batch, row) axes together.
    expanded = mask.unsqueeze(1).expand(n_batch, n_seq, n_seq)
    return expanded.reshape(n_batch * n_seq, n_seq)
class RelativePosition(nn.Module):
    """Embeds clamped pairwise residue-index offsets (relative positional
    encoding for the pairwise state)."""

    def __init__(self, bins, pairwise_state_dim):
        super().__init__()
        self.bins = bins

        # One extra offset is reserved so that embedding index 0 can be used
        # exclusively for masked pairs.
        self.embedding = torch.nn.Embedding(2 * bins + 2, pairwise_state_dim)

    def forward(self, residue_index, mask=None):
        """
        Input:
          residue_index: B x L tensor of indices (dtype=torch.long)
          mask: B x L tensor of booleans
        Output:
          pairwise_state: B x L x L x pairwise_state_dim tensor of embeddings
        """
        assert residue_index.dtype == torch.long
        if mask is not None:
            assert residue_index.shape == mask.shape

        # Pairwise offsets, clamped to [-bins, bins] and shifted to be
        # positive; +1 keeps index 0 free for masked pairs.
        offsets = residue_index[:, None, :] - residue_index[:, :, None]
        offsets = offsets.clamp(-self.bins, self.bins) + self.bins + 1

        if mask is not None:
            pair_mask = mask[:, None, :] * mask[:, :, None]
            offsets[pair_mask == False] = 0

        return self.embedding(offsets)
# class FoldingTrunk(nn.Module):
# def __init__(self, *args, **kwargs):
# super().__init__()
# self.cfg = FoldingTrunkConfig(**kwargs)
class FoldingTrunk(nn.Module):
    """ESMFold folding trunk: a stack of triangular self-attention blocks with
    AlphaFold-style recycling, feeding an openfold structure module.

    Fix vs. original: ``from_pretrained`` used the non-idiomatic
    ``if not device is None`` — rewritten as ``if device is not None``.
    """

    def __init__(self, cfg: FoldingTrunkConfig):
        super().__init__()
        self.cfg = cfg
        assert self.cfg.max_recycles > 0

        c_s = self.cfg.sequence_state_dim
        c_z = self.cfg.pairwise_state_dim

        # Head widths must evenly divide the state dimensions.
        assert c_s % self.cfg.sequence_head_width == 0
        assert c_z % self.cfg.pairwise_head_width == 0
        block = TriangularSelfAttentionBlock

        self.pairwise_positional_embedding = RelativePosition(
            self.cfg.position_bins, c_z
        )

        self.blocks = nn.ModuleList(
            [
                block(
                    sequence_state_dim=c_s,
                    pairwise_state_dim=c_z,
                    sequence_head_width=self.cfg.sequence_head_width,
                    pairwise_head_width=self.cfg.pairwise_head_width,
                    dropout=self.cfg.dropout,
                )
                for i in range(self.cfg.num_blocks)
            ]
        )

        self.recycle_bins = 15
        self.recycle_s_norm = nn.LayerNorm(c_s)
        self.recycle_z_norm = nn.LayerNorm(c_z)
        self.recycle_disto = nn.Embedding(self.recycle_bins, c_z)
        # Zero the embedding for bin 0 so the first pass (no distance info)
        # contributes nothing to the recycled pair state.
        self.recycle_disto.weight[0].detach().zero_()

        # NOTE(review): ``**`` unpacking requires cfg.structure_module to be a
        # mapping (e.g. an OmegaConf dict from a loaded checkpoint), not the
        # StructureModuleConfig dataclass default — confirm against callers.
        self.structure_module = StructureModule(**self.cfg.structure_module)  # type: ignore
        # self.structure_module = StructureModule(self.cfg.structure_module)  # type: ignore
        self.trunk2sm_s = nn.Linear(c_s, self.structure_module.c_s)
        self.trunk2sm_z = nn.Linear(c_z, self.structure_module.c_z)

        self.chunk_size = self.cfg.chunk_size

    def set_chunk_size(self, chunk_size):
        # This parameter means the axial attention will be computed
        # in a chunked manner. This should make the memory used more or less O(L) instead of O(L^2).
        # It's equivalent to running a for loop over chunks of the dimension we're iterative over,
        # where the chunk_size is the size of the chunks, so 128 would mean to parse 128-lengthed chunks.
        self.chunk_size = chunk_size

    def trunk_iter(self, s, z, residx, mask):
        """Run one full pass over all trunk blocks, after adding the relative
        positional embedding to the pairwise state."""
        z = z + self.pairwise_positional_embedding(residx, mask=mask)

        for block in self.blocks:
            s, z = block(
                s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size
            )
        return s, z

    def forward(
        self,
        seq_feats,
        pair_feats,
        true_aa,
        residx,
        mask,
        no_recycles: T.Optional[int] = None,
    ):
        """
        Inputs:
          seq_feats: B x L x C tensor of sequence features
          pair_feats: B x L x L x C tensor of pair features
          true_aa: B x L long tensor of amino-acid indices (passed to the structure module)
          residx: B x L long tensor giving the position in the sequence
          mask: B x L boolean tensor indicating valid residues
          no_recycles: number of recycling passes; defaults to cfg.max_recycles
        Output:
          structure-module output dict, augmented with trunk states "s_s"/"s_z"
        """
        device = seq_feats.device
        s_s_0 = seq_feats
        s_z_0 = pair_feats

        if no_recycles is None:
            no_recycles = self.cfg.max_recycles
        else:
            assert no_recycles >= 0, "Number of recycles must not be negative."
            no_recycles += 1  # First 'recycle' is just the standard forward pass through the model.

        s_s = s_s_0
        s_z = s_z_0
        recycle_s = torch.zeros_like(s_s)
        recycle_z = torch.zeros_like(s_z)
        recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64)

        assert no_recycles > 0
        for recycle_idx in range(no_recycles):
            # Gradients flow only through the final iteration; earlier
            # recycles run under no_grad.
            with ExitStack() if recycle_idx == no_recycles - 1 else torch.no_grad():
                # === Recycling ===
                recycle_s = self.recycle_s_norm(recycle_s.detach())
                recycle_z = self.recycle_z_norm(recycle_z.detach())
                recycle_z += self.recycle_disto(recycle_bins.detach())

                s_s, s_z = self.trunk_iter(
                    s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask
                )

                # === Structure module ===
                sm_s = self.trunk2sm_s(s_s)
                sm_z = self.trunk2sm_z(s_z)
                structure = self.structure_module(
                    {"single": sm_s, "pair": sm_z},
                    true_aa,
                    mask.float(),
                )
                structure["sm_s"] = sm_s
                structure["sm_z"] = sm_z

                recycle_s = s_s
                recycle_z = s_z
                # Distogram needs the N, CA, C coordinates, and bin constants same as alphafold.
                recycle_bins = FoldingTrunk.distogram(
                    structure["positions"][-1][:, :, :3],
                    3.375,
                    21.375,
                    self.recycle_bins,
                )

        assert isinstance(structure, dict)  # type: ignore
        structure["s_s"] = s_s
        structure["s_z"] = s_z

        return structure

    # def from_sm_s(self, sm_s, true_aa, s_s_0=None, s_z_0=None, residx=None, mask=None, no_recycles: T.Optional[int] = None):
    #     """
    #     For experiments where we diffuse from the structure module level
    #     """

    def from_seq_feat(
        self,
        true_aa,
        s_s_0,
        s_z_0=None,
        residx=None,
        mask=None,
        no_recycles: T.Optional[int] = None,
    ):
        """Modified forward pass that starts from the sequence feature, e.g. during inference-time generation."""
        device = s_s_0.device
        N, L, _ = s_s_0.shape
        from . import ESMFOLD_Z_DIM

        # initialize defaults #########################################################
        s_z_0 = default(s_z_0, s_s_0.new_zeros(N, L, L, ESMFOLD_Z_DIM))
        residx = default(residx, torch.arange(L, device=device).expand(N, L))
        mask = default(mask, torch.ones(N, L, device=device))

        if no_recycles is None:
            no_recycles = self.cfg.max_recycles
        else:
            assert no_recycles >= 0, "Number of recycles must not be negative."
            no_recycles += 1  # First 'recycle' is just the standard forward pass through the model.
        ################################################################################

        s_s = s_s_0  # (B, L, 1024)
        s_z = s_z_0  # (B, L, L, 128)
        recycle_s = torch.zeros_like(s_s)
        recycle_z = torch.zeros_like(s_z)
        recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64)

        # Same recycling loop as ``forward`` (kept duplicated to preserve the
        # exact statement order of the original implementation).
        for recycle_idx in range(no_recycles):
            with ExitStack() if recycle_idx == no_recycles - 1 else torch.no_grad():
                # === Recycling ===
                recycle_s = self.recycle_s_norm(recycle_s.detach())
                recycle_z = self.recycle_z_norm(recycle_z.detach())
                recycle_z += self.recycle_disto(recycle_bins.detach())

                s_s, s_z = self.trunk_iter(
                    s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask
                )

                # === Structure module ===
                sm_s = self.trunk2sm_s(s_s)
                sm_z = self.trunk2sm_z(s_z)
                structure = self.structure_module(
                    {"single": sm_s, "pair": sm_z},
                    true_aa,
                    mask.float(),
                )
                structure["sm_s"] = sm_s
                structure["sm_z"] = sm_z

                recycle_s = s_s
                recycle_z = s_z
                # Distogram needs the N, CA, C coordinates, and bin constants same as alphafold.
                recycle_bins = FoldingTrunk.distogram(
                    structure["positions"][-1][:, :, :3],
                    3.375,
                    21.375,
                    self.recycle_bins,
                )

        assert isinstance(structure, dict)  # type: ignore
        structure["s_s"] = s_s
        structure["s_z"] = s_z

        return structure, true_aa, residx, mask

    @staticmethod
    def distogram(coords, min_bin, max_bin, num_bins):
        """Bin pairwise CB-CB squared distances into ``num_bins`` bins
        (AlphaFold recycling distogram)."""
        # Coords are [... L x 3 x 3], where it's [N, CA, C] x 3 coordinates.
        boundaries = torch.linspace(
            min_bin,
            max_bin,
            num_bins - 1,
            device=coords.device,
        )
        boundaries = boundaries**2
        N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)]
        # Infer CB coordinates.
        b = CA - N
        c = C - CA
        a = b.cross(c, dim=-1)
        CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
        dists = (
            (CB[..., None, :, :] - CB[..., :, None, :])
            .pow(2)
            .sum(dim=-1, keepdims=True)
        )
        bins = torch.sum(dists > boundaries, dim=-1)  # [..., L, L]
        return bins

    @classmethod
    def from_pretrained(cls, device=None, eval_mode=True):
        """Build a trunk from the pretrained ESMFold checkpoint, loading only
        the ``trunk.``-prefixed weights."""
        from ._misc import get_esmfold_model_state

        esmfold_cfg, esmfold_weights_cpu = get_esmfold_model_state()
        model = cls(esmfold_cfg.trunk)
        trunk_weights_cpu = {
            k: v for k, v in esmfold_weights_cpu.items() if k[:6] == "trunk."
        }
        trunk_weights_cpu = {k[6:]: v for k, v in trunk_weights_cpu.items()}
        # load_state_dict(strict=False) returns (missing_keys, unexpected_keys);
        # nothing essential may be missing.
        missing_keys = model.load_state_dict(trunk_weights_cpu, strict=False)
        assert len(missing_keys.missing_keys) == 0
        if device is not None:
            model = model.to(device)
        if eval_mode:
            model = model.eval()
        return model
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_esmfold.py | src/cheap/esmfold/_esmfold.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as T
from dataclasses import dataclass, field
from functools import partial
import pathlib as Path
import time
import torch
import torch.nn as nn
from torch import nn
from torch.nn import LayerNorm
import esm
from esm import Alphabet
from lightning.pytorch.utilities import rank_zero_info
from ..openfold_utils._data_transforms import make_atom14_masks
from ..openfold_utils._losses import compute_predicted_aligned_error, compute_tm
from ..openfold_utils import _residue_constants as residue_constants
# for all ESMFold specific imports, use local modules to allow for customization
from ._categorical_mixture import categorical_lddt
from ._misc import (
batch_encode_sequences,
collate_dense_tensors,
output_to_pdb,
)
from ._trunk import FoldingTrunk, FoldingTrunkConfig
@dataclass
class ESMFoldConfig:
    """Top-level ESMFold model configuration."""

    # Folding-trunk hyperparameters (blocks, state dims, recycling).
    trunk: FoldingTrunkConfig = field(default_factory=FoldingTrunkConfig)
    # Hidden width of the pLDDT prediction head.
    lddt_head_hid_dim: int = 128
    # Key into ``esm_registry`` selecting the ESM-2 language model.
    esm_type: str = "esm2_3B"  # added
    # Whether to derive the initial pair state from ESM attention maps.
    use_esm_attn_map: bool = False  # added
# Generic loader for checkpoints by name; returns (model, alphabet).
load_fn = esm.pretrained.load_model_and_alphabet

# Maps the ``esm_type`` config string to a zero-argument constructor for the
# corresponding pretrained ESM-2 model + alphabet. Entries without a ``_270K``
# suffix are the 500K-update checkpoints unless they come straight from
# ``esm.pretrained``.
esm_registry = {
    "esm2_8M": partial(load_fn, "esm2_t6_8M_UR50D_500K"),
    "esm2_8M_270K": esm.pretrained.esm2_t6_8M_UR50D,
    "esm2_35M": partial(load_fn, "esm2_t12_35M_UR50D_500K"),
    "esm2_35M_270K": esm.pretrained.esm2_t12_35M_UR50D,
    "esm2_150M": partial(load_fn, "esm2_t30_150M_UR50D_500K"),
    "esm2_150M_270K": partial(load_fn, "esm2_t30_150M_UR50D_270K"),
    "esm2_650M": esm.pretrained.esm2_t33_650M_UR50D,
    "esm2_650M_270K": partial(load_fn, "esm2_t33_650M_270K_UR50D"),
    "esm2_3B": esm.pretrained.esm2_t36_3B_UR50D,
    "esm2_3B_270K": partial(load_fn, "esm2_t36_3B_UR50D_500K"),
    "esm2_15B": esm.pretrained.esm2_t48_15B_UR50D,
}
class ESMFold(nn.Module):
    """End-to-end structure prediction model: a frozen ESM-2 language model
    producing sequence features, a ``FoldingTrunk``, and output heads for
    distogram, per-residue LM logits, pLDDT, and pTM."""

    def __init__(self, esmfold_config=None, **kwargs):
        super().__init__()

        rank_zero_info("Creating ESMFold...")
        start = time.time()

        self.cfg = esmfold_config if esmfold_config else ESMFoldConfig(**kwargs)
        cfg = self.cfg

        self.distogram_bins = 64

        # Language model is frozen and kept in half precision; it is used
        # only as a feature extractor.
        self.esm, self.esm_dict = esm_registry.get(cfg.esm_type)()
        self.esm.requires_grad_(False)
        self.esm.half()

        self.esm_feats = self.esm.embed_dim
        self.esm_attns = self.esm.num_layers * self.esm.attention_heads
        # Lookup table mapping AF2 residue indices to ESM token indices.
        self.register_buffer("af2_to_esm", ESMFold._af2_to_esm(self.esm_dict))
        # Learned softmax weights for combining per-layer LM representations.
        self.esm_s_combine = nn.Parameter(torch.zeros(self.esm.num_layers + 1))

        c_s = cfg.trunk.sequence_state_dim
        c_z = cfg.trunk.pairwise_state_dim

        # Projects combined LM features into the trunk's sequence state.
        self.esm_s_mlp = nn.Sequential(
            LayerNorm(self.esm_feats),
            nn.Linear(self.esm_feats, c_s),
            nn.ReLU(),
            nn.Linear(c_s, c_s),
        )
        if cfg.use_esm_attn_map:
            # Projects flattened LM attention maps into the pair state.
            self.esm_z_mlp = nn.Sequential(
                LayerNorm(self.esm_attns),
                nn.Linear(self.esm_attns, c_z),
                nn.ReLU(),
                nn.Linear(c_z, c_z),
            )

        # 0 is padding, N is unknown residues, N + 1 is mask.
        self.n_tokens_embed = residue_constants.restype_num + 3
        self.pad_idx = 0
        self.unk_idx = self.n_tokens_embed - 2
        self.mask_idx = self.n_tokens_embed - 1
        self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0)

        # self.trunk = FoldingTrunk(**cfg.trunk)
        self.trunk = FoldingTrunk(cfg.trunk)

        self.distogram_head = nn.Linear(c_z, self.distogram_bins)
        self.ptm_head = nn.Linear(c_z, self.distogram_bins)
        self.lm_head = nn.Linear(c_s, self.n_tokens_embed)
        self.lddt_bins = 50
        self.lddt_head = nn.Sequential(
            nn.LayerNorm(cfg.trunk.structure_module.c_s),
            nn.Linear(cfg.trunk.structure_module.c_s, cfg.lddt_head_hid_dim),
            nn.Linear(cfg.lddt_head_hid_dim, cfg.lddt_head_hid_dim),
            nn.Linear(cfg.lddt_head_hid_dim, 37 * self.lddt_bins),
        )

        end = time.time()
        rank_zero_info(f"ESMFold model loaded in {(end - start):.2f} seconds.")

    @staticmethod
    def _af2_to_esm(d: Alphabet):
        """Build the AF2-index -> ESM-token-index lookup tensor."""
        # Remember that t is shifted from residue_constants by 1 (0 is padding).
        esm_reorder = [d.padding_idx] + [
            d.get_idx(v) for v in residue_constants.restypes_with_x
        ]
        return torch.tensor(esm_reorder)

    def _af2_idx_to_esm_idx(self, aa, mask):
        """Convert AF2 residue indices to ESM token indices, sending masked
        positions to index 0 (padding)."""
        aa = (aa + 1).masked_fill(mask != 1, 0)
        return self.af2_to_esm[aa]

    def _compute_language_model_representations(
        self, esmaa: torch.Tensor, return_intermediates=False
    ) -> torch.Tensor:
        """Adds bos/eos tokens for the language model, since the structure module doesn't use these.

        Returns (esm_s, esm_z) — per-layer representations (B, L, nLayers, C)
        and, if ``use_esm_attn_map``, flattened attention maps — plus an
        ``intermediates`` dict when requested.
        """
        batch_size = esmaa.size(0)

        bosi, eosi = self.esm_dict.cls_idx, self.esm_dict.eos_idx
        bos = esmaa.new_full((batch_size, 1), bosi)
        eos = esmaa.new_full((batch_size, 1), self.esm_dict.padding_idx)
        esmaa = torch.cat([bos, esmaa, eos], dim=1)
        # Use the first padding index as eos during inference.
        esmaa[range(batch_size), (esmaa != 1).sum(1)] = eosi

        res = self.esm(
            esmaa,
            repr_layers=range(self.esm.num_layers + 1),
            need_head_weights=self.cfg.use_esm_attn_map,
        )
        # Stack per-layer representations along a new layer axis.
        esm_s = torch.stack(
            [v for _, v in sorted(res["representations"].items())], dim=2
        )
        esm_s = esm_s[:, 1:-1]  # B, L, nLayers, C
        # Strip bos/eos from attentions too.
        esm_z = (
            res["attentions"].permute(0, 4, 3, 1, 2).flatten(3, 4)[:, 1:-1, 1:-1, :]
            if self.cfg.use_esm_attn_map
            else None
        )
        if not return_intermediates:
            return esm_s, esm_z
        else:
            intermediates = {"lm_res": res, "esm_s": esm_s}
            return esm_s, esm_z, intermediates

    def _mask_inputs_to_esm(self, esmaa, pattern):
        """Replace positions where ``pattern == 1`` with the ESM mask token."""
        new_esmaa = esmaa.clone()
        new_esmaa[pattern == 1] = self.esm_dict.mask_idx
        return new_esmaa

    def embed_for_folding_trunk(
        self,
        aa: torch.Tensor,
        mask: T.Optional[torch.Tensor] = None,
        residx: T.Optional[torch.Tensor] = None,
        masking_pattern: T.Optional[torch.Tensor] = None,
        return_intermediates: bool = False,
    ):
        """First half of original `forward` function to get s_s_0 and s_z_0.
        Runs a forward pass given input tokens. Use `model.infer` to
        run inference from a sequence.

        Args:
            aa (torch.Tensor): Tensor containing indices corresponding to amino acids. Indices match
                openfold.np.residue_constants.restype_order_with_x.
            mask (torch.Tensor): Binary tensor with 1 meaning position is unmasked and 0 meaning position is masked.
            residx (torch.Tensor): Residue indices of amino acids. Will assume contiguous if not provided.
            masking_pattern (torch.Tensor): Optional masking to pass to the input. Binary tensor of the same size
                as `aa`. Positions with 1 will be masked. ESMFold sometimes produces different samples when
                different masks are provided.
            num_recycles (int): How many recycle iterations to perform. If None, defaults to training max
                recycles, which is 3.
        """
        if mask is None:
            mask = torch.ones_like(aa)

        B = aa.shape[0]
        L = aa.shape[1]
        device = aa.device

        if residx is None:
            residx = torch.arange(L, device=device).expand_as(aa)

        # === ESM ===
        esmaa = self._af2_idx_to_esm_idx(aa, mask)

        if masking_pattern is not None:
            esmaa = self._mask_inputs_to_esm(esmaa, masking_pattern)

        if return_intermediates:
            esm_s, esm_z, intermediates = self._compute_language_model_representations(
                esmaa, return_intermediates
            )
        else:
            esm_s, esm_z = self._compute_language_model_representations(
                esmaa, return_intermediates
            )

        # Convert esm_s to the precision used by the trunk and
        # the structure module. These tensors may be a lower precision if, for example,
        # we're running the language model in fp16 precision.
        esm_s = esm_s.to(self.esm_s_combine.dtype)
        esm_s = esm_s.detach()

        # === preprocessing ===
        # Learned softmax-weighted combination over LM layers.
        esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2)
        if return_intermediates:
            intermediates["s_post_softmax"] = esm_s

        s_s_0 = self.esm_s_mlp(esm_s)
        if return_intermediates:
            intermediates["s_post_mlp"] = s_s_0

        if self.cfg.use_esm_attn_map:
            esm_z = esm_z.to(self.esm_s_combine.dtype)
            esm_z = esm_z.detach()
            s_z_0 = self.esm_z_mlp(esm_z)
        else:
            # Pair state starts at zero when attention maps are not used.
            s_z_0 = s_s_0.new_zeros(B, L, L, self.cfg.trunk.pairwise_state_dim)

        s_s_0 += self.embedding(aa)
        if return_intermediates:
            intermediates["s"] = s_s_0
            intermediates["aa_embed"] = self.embedding(aa)

        if return_intermediates:
            return s_s_0, s_z_0, aa, residx, mask, intermediates
        else:
            return s_s_0, s_z_0, aa, residx, mask

    def folding_trunk(
        self, s_s_0, s_z_0, aa, residx, mask, num_recycles: T.Optional[int] = None
    ):
        """Second half of the forward pass: run the trunk and post-process its
        output into the full prediction dict."""
        assert not self.trunk is None
        structure: dict = self.trunk(
            s_s_0, s_z_0, aa, residx, mask, no_recycles=num_recycles
        )
        structure = self.post_processing(structure, aa, residx, mask)
        return structure

    def post_processing(self, structure, aa, residx, mask):
        """Attach output-head predictions (distogram, LM logits, pLDDT, pTM,
        predicted aligned error) and atom masks to the trunk output dict."""
        B, L = aa.shape

        # Symmetrize the distogram logits over the two residue axes.
        disto_logits = self.distogram_head(structure["s_z"])
        disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2
        structure["distogram_logits"] = disto_logits

        lm_logits = self.lm_head(structure["s_s"])
        structure["lm_logits"] = lm_logits

        structure["aatype"] = aa
        make_atom14_masks(structure)

        for k in [
            "atom14_atom_exists",
            "atom37_atom_exists",
        ]:
            structure[k] *= mask.unsqueeze(-1)
        structure["residue_index"] = residx

        # Per-atom pLDDT from the last structure-module state.
        lddt_head = self.lddt_head(structure["states"]).reshape(
            structure["states"].shape[0], B, L, -1, self.lddt_bins
        )
        structure["lddt_head"] = lddt_head
        plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins)
        structure["plddt"] = (
            100 * plddt
        )  # we predict plDDT between 0 and 1, scale to be between 0 and 100.

        ptm_logits = self.ptm_head(structure["s_z"])

        seqlen = mask.type(torch.int64).sum(1)
        structure["ptm_logits"] = ptm_logits
        # pTM is computed per sequence on the unpadded L x L logits.
        structure["ptm"] = torch.stack(
            [
                compute_tm(
                    batch_ptm_logits[None, :sl, :sl],
                    max_bins=31,
                    no_bins=self.distogram_bins,
                )
                for batch_ptm_logits, sl in zip(ptm_logits, seqlen)
            ]
        )
        structure.update(
            compute_predicted_aligned_error(
                ptm_logits, max_bin=31, no_bins=self.distogram_bins
            )
        )

        return structure

    def structure_module_pass(self, sm_s, sm_z, true_aa, mask):
        """Exposes the structure module weights"""
        structure = self.trunk.structure_module(
            {"single": sm_s, "pair": sm_z},
            true_aa,
            mask.float(),
        )
        return structure

    def forward(self, aa, mask, residx, masking_pattern=None, num_recycles=None):
        """Full forward pass: LM embedding, folding trunk, and output heads."""
        assert not self.trunk is None
        s_s_0, s_z_0, aa, residx, mask = self.embed_for_folding_trunk(
            aa, mask, residx, masking_pattern
        )
        structure = self.folding_trunk(s_s_0, s_z_0, aa, residx, mask, num_recycles)
        return structure

    @torch.no_grad()
    def infer(
        self,
        sequences: T.Union[str, T.List[str]],
        residx=None,
        masking_pattern: T.Optional[torch.Tensor] = None,
        num_recycles: T.Optional[int] = None,
        residue_index_offset: T.Optional[int] = 512,
        chain_linker: T.Optional[str] = "G" * 25,
    ):
        """Runs a forward pass given input sequences.

        Args:
            sequences (Union[str, List[str]]): A list of sequences to make predictions for. Multimers can also be passed in,
                each chain should be separated by a ':' token (e.g. "<chain1>:<chain2>:<chain3>").
            residx (torch.Tensor): Residue indices of amino acids. Will assume contiguous if not provided.
            masking_pattern (torch.Tensor): Optional masking to pass to the input. Binary tensor of the same size
                as `aa`. Positions with 1 will be masked. ESMFold sometimes produces different samples when
                different masks are provided.
            num_recycles (int): How many recycle iterations to perform. If None, defaults to training max
                recycles (cfg.trunk.max_recycles), which is 4.
            residue_index_offset (int): Residue index separation between chains if predicting a multimer. Has no effect on
                single chain predictions. Default: 512.
            chain_linker (str): Linker to use between chains if predicting a multimer. Has no effect on single chain
                predictions. Default: length-25 poly-G ("G" * 25).
        """
        assert not self.trunk is None
        if isinstance(sequences, str):
            sequences = [sequences]

        aatype, mask, _residx, linker_mask, chain_index = batch_encode_sequences(
            sequences, residue_index_offset, chain_linker
        )

        if residx is None:
            residx = _residx
        elif not isinstance(residx, torch.Tensor):
            residx = collate_dense_tensors(residx)

        aatype, mask, residx, linker_mask = map(
            lambda x: x.to(self.device), (aatype, mask, residx, linker_mask)
        )

        output = self.forward(
            aatype,
            mask=mask,
            residx=residx,
            masking_pattern=masking_pattern,
            num_recycles=num_recycles,
        )

        # Linker residues are excluded from the existing-atom masks and
        # from the mean pLDDT.
        output["atom37_atom_exists"] = output[
            "atom37_atom_exists"
        ] * linker_mask.unsqueeze(2)

        output["mean_plddt"] = (output["plddt"] * output["atom37_atom_exists"]).sum(
            dim=(1, 2)
        ) / output["atom37_atom_exists"].sum(dim=(1, 2))
        output["chain_index"] = chain_index

        return output

    @torch.no_grad()
    def infer_embedding(
        self,
        sequences: T.Union[str, T.List[str]],
        residx=None,
        masking_pattern: T.Optional[torch.Tensor] = None,
        residue_index_offset: T.Optional[int] = 512,
        chain_linker: T.Optional[str] = "G" * 25,
        return_intermediates: bool = False,
    ):
        """From a list of sequence strings, obtain embeddings.

        Args:
            sequences (Union[str, List[str]]): A list of sequences to make predictions for. Multimers can also be passed in,
                each chain should be separated by a ':' token (e.g. "<chain1>:<chain2>:<chain3>").
            residx (torch.Tensor): Residue indices of amino acids. Will assume contiguous if not provided.
            masking_pattern (torch.Tensor): Optional masking to pass to the input. Binary tensor of the same size
                as `aa`. Positions with 1 will be masked. ESMFold sometimes produces different samples when
                different masks are provided.
            num_recycles (int): How many recycle iterations to perform. If None, defaults to training max
                recycles (cfg.trunk.max_recycles), which is 4.
            residue_index_offset (int): Residue index separation between chains if predicting a multimer. Has no effect on
                single chain predictions. Default: 512.
            chain_linker (str): Linker to use between chains if predicting a multimer. Has no effect on single chain
                predictions. Default: length-25 poly-G ("G" * 25).
        """
        if isinstance(sequences, str):
            sequences = [sequences]

        aatype, mask, _residx, linker_mask, chain_index = batch_encode_sequences(
            sequences, residue_index_offset, chain_linker
        )

        if residx is None:
            residx = _residx
        elif not isinstance(residx, torch.Tensor):
            residx = collate_dense_tensors(residx)

        aatype, mask, residx, linker_mask = map(
            lambda x: x.to(self.device), (aatype, mask, residx, linker_mask)
        )

        # NOTE(review): the inner ``torch.no_grad()`` blocks are redundant with
        # the ``@torch.no_grad()`` decorator, but harmless.
        if not return_intermediates:
            with torch.no_grad():
                s_s_0, s_z_0, _, residx, mask = self.embed_for_folding_trunk(
                    aatype, mask, residx, masking_pattern, return_intermediates
                )
            return {
                "s": s_s_0,
                "z": s_z_0,
                "mask": mask,
                "pos": residx,
            }
        else:
            with torch.no_grad():
                s_s_0, s_z_0, _, residx, mask, intermediates = (
                    self.embed_for_folding_trunk(
                        aatype, mask, residx, masking_pattern, return_intermediates
                    )
                )
            intermediates["z"] = s_z_0
            intermediates["mask"] = mask
            intermediates["pos"] = residx
            return intermediates

    def output_to_pdb(self, output: T.Dict) -> T.List[str]:
        """Returns the pbd (file) string from the model given the model output."""
        return output_to_pdb(output)

    def infer_pdbs(self, seqs: T.List[str], *args, **kwargs) -> T.List[str]:
        """Returns list of pdb (files) strings from the model given a list of input sequences."""
        output = self.infer(seqs, *args, **kwargs)
        return self.output_to_pdb(output)

    def infer_pdb(self, sequence: str, *args, **kwargs) -> str:
        """Returns the pdb (file) string from the model given an input sequence."""
        return self.infer_pdbs([sequence], *args, **kwargs)[0]

    def from_sm_s(self, sm_s, *args, **kwargs):
        """Run post-processing starting from structure-module single features.

        NOTE(review): ``FoldingTrunk`` in this version does not define
        ``from_sm_s`` (only a commented-out stub exists in ``_trunk.py``), so
        calling this will raise AttributeError — confirm intended usage.
        """
        structure, aa, residx, mask = self.trunk.from_sm_s(sm_s, *args, **kwargs)
        return self.post_processing(structure, aa, residx, mask)

    def set_chunk_size(self, chunk_size: T.Optional[int]):
        # This parameter means the axial attention will be computed
        # in a chunked manner. This should make the memory used more or less O(L) instead of O(L^2).
        # It's equivalent to running a for loop over chunks of the dimension we're iterative over,
        # where the chunk_size is the size of the chunks, so 128 would mean to parse 128-lengthed chunks.
        # Setting the value to None will return to default behavior, disable chunking.
        self.trunk.set_chunk_size(chunk_size)

    @property
    def device(self):
        """Device of the model, inferred from a parameter that always exists."""
        return self.esm_s_combine.device
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_pretrained.py | src/cheap/esmfold/_pretrained.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import torch
from ._esmfold import ESMFold
# https://github.com/facebookresearch/esm/blob/main/esm/esmfold/v1/pretrained.py
def _load_model(model_name):
    """Instantiate an ESMFold model and load pretrained weights.

    ``model_name`` is either a local ``.pt`` checkpoint path or the name of a
    checkpoint hosted on the FAIR ESM hub.
    """
    if model_name.endswith(".pt"):  # local, treat as filepath
        checkpoint = torch.load(str(Path(model_name)), map_location="cpu")
    else:  # load from hub
        url = f"https://dl.fbaipublicfiles.com/fair-esm/models/{model_name}.pt"
        checkpoint = torch.hub.load_state_dict_from_url(
            url, progress=False, map_location="cpu"
        )

    model = ESMFold(esmfold_config=checkpoint["cfg"]["model"])
    state = checkpoint["model"]

    # The frozen language model's weights are not stored in the checkpoint;
    # any other key absent from it is a real problem.
    absent = set(model.state_dict().keys()) - set(state.keys())
    missing_essential_keys = [k for k in absent if not k.startswith("esm.")]
    if missing_essential_keys:
        raise RuntimeError(f"Keys '{', '.join(missing_essential_keys)}' are missing.")

    model.load_state_dict(state, strict=False)
    return model
def esmfold_v0() -> ESMFold:
    """
    ESMFold v0 model with 3B ESM-2, 48 folding blocks.
    This version was used for the paper (Lin et al, 2022). It was trained
    on all PDB chains until 2020-05, to ensure temporal holdout with CASP14
    and the CAMEO validation and test set reported there.
    """
    return _load_model("esmfold_3B_v0")
def esmfold_v1() -> ESMFold:
    """
    ESMFold v1 model using 3B ESM-2, 48 folding blocks.
    ESMFold provides fast high accuracy atomic level structure prediction
    directly from the individual sequence of a protein. ESMFold uses the ESM2
    protein language model to extract meaningful representations from the
    protein sequence.
    """
    return _load_model("esmfold_3B_v1")
def esmfold_structure_module_only_8M() -> ESMFold:
    """
    ESMFold baseline model using 8M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 500K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_8M")
def esmfold_structure_module_only_8M_270K() -> ESMFold:
    """
    ESMFold baseline model using 8M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 270K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_8M_270K")
def esmfold_structure_module_only_35M() -> ESMFold:
    """
    ESMFold baseline model using 35M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 500K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_35M")
def esmfold_structure_module_only_35M_270K() -> ESMFold:
    """
    ESMFold baseline model using 35M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 270K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_35M_270K")
def esmfold_structure_module_only_150M() -> ESMFold:
    """
    ESMFold baseline model using 150M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 500K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_150M")
def esmfold_structure_module_only_150M_270K() -> ESMFold:
    """
    ESMFold baseline model using 150M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 270K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_150M_270K")
def esmfold_structure_module_only_650M() -> ESMFold:
    """
    ESMFold baseline model using 650M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 500K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_650M")
def esmfold_structure_module_only_650M_270K() -> ESMFold:
    """
    ESMFold baseline model using 650M ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 270K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_650M_270K")
def esmfold_structure_module_only_3B() -> ESMFold:
    """
    ESMFold baseline model using 3B ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 500K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_3B")
def esmfold_structure_module_only_3B_270K() -> ESMFold:
    """
    ESMFold baseline model using 3B ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 270K updates.
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_3B_270K")
def esmfold_structure_module_only_15B() -> ESMFold:
    """
    ESMFold baseline model using 15B ESM-2, 0 folding blocks.
    ESM-2 here is trained out to 270K updates.
    The 15B parameter ESM-2 was not trained out to 500K updates
    This is a model designed to test the capabilities of the language model
    when ablated for number of parameters in the language model.
    See table S1 in (Lin et al, 2022).
    """
    return _load_model("esmfold_structure_module_only_15B")
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_structure_module.py | src/cheap/esmfold/_structure_module.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import importlib
import math
import sys
from operator import mul
import torch
import torch.nn as nn
from typing import Optional, Tuple, Sequence
# TODO(amyxlu): isolate openfold dependencies
from openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_
from openfold.np.residue_constants import (
restype_rigid_group_default_frame,
restype_atom14_to_rigid_group,
restype_atom14_mask,
restype_atom14_rigid_group_positions,
)
from openfold.utils.feats import (
frames_and_literature_positions_to_atom14_pos,
torsion_angles_to_frames,
)
from openfold.utils.precision_utils import is_fp16_enabled
from openfold.utils.rigid_utils import Rotation, Rigid
from openfold.utils.tensor_utils import (
dict_multimap,
permute_final_dims,
flatten_final_dims,
)
attn_core_inplace_cuda = importlib.import_module("attn_core_inplace_cuda")
class AngleResnetBlock(nn.Module):
    """A single pre-activation residual block of the angle resnet.

    Applies ReLU -> Linear -> ReLU -> Linear and adds the result back onto
    the untouched input (identity shortcut).
    """

    def __init__(self, c_hidden):
        """
        Args:
            c_hidden:
                Hidden channel dimension
        """
        super(AngleResnetBlock, self).__init__()
        self.c_hidden = c_hidden
        self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu")
        self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final")
        self.relu = nn.ReLU()

    def forward(self, a: torch.Tensor) -> torch.Tensor:
        # Pre-activation residual branch; the input itself is the shortcut.
        shortcut = a
        out = self.linear_1(self.relu(a))
        out = self.linear_2(self.relu(out))
        return out + shortcut
class AngleResnet(nn.Module):
    """
    Implements Algorithm 20, lines 11-14 (the torsion-angle prediction head).
    """

    def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon):
        """
        Args:
            c_in:
                Input channel dimension
            c_hidden:
                Hidden channel dimension
            no_blocks:
                Number of resnet blocks
            no_angles:
                Number of torsion angles to generate
            epsilon:
                Small constant for normalization
        """
        super(AngleResnet, self).__init__()
        self.c_in = c_in
        self.c_hidden = c_hidden
        self.no_blocks = no_blocks
        self.no_angles = no_angles
        self.eps = epsilon

        self.linear_in = Linear(self.c_in, self.c_hidden)
        self.linear_initial = Linear(self.c_in, self.c_hidden)
        self.layers = nn.ModuleList(
            AngleResnetBlock(c_hidden=self.c_hidden) for _ in range(self.no_blocks)
        )
        self.linear_out = Linear(self.c_hidden, self.no_angles * 2)

        self.relu = nn.ReLU()

    def forward(
        self, s: torch.Tensor, s_initial: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            s:
                [*, C_hidden] single embedding
            s_initial:
                [*, C_hidden] single embedding as of the start of the
                StructureModule
        Returns:
            ([*, no_angles, 2] unnormalized angles,
             [*, no_angles, 2] unit-normalized angles)
        """
        # NOTE: The ReLUs applied to the inputs are absent from the supplement
        # pseudocode but present in the released source; they are kept here
        # for compatibility with the pretrained weights.
        x = self.linear_in(self.relu(s)) + self.linear_initial(self.relu(s_initial))

        for block in self.layers:
            x = block(x)
        x = self.relu(x)

        # [*, no_angles * 2]
        x = self.linear_out(x)

        # [*, no_angles, 2]
        x = x.view(x.shape[:-1] + (-1, 2))
        unnormalized = x

        # Normalize each (sin, cos)-style pair to unit length, clamping the
        # squared norm by eps to avoid division by zero.
        denom = torch.sqrt(
            torch.clamp(
                torch.sum(x**2, dim=-1, keepdim=True),
                min=self.eps,
            )
        )
        return unnormalized, x / denom
class InvariantPointAttention(nn.Module):
    """
    Implements Algorithm 22 (Invariant Point Attention).

    Attention over the single representation combining three score terms:
    scalar query/key dot products, a pair-representation bias, and squared
    distances between 3D "points" mapped into global coordinates via the
    per-residue rigid transforms ``r``.
    """

    def __init__(
        self,
        c_s: int,
        c_z: int,
        c_hidden: int,
        no_heads: int,
        no_qk_points: int,
        no_v_points: int,
        inf: float = 1e5,
        eps: float = 1e-8,
    ):
        """
        Args:
            c_s:
                Single representation channel dimension
            c_z:
                Pair representation channel dimension
            c_hidden:
                Hidden channel dimension
            no_heads:
                Number of attention heads
            no_qk_points:
                Number of query/key points to generate
            no_v_points:
                Number of value points to generate
            inf:
                Large value used to mask out attention logits
            eps:
                Small value stabilizing the output point norms
        """
        super(InvariantPointAttention, self).__init__()

        self.c_s = c_s
        self.c_z = c_z
        self.c_hidden = c_hidden
        self.no_heads = no_heads
        self.no_qk_points = no_qk_points
        self.no_v_points = no_v_points
        self.inf = inf
        self.eps = eps

        # These linear layers differ from their specifications in the
        # supplement. There, they lack bias and use Glorot initialization.
        # Here as in the official source, they have bias and use the default
        # Lecun initialization.
        hc = self.c_hidden * self.no_heads
        self.linear_q = Linear(self.c_s, hc)
        self.linear_kv = Linear(self.c_s, 2 * hc)

        hpq = self.no_heads * self.no_qk_points * 3
        self.linear_q_points = Linear(self.c_s, hpq)

        hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3
        self.linear_kv_points = Linear(self.c_s, hpkv)
        # NOTE: the reference implementation computed an unused value-point
        # width (`hpv`) here; the value points actually come out of
        # linear_kv_points above, so the dead local has been removed.

        self.linear_b = Linear(self.c_z, self.no_heads)

        self.head_weights = nn.Parameter(torch.zeros((no_heads)))
        ipa_point_weights_init_(self.head_weights)

        concat_out_dim = self.no_heads * (
            self.c_z + self.c_hidden + self.no_v_points * 4
        )
        self.linear_out = Linear(concat_out_dim, self.c_s, init="final")

        self.softmax = nn.Softmax(dim=-1)
        self.softplus = nn.Softplus()

    def forward(
        self,
        s: torch.Tensor,
        z: Optional[torch.Tensor],
        r: Rigid,
        mask: torch.Tensor,
        inplace_safe: bool = False,
        _offload_inference: bool = False,
        _z_reference_list: Optional[Sequence[torch.Tensor]] = None,
    ) -> torch.Tensor:
        """
        Args:
            s:
                [*, N_res, C_s] single representation
            z:
                [*, N_res, N_res, C_z] pair representation
            r:
                [*, N_res] transformation object
            mask:
                [*, N_res] mask
            inplace_safe:
                If True, mutate intermediate attention buffers in place
                (and use the custom CUDA in-place softmax) to save memory.
            _offload_inference:
                If True, move the pair representation to CPU while it is
                not needed; requires _z_reference_list.
            _z_reference_list:
                Single-element list holding the pair tensor when offloading.
        Returns:
            [*, N_res, C_s] single representation update
        """
        # The pair representation is handled through a single-element list so
        # that the CPU-offload path can swap the underlying tensor in place.
        if _offload_inference and inplace_safe:
            z = _z_reference_list
        else:
            z = [z]

        #######################################
        # Generate scalar and point activations
        #######################################
        # [*, N_res, H * C_hidden]
        q = self.linear_q(s)
        kv = self.linear_kv(s)

        # [*, N_res, H, C_hidden]
        q = q.view(q.shape[:-1] + (self.no_heads, -1))

        # [*, N_res, H, 2 * C_hidden]
        kv = kv.view(kv.shape[:-1] + (self.no_heads, -1))

        # [*, N_res, H, C_hidden]
        k, v = torch.split(kv, self.c_hidden, dim=-1)

        # [*, N_res, H * P_q * 3]
        q_pts = self.linear_q_points(s)

        # This is kind of clunky, but it's how the original does it
        # [*, N_res, H * P_q, 3]
        q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)
        q_pts = torch.stack(q_pts, dim=-1)
        # Map the local-frame points into global coordinates.
        q_pts = r[..., None].apply(q_pts)

        # [*, N_res, H, P_q, 3]
        q_pts = q_pts.view(q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3))

        # [*, N_res, H * (P_q + P_v) * 3]
        kv_pts = self.linear_kv_points(s)

        # [*, N_res, H * (P_q + P_v), 3]
        kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)
        kv_pts = torch.stack(kv_pts, dim=-1)
        kv_pts = r[..., None].apply(kv_pts)

        # [*, N_res, H, (P_q + P_v), 3]
        kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3))

        # [*, N_res, H, P_q/P_v, 3]
        k_pts, v_pts = torch.split(
            kv_pts, [self.no_qk_points, self.no_v_points], dim=-2
        )

        ##########################
        # Compute attention scores
        ##########################
        # [*, N_res, N_res, H]
        b = self.linear_b(z[0])

        if _offload_inference:
            # Park the pair representation on CPU until the output stage.
            assert sys.getrefcount(z[0]) == 2
            z[0] = z[0].cpu()

        # [*, H, N_res, N_res]
        # The scalar QK product is forced to fp32 under autocast for
        # numerical stability.
        if is_fp16_enabled():
            with torch.cuda.amp.autocast(enabled=False):
                a = torch.matmul(
                    permute_final_dims(q.float(), (1, 0, 2)),  # [*, H, N_res, C_hidden]
                    permute_final_dims(k.float(), (1, 2, 0)),  # [*, H, C_hidden, N_res]
                )
        else:
            a = torch.matmul(
                permute_final_dims(q, (1, 0, 2)),  # [*, H, N_res, C_hidden]
                permute_final_dims(k, (1, 2, 0)),  # [*, H, C_hidden, N_res]
            )

        a *= math.sqrt(1.0 / (3 * self.c_hidden))
        a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))

        # [*, N_res, N_res, H, P_q, 3]
        pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)
        if inplace_safe:
            pt_att *= pt_att
        else:
            pt_att = pt_att**2

        # [*, N_res, N_res, H, P_q]
        # Summing unbound slices instead of torch.sum keeps peak memory lower.
        pt_att = sum(torch.unbind(pt_att, dim=-1))
        head_weights = self.softplus(self.head_weights).view(
            *((1,) * len(pt_att.shape[:-2]) + (-1, 1))
        )
        head_weights = head_weights * math.sqrt(
            1.0 / (3 * (self.no_qk_points * 9.0 / 2))
        )
        if inplace_safe:
            pt_att *= head_weights
        else:
            pt_att = pt_att * head_weights

        # [*, N_res, N_res, H]
        pt_att = torch.sum(pt_att, dim=-1) * (-0.5)
        # [*, N_res, N_res]
        square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)
        square_mask = self.inf * (square_mask - 1)

        # [*, H, N_res, N_res]
        pt_att = permute_final_dims(pt_att, (2, 0, 1))

        if inplace_safe:
            a += pt_att
            del pt_att
            a += square_mask.unsqueeze(-3)
            # in-place softmax
            attn_core_inplace_cuda.forward_(
                a,
                reduce(mul, a.shape[:-1]),
                a.shape[-1],
            )
        else:
            a = a + pt_att
            a = a + square_mask.unsqueeze(-3)
            a = self.softmax(a)

        ################
        # Compute output
        ################
        # [*, N_res, H, C_hidden]
        o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)

        # [*, N_res, H * C_hidden]
        o = flatten_final_dims(o, 2)

        # [*, H, 3, N_res, P_v]
        if inplace_safe:
            v_pts = permute_final_dims(v_pts, (1, 3, 0, 2))
            o_pt = [torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3)]
            o_pt = torch.stack(o_pt, dim=-3)
        else:
            o_pt = torch.sum(
                (
                    a[..., None, :, :, None]
                    * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :]
                ),
                dim=-2,
            )

        # [*, N_res, H, P_v, 3]
        o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))
        # Map attended points back into each residue's local frame.
        o_pt = r[..., None, None].invert_apply(o_pt)

        # [*, N_res, H * P_v]
        o_pt_norm = flatten_final_dims(
            torch.sqrt(torch.sum(o_pt**2, dim=-1) + self.eps), 2
        )

        # [*, N_res, H * P_v, 3]
        o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)

        if _offload_inference:
            z[0] = z[0].to(o_pt.device)

        # [*, N_res, H, C_z]
        o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))

        # [*, N_res, H * C_z]
        o_pair = flatten_final_dims(o_pair, 2)

        # [*, N_res, C_s]
        s = self.linear_out(
            torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(
                dtype=z[0].dtype
            )
        )

        return s
class BackboneUpdate(nn.Module):
    """
    Implements part of Algorithm 23 (backbone rigid-update prediction).

    A single linear layer predicting a 6-dimensional update vector from the
    single representation.
    """

    def __init__(self, c_s):
        """
        Args:
            c_s:
                Single representation channel dimension
        """
        super(BackboneUpdate, self).__init__()
        self.c_s = c_s
        self.linear = Linear(self.c_s, 6, init="final")

    # Return annotation fixed: the original claimed Tuple[Tensor, Tensor],
    # but a single tensor is returned.
    def forward(self, s: torch.Tensor) -> torch.Tensor:
        """
        Args:
            s:
                [*, N_res, C_s] single representation
        Returns:
            [*, N_res, 6] update vector
        """
        # [*, 6]
        update = self.linear(s)
        return update
class StructureModuleTransitionLayer(nn.Module):
    """One residual Linear -> ReLU -> Linear -> ReLU -> Linear layer."""

    def __init__(self, c):
        super(StructureModuleTransitionLayer, self).__init__()
        self.c = c
        self.linear_1 = Linear(self.c, self.c, init="relu")
        self.linear_2 = Linear(self.c, self.c, init="relu")
        self.linear_3 = Linear(self.c, self.c, init="final")
        self.relu = nn.ReLU()

    def forward(self, s):
        # Residual branch around the three linear layers.
        residual = s
        out = self.relu(self.linear_1(s))
        out = self.relu(self.linear_2(out))
        out = self.linear_3(out)
        return out + residual
class StructureModuleTransition(nn.Module):
    """A stack of transition layers followed by dropout and layer norm."""

    def __init__(self, c, num_layers, dropout_rate):
        super(StructureModuleTransition, self).__init__()
        self.c = c
        self.num_layers = num_layers
        self.dropout_rate = dropout_rate

        self.layers = nn.ModuleList(
            StructureModuleTransitionLayer(self.c) for _ in range(self.num_layers)
        )
        self.dropout = nn.Dropout(self.dropout_rate)
        self.layer_norm = LayerNorm(self.c)

    def forward(self, s):
        for layer in self.layers:
            s = layer(s)
        return self.layer_norm(self.dropout(s))
class StructureModule(nn.Module):
    """
    AlphaFold2-style structure module: iteratively refines per-residue rigid
    transforms with Invariant Point Attention, then predicts torsion angles
    and atom14 coordinates.
    """

    def __init__(
        self,
        c_s,
        c_z,
        c_ipa,
        c_resnet,
        no_heads_ipa,
        no_qk_points,
        no_v_points,
        dropout_rate,
        no_blocks,
        no_transition_layers,
        no_resnet_blocks,
        no_angles,
        trans_scale_factor,
        epsilon,
        inf,
        **kwargs,
    ):
        """
        Args:
            c_s:
                Single representation channel dimension
            c_z:
                Pair representation channel dimension
            c_ipa:
                IPA hidden channel dimension
            c_resnet:
                Angle resnet (Alg. 23 lines 11-14) hidden channel dimension
            no_heads_ipa:
                Number of IPA heads
            no_qk_points:
                Number of query/key points to generate during IPA
            no_v_points:
                Number of value points to generate during IPA
            dropout_rate:
                Dropout rate used throughout the layer
            no_blocks:
                Number of structure module blocks
            no_transition_layers:
                Number of layers in the single representation transition
                (Alg. 23 lines 8-9)
            no_resnet_blocks:
                Number of blocks in the angle resnet
            no_angles:
                Number of angles to generate in the angle resnet
            trans_scale_factor:
                Scale of single representation transition hidden dimension
            epsilon:
                Small number used in angle resnet normalization
            inf:
                Large number used for attention masking
        """
        super(StructureModule, self).__init__()

        self.c_s = c_s
        self.c_z = c_z
        self.c_ipa = c_ipa
        self.c_resnet = c_resnet
        self.no_heads_ipa = no_heads_ipa
        self.no_qk_points = no_qk_points
        self.no_v_points = no_v_points
        self.dropout_rate = dropout_rate
        self.no_blocks = no_blocks
        self.no_transition_layers = no_transition_layers
        self.no_resnet_blocks = no_resnet_blocks
        self.no_angles = no_angles
        self.trans_scale_factor = trans_scale_factor
        self.epsilon = epsilon
        self.inf = inf

        # Buffers to be lazily initialized later (see _init_residue_constants)
        # self.default_frames
        # self.group_idx
        # self.atom_mask
        # self.lit_positions

        self.layer_norm_s = LayerNorm(self.c_s)
        self.layer_norm_z = LayerNorm(self.c_z)

        self.linear_in = Linear(self.c_s, self.c_s)

        self.ipa = InvariantPointAttention(
            self.c_s,
            self.c_z,
            self.c_ipa,
            self.no_heads_ipa,
            self.no_qk_points,
            self.no_v_points,
            inf=self.inf,
            eps=self.epsilon,
        )

        self.ipa_dropout = nn.Dropout(self.dropout_rate)
        self.layer_norm_ipa = LayerNorm(self.c_s)

        self.transition = StructureModuleTransition(
            self.c_s,
            self.no_transition_layers,
            self.dropout_rate,
        )

        self.bb_update = BackboneUpdate(self.c_s)

        self.angle_resnet = AngleResnet(
            self.c_s,
            self.c_resnet,
            self.no_resnet_blocks,
            self.no_angles,
            self.epsilon,
        )

    def forward(
        self,
        evoformer_output_dict,
        aatype,
        mask=None,
        inplace_safe=False,
        _offload_inference=False,
    ):
        """
        Args:
            evoformer_output_dict:
                Dictionary containing:
                    "single":
                        [*, N_res, C_s] single representation
                    "pair":
                        [*, N_res, N_res, C_z] pair representation
            aatype:
                [*, N_res] amino acid indices
            mask:
                Optional [*, N_res] sequence mask
            inplace_safe:
                Passed through to IPA to allow in-place attention updates.
            _offload_inference:
                If True, park the pair representation on CPU between uses.
        Returns:
            A dictionary of outputs (one stacked entry per block, plus the
            final "single" representation)
        """
        s = evoformer_output_dict["single"]

        if mask is None:
            # [*, N]
            mask = s.new_ones(s.shape[:-1])

        # [*, N, C_s]
        s = self.layer_norm_s(s)
        # [*, N, N, C_z]
        z = self.layer_norm_z(evoformer_output_dict["pair"])
        # FIX: removed dead `s_ = s.clone()` / `z_ = z.clone()` — they only
        # fed the disabled debug outputs below, and the z clone duplicated
        # the entire pair representation in memory.

        z_reference_list = None
        if _offload_inference:
            assert sys.getrefcount(evoformer_output_dict["pair"]) == 2
            evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu()
            z_reference_list = [z]
            z = None

        # [*, N, C_s]
        s_initial = s
        s = self.linear_in(s)

        # [*, N] — all residues start at the identity transform.
        rigids = Rigid.identity(
            s.shape[:-1],
            s.dtype,
            s.device,
            self.training,
            fmt="quat",
        )
        outputs = []
        for i in range(self.no_blocks):
            # [*, N, C_s]
            s = s + self.ipa(
                s,
                z,
                rigids,
                mask,
                inplace_safe=inplace_safe,
                _offload_inference=_offload_inference,
                _z_reference_list=z_reference_list,
            )
            s = self.ipa_dropout(s)
            s = self.layer_norm_ipa(s)
            s = self.transition(s)

            # [*, N]
            rigids = rigids.compose_q_update_vec(self.bb_update(s))

            # To hew as closely as possible to AlphaFold, we convert our
            # quaternion-based transformations to rotation-matrix ones
            # here
            backb_to_global = Rigid(
                Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None),
                rigids.get_trans(),
            )

            backb_to_global = backb_to_global.scale_translation(self.trans_scale_factor)

            # [*, N, 7, 2]
            unnormalized_angles, angles = self.angle_resnet(s, s_initial)

            all_frames_to_global = self.torsion_angles_to_frames(
                backb_to_global,
                angles,
                aatype,
            )

            pred_xyz = self.frames_and_literature_positions_to_atom14_pos(
                all_frames_to_global,
                aatype,
            )

            scaled_rigids = rigids.scale_translation(self.trans_scale_factor)

            preds = {
                "frames": scaled_rigids.to_tensor_7(),
                "sidechain_frames": all_frames_to_global.to_tensor_4x4(),
                "unnormalized_angles": unnormalized_angles,
                "angles": angles,
                "positions": pred_xyz,
                "states": s,
                # #### ADDED (debug outputs, disabled) ####
                # If re-enabling these, re-create post-layernorm clones of
                # s/z (`s_`, `z_`) in forward() — the dead clones were removed:
                # "s_initial": s_initial,
                # "s_after_layernorm": s_,
                # "z_after_layernorm": z_,
                ###############
            }

            outputs.append(preds)

            rigids = rigids.stop_rot_gradient()

        del z, z_reference_list

        if _offload_inference:
            evoformer_output_dict["pair"] = evoformer_output_dict["pair"].to(s.device)

        outputs = dict_multimap(torch.stack, outputs)
        outputs["single"] = s

        return outputs

    def _init_residue_constants(self, float_dtype, device):
        # Lazily registers the residue-constant tables as non-persistent
        # buffers on first use, so they follow the module's device but stay
        # out of the state dict.
        if not hasattr(self, "default_frames"):
            self.register_buffer(
                "default_frames",
                torch.tensor(
                    restype_rigid_group_default_frame,
                    dtype=float_dtype,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )
        if not hasattr(self, "group_idx"):
            self.register_buffer(
                "group_idx",
                torch.tensor(
                    restype_atom14_to_rigid_group,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )
        if not hasattr(self, "atom_mask"):
            self.register_buffer(
                "atom_mask",
                torch.tensor(
                    restype_atom14_mask,
                    dtype=float_dtype,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )
        if not hasattr(self, "lit_positions"):
            self.register_buffer(
                "lit_positions",
                torch.tensor(
                    restype_atom14_rigid_group_positions,
                    dtype=float_dtype,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )

    def torsion_angles_to_frames(self, r, alpha, f):
        # Lazily initialize the residue constants on the correct device
        self._init_residue_constants(alpha.dtype, alpha.device)
        # Separated purely to make testing less annoying
        return torsion_angles_to_frames(r, alpha, f, self.default_frames)

    def frames_and_literature_positions_to_atom14_pos(
        self, r, f
    ):  # [*, N, 8]  # [*, N]
        # Lazily initialize the residue constants on the correct device
        self._init_residue_constants(r.get_rots().dtype, r.get_rots().device)
        return frames_and_literature_positions_to_atom14_pos(
            r,
            f,
            self.default_frames,
            self.group_idx,
            self.atom_mask,
            self.lit_positions,
        )
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/__init__.py | src/cheap/esmfold/__init__.py | ESMFOLD_S_DIM = 1024 # dimension of the s_s_0 tensor input to ESMFold folding trunk
ESMFOLD_Z_DIM = 128 # dimension of the paired representation s_z_0 input
from ._trunk import RelativePosition, FoldingTrunk, FoldingTrunkConfig
from ._misc import batch_encode_sequences, output_to_pdb, make_s_z_0
from ._pretrained import esmfold_v1
from ._esmfold_embed_only import esmfold_v1_embed_only, ESMFoldEmbed
from ._esmfold import ESMFoldConfig, ESMFold
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_misc.py | src/cheap/esmfold/_misc.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as T
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from torch import nn
from ..openfold_utils import _residue_constants as residue_constants
from ..openfold_utils._protein import Protein as OFProtein
from ..openfold_utils._protein import to_pdb
from ..openfold_utils._feats import atom14_to_atom37
def encode_sequence(
    seq: str,
    residue_index_offset: T.Optional[int] = 512,
    chain_linker: T.Optional[str] = "G" * 25,
) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Tokenize a (possibly multi-chain) ':'-separated sequence.

    Chains are joined with a poly-G linker; each chain's residue indices are
    shifted by ``residue_index_offset`` so distinct chains appear far apart
    in sequence space.

    Returns:
        (aatype, residx, linker_mask, chain_index) per-residue tensors.
        Linker positions get mask 0 and inherit the preceding chain's id.
    """
    chain_linker = chain_linker if chain_linker is not None else ""
    residue_index_offset = (
        residue_index_offset if residue_index_offset is not None else 0
    )

    chains = seq.split(":")
    joined = chain_linker.join(chains)

    unk_idx = residue_constants.restype_order_with_x["X"]
    encoded = torch.tensor(
        [residue_constants.restype_order_with_x.get(aa, unk_idx) for aa in joined]
    )

    residx = torch.arange(len(encoded))
    if residue_index_offset > 0:
        pos = 0
        for chain_no, chain in enumerate(chains):
            span = len(chain) + len(chain_linker)
            residx[pos : pos + span] += chain_no * residue_index_offset
            pos += span

    linker_mask = torch.ones_like(encoded, dtype=torch.float32)
    chain_ids = []
    cursor = 0
    for chain_no, chain in enumerate(chains):
        if chain_no > 0:
            chain_ids.extend([chain_no - 1] * len(chain_linker))
        chain_ids.extend([chain_no] * len(chain))
        cursor += len(chain)
        # Zero out the linker that follows this chain (no-op after the last).
        linker_mask[cursor : cursor + len(chain_linker)] = 0
        cursor += len(chain_linker)

    chain_index = torch.tensor(chain_ids, dtype=torch.int64)
    return encoded, residx, linker_mask, chain_index
def batch_encode_sequences(
    sequences: T.Sequence[str],
    residue_index_offset: T.Optional[int] = 512,
    chain_linker: T.Optional[str] = "G" * 25,
) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Encode a batch of sequences and pad them into dense tensors.

    Returns:
        (aatype, mask, residx, linker_mask, chain_index) batch tensors;
        chain_index is padded with -1.
    """
    aatypes, residxs, linker_masks, chain_indices = [], [], [], []
    for seq in sequences:
        aa, ri, lm, ci = encode_sequence(
            seq,
            residue_index_offset=residue_index_offset,
            chain_linker=chain_linker,
        )
        aatypes.append(aa)
        residxs.append(ri)
        linker_masks.append(lm)
        chain_indices.append(ci)

    aatype = collate_dense_tensors(aatypes)
    # Per-sequence validity mask: ones over each sequence's true length.
    mask = collate_dense_tensors([aatype.new_ones(len(aa)) for aa in aatypes])
    residx = collate_dense_tensors(residxs)
    linker_mask = collate_dense_tensors(linker_masks)
    chain_index = collate_dense_tensors(chain_indices, -1)
    return aatype, mask, residx, linker_mask, chain_index
def output_to_pdb(output: T.Dict) -> T.List[str]:
    """Returns the pbd (file) string from the model given the model output."""
    # atom14_to_atom37 must run before the numpy conversion below: it fails
    # on recent numpy when handed arrays, but works on torch tensors.
    final_atom_positions = atom14_to_atom37(output["positions"][-1], output)
    output = {k: v.to("cpu").numpy() for k, v in output.items()}
    final_atom_positions = final_atom_positions.cpu().numpy()
    final_atom_mask = output["atom37_atom_exists"]

    pdbs = []
    batch_size = output["aatype"].shape[0]
    for idx in range(batch_size):
        protein = OFProtein(
            aatype=output["aatype"][idx],
            atom_positions=final_atom_positions[idx],
            atom_mask=final_atom_mask[idx],
            # PDB residue numbering is 1-based.
            residue_index=output["residue_index"][idx] + 1,
            b_factors=output["plddt"][idx],
            chain_index=output["chain_index"][idx]
            if "chain_index" in output
            else None,
        )
        pdbs.append(to_pdb(protein))
    return pdbs
def collate_dense_tensors(
    samples: T.List[torch.Tensor], pad_v: float = 0
) -> torch.Tensor:
    """Stack tensors of possibly different sizes into one padded tensor.

    Given tensors shaped (d_i1, ..., d_iK), produces a tensor of shape
    (N, max_i d_i1, ..., max_i d_iK), padded with *pad_v*.

    Raises:
        RuntimeError: if the samples do not all share the same rank.
    """
    if not samples:
        return torch.Tensor()
    ranks = {x.dim() for x in samples}
    if len(ranks) != 1:
        raise RuntimeError(
            f"Samples has varying dimensions: {[x.dim() for x in samples]}"
        )
    (device,) = {x.device for x in samples}  # assumes all on same device
    max_shape = [max(sizes) for sizes in zip(*(x.shape for x in samples))]
    out = torch.empty(
        len(samples), *max_shape, dtype=samples[0].dtype, device=device
    )
    out.fill_(pad_v)
    for dst, src in zip(out, samples):
        # Copy each sample into the top-left corner of its padded slot.
        dst[tuple(slice(0, d) for d in src.shape)] = src
    return out
class Attention(nn.Module):
    """Multi-head self-attention with optional mask, pairwise bias, and gating."""

    def __init__(self, embed_dim, num_heads, head_width, gated=False):
        super().__init__()
        assert embed_dim == num_heads * head_width

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_width = head_width

        # Fused QKV projection (no bias); the output projection keeps a bias,
        # which is zero-initialized below.
        self.proj = nn.Linear(embed_dim, embed_dim * 3, bias=False)
        self.o_proj = nn.Linear(embed_dim, embed_dim, bias=True)
        self.gated = gated
        if gated:
            # Zero weight + ones bias: the gate starts input-independent at
            # sigmoid(1) for every channel.
            self.g_proj = nn.Linear(embed_dim, embed_dim)
            torch.nn.init.zeros_(self.g_proj.weight)
            torch.nn.init.ones_(self.g_proj.bias)

        self.rescale_factor = self.head_width**-0.5

        torch.nn.init.zeros_(self.o_proj.bias)

    def forward(self, x, mask=None, bias=None, indices=None):
        """
        Basic self attention with optional mask and external pairwise bias.
        To handle sequences of different lengths, use mask.

        Inputs:
            x: batch of input sequences (.. x L x C)
            mask: batch of boolean masks where 1=valid, 0=padding position (.. x L_k). optional.
            bias: batch of scalar pairwise attention biases (.. x Lq x Lk x num_heads). optional.
            indices: unused; accepted for interface compatibility.

        Outputs:
            sequence projection (B x L x embed_dim), attention maps (B x L x L x num_heads)
        """
        t = rearrange(self.proj(x), "... l (h c) -> ... h l c", h=self.num_heads)
        q, k, v = t.chunk(3, dim=-1)

        # Scale queries by 1/sqrt(head_width) before the dot product.
        q = self.rescale_factor * q
        a = torch.einsum("...qc,...kc->...qk", q, k)

        # Add external attention bias.
        if bias is not None:
            a = a + rearrange(bias, "... lq lk h -> ... h lq lk")

        # Do not attend to padding tokens.
        if mask is not None:
            mask = repeat(
                mask, "... lk -> ... h lq lk", h=self.num_heads, lq=q.shape[-2]
            )
            a = a.masked_fill(mask == False, -np.inf)

        a = F.softmax(a, dim=-1)

        y = torch.einsum("...hqk,...hkc->...qhc", a, v)
        y = rearrange(y, "... h c -> ... (h c)", h=self.num_heads)

        if self.gated:
            y = self.g_proj(x).sigmoid() * y
        y = self.o_proj(y)

        # NOTE(review): `a` is already laid out (... h lq lk); this pattern
        # names its last three dims (lq, lk, h) and therefore permutes them
        # rather than moving the head dim last as the docstring suggests.
        # Kept as-is for parity with the upstream ESM source — confirm the
        # intended layout before relying on the returned attention maps.
        return y, rearrange(a, "... lq lk h -> ... h lq lk")
class Dropout(nn.Module):
    """
    Implementation of dropout with the ability to share the dropout mask
    along a particular dimension.

    The mask tensor has size 1 along every dimension listed in ``batch_dim``
    and is broadcast across those dimensions, so the same dropout pattern is
    shared along them.
    """

    def __init__(self, r: float, batch_dim: T.Union[int, T.List[int]]):
        """
        Args:
            r: Dropout probability.
            batch_dim: Dimension(s) along which the mask is shared.
        """
        super(Dropout, self).__init__()

        self.r = r
        # isinstance instead of `type(batch_dim) == int`: the idiomatic type
        # check, and it also accepts int subclasses.
        if isinstance(batch_dim, int):
            batch_dim = [batch_dim]
        self.batch_dim = batch_dim
        self.dropout = nn.Dropout(self.r)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply dropout with the mask broadcast along ``batch_dim``."""
        shape = list(x.shape)
        if self.batch_dim is not None:
            for bd in self.batch_dim:
                shape[bd] = 1
        # Sample a (scaled) keep-mask of the reduced shape and broadcast it.
        return x * self.dropout(x.new_ones(shape))
class SequenceToPair(nn.Module):
    """Project a sequence state to a pairwise state via products/differences."""

    def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim):
        super().__init__()

        self.layernorm = nn.LayerNorm(sequence_state_dim)
        self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True)
        self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True)

        torch.nn.init.zeros_(self.proj.bias)
        torch.nn.init.zeros_(self.o_proj.bias)

    def forward(self, sequence_state):
        """
        Inputs:
            sequence_state: B x L x sequence_state_dim

        Output:
            pairwise_state: B x L x L x pairwise_state_dim

        Intermediate state:
            B x L x L x 2*inner_dim
        """
        assert len(sequence_state.shape) == 3

        hidden = self.proj(self.layernorm(sequence_state))
        q, k = hidden.chunk(2, dim=-1)

        # For every (i, j) position pair, concatenate the elementwise product
        # and difference of the two projections along the channel dim.
        prod = q[:, None, :, :] * k[:, :, None, :]
        diff = q[:, None, :, :] - k[:, :, None, :]
        return self.o_proj(torch.cat([prod, diff], dim=-1))
class PairToSequence(nn.Module):
    """Turn the pairwise state into per-head attention biases."""

    def __init__(self, pairwise_state_dim, num_heads):
        super().__init__()

        self.layernorm = nn.LayerNorm(pairwise_state_dim)
        self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False)

    def forward(self, pairwise_state):
        """
        Inputs:
            pairwise_state: B x L x L x pairwise_state_dim

        Output:
            pairwise_bias: B x L x L x num_heads
        """
        assert len(pairwise_state.shape) == 4
        return self.linear(self.layernorm(pairwise_state))
class ResidueMLP(nn.Module):
    """Residual two-layer MLP: x + Linear(ReLU(Linear(norm(x))))."""

    def __init__(self, embed_dim, inner_dim, norm=nn.LayerNorm, dropout=0):
        super().__init__()

        branch = [
            norm(embed_dim),
            nn.Linear(embed_dim, inner_dim),
            nn.ReLU(),
            nn.Linear(inner_dim, embed_dim),
            nn.Dropout(dropout),
        ]
        self.mlp = nn.Sequential(*branch)

    def forward(self, x):
        # Residual connection around the MLP branch.
        return x + self.mlp(x)
def make_s_z_0(s_s_0):
    """Create an all-zero pairwise state matching the batch/length of s_s_0."""
    # Local import to avoid a circular import with the package __init__.
    from . import ESMFOLD_Z_DIM

    batch, length, _ = s_s_0.shape
    return s_s_0.new_zeros(batch, length, length, ESMFOLD_Z_DIM)
def get_esmfold_model_state(model_name="esmfold_3B_v1"):
    """Load an ESMFold checkpoint's config and state dict.

    *model_name* is either a local ``.pt`` path or the name of a released
    checkpoint to fetch from the fair-esm download server.
    """
    if model_name.endswith(".pt"):
        # Local checkpoint path.
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        model_data = torch.load(str(Path(model_name)), map_location="cpu")
    else:
        # Fetch the named release from the hub.
        url = f"https://dl.fbaipublicfiles.com/fair-esm/models/{model_name}.pt"
        model_data = torch.hub.load_state_dict_from_url(
            url, progress=False, map_location="cpu"
        )
    return model_data["cfg"]["model"], model_data["model"]
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_categorical_mixture.py | src/cheap/esmfold/_categorical_mixture.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class CategoricalMixture:
    """A categorical distribution over evenly spaced bin centers."""

    def __init__(self, param, bins=50, start=0, end=1):
        # param: ... x bins tensor of logits.
        self.logits = param
        edges = torch.linspace(
            start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype
        )
        # Midpoint of each bin.
        self.v_bins = (edges[:-1] + edges[1:]) / 2

    def log_prob(self, true):
        # Shapes: self.logits is ... x bins, `true` is ... .
        # For each element of `true`, find the index of the nearest bin
        # center, then gather that bin's log-probability.
        bins_view = self.v_bins.view((1,) * true.ndim + (-1,))
        true_index = (true.unsqueeze(-1) - bins_view).abs().argmin(-1)
        log_probs = self.logits.log_softmax(-1)
        return torch.take_along_dim(
            log_probs, true_index.unsqueeze(-1), dim=-1
        ).squeeze(-1)

    def mean(self):
        # Expected value: probability-weighted sum of bin centers.
        return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1)
def categorical_lddt(logits, bins=50):
    """Expected lDDT from per-bin logits shaped (..., 37, bins)."""
    return CategoricalMixture(logits, bins=bins).mean()
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/esmfold/_esmfold_embed_only.py | src/cheap/esmfold/_esmfold_embed_only.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as T
import time
from functools import partial
import torch
import torch.nn as nn
from torch import nn
from torch.nn import LayerNorm
import esm
from esm import Alphabet
# ================================================================
# Taken from OpenFold residue constants to avoid import
# ================================================================
# The 20 canonical amino acids in the fixed OpenFold ordering.
restypes = list("ARNDCQEGHILKMFPSTWYV")
restype_order = {restype: i for i, restype in enumerate(restypes)}
restype_num = len(restypes)  # := 20.
unk_restype_index = restype_num  # Catch-all index for unknown restypes.
# "X" is appended as the unknown-residue token at index 20.
restypes_with_x = restypes + ["X"]
restype_order_with_x = {restype: i for i, restype in enumerate(restypes_with_x)}
# ================================================================
# Taken from utils to avoid additional imports
# ================================================================
def encode_sequence(
    seq: str,
    residue_index_offset: T.Optional[int] = 512,
    chain_linker: T.Optional[str] = "G" * 25,
) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Tokenize a (possibly multi-chain) ':'-separated sequence.

    Chains are joined with a poly-G linker; each chain's residue indices are
    shifted by ``residue_index_offset`` so distinct chains appear far apart
    in sequence space.

    Returns:
        (aatype, residx, linker_mask, chain_index) per-residue tensors.
        Linker positions get mask 0 and inherit the preceding chain's id.
    """
    chain_linker = chain_linker if chain_linker is not None else ""
    residue_index_offset = (
        residue_index_offset if residue_index_offset is not None else 0
    )

    chains = seq.split(":")
    joined = chain_linker.join(chains)

    unk_idx = restype_order_with_x["X"]
    encoded = torch.tensor([restype_order_with_x.get(aa, unk_idx) for aa in joined])

    residx = torch.arange(len(encoded))
    if residue_index_offset > 0:
        pos = 0
        for chain_no, chain in enumerate(chains):
            span = len(chain) + len(chain_linker)
            residx[pos : pos + span] += chain_no * residue_index_offset
            pos += span

    linker_mask = torch.ones_like(encoded, dtype=torch.float32)
    chain_ids = []
    cursor = 0
    for chain_no, chain in enumerate(chains):
        if chain_no > 0:
            chain_ids.extend([chain_no - 1] * len(chain_linker))
        chain_ids.extend([chain_no] * len(chain))
        cursor += len(chain)
        # Zero out the linker that follows this chain (no-op after the last).
        linker_mask[cursor : cursor + len(chain_linker)] = 0
        cursor += len(chain_linker)

    chain_index = torch.tensor(chain_ids, dtype=torch.int64)
    return encoded, residx, linker_mask, chain_index
def collate_dense_tensors(
    samples: T.List[torch.Tensor], pad_v: float = 0
) -> torch.Tensor:
    """Stack tensors of possibly different sizes into one padded tensor.

    Given tensors shaped (d_i1, ..., d_iK), produces a tensor of shape
    (N, max_i d_i1, ..., max_i d_iK), padded with *pad_v*.

    Raises:
        RuntimeError: if the samples do not all share the same rank.
    """
    if not samples:
        return torch.Tensor()
    ranks = {x.dim() for x in samples}
    if len(ranks) != 1:
        raise RuntimeError(
            f"Samples has varying dimensions: {[x.dim() for x in samples]}"
        )
    (device,) = {x.device for x in samples}  # assumes all on same device
    max_shape = [max(sizes) for sizes in zip(*(x.shape for x in samples))]
    out = torch.empty(
        len(samples), *max_shape, dtype=samples[0].dtype, device=device
    )
    out.fill_(pad_v)
    for dst, src in zip(out, samples):
        # Copy each sample into the top-left corner of its padded slot.
        dst[tuple(slice(0, d) for d in src.shape)] = src
    return out
def batch_encode_sequences(
    sequences: T.Sequence[str],
    residue_index_offset: T.Optional[int] = 512,
    chain_linker: T.Optional[str] = "G" * 25,
) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Encode each sequence and pad the results into dense batch tensors.

    Returns (aatype, mask, residx, linker_mask, chain_index); `mask` is 1 at
    real positions and 0 at batch padding, and `chain_index` is padded with -1.
    """
    per_seq = [
        encode_sequence(
            seq,
            residue_index_offset=residue_index_offset,
            chain_linker=chain_linker,
        )
        for seq in sequences
    ]
    aatype_list = [enc[0] for enc in per_seq]
    residx_list = [enc[1] for enc in per_seq]
    linker_mask_list = [enc[2] for enc in per_seq]
    chain_index_seqs = [enc[3] for enc in per_seq]
    aatype = collate_dense_tensors(aatype_list)
    # Valid-position mask: ones over each sequence's true length, zero-padded.
    mask = collate_dense_tensors(
        [aatype.new_ones(len(a)) for a in aatype_list]
    )
    residx = collate_dense_tensors(residx_list)
    linker_mask = collate_dense_tensors(linker_mask_list)
    chain_index = collate_dense_tensors(chain_index_seqs, -1)
    return aatype, mask, residx, linker_mask, chain_index
# ================================================================
# Modified definition to avoid structure module and OpenFold imports
# ================================================================
# Registry of short model names -> zero-argument callables returning
# (model, alphabet). Entries either call `esm.pretrained.*` directly or load
# a specific named checkpoint through `load_model_and_alphabet`.
# NOTE(review): several "_270K" keys resolve to "_500K" checkpoint names
# (and vice versa); this appears to mirror the upstream ESMFold registry —
# confirm against facebookresearch/esm before relying on the distinction.
load_fn = esm.pretrained.load_model_and_alphabet
esm_registry = {
    "esm2_8M": partial(load_fn, "esm2_t6_8M_UR50D_500K"),
    "esm2_8M_270K": esm.pretrained.esm2_t6_8M_UR50D,
    "esm2_35M": partial(load_fn, "esm2_t12_35M_UR50D_500K"),
    "esm2_35M_270K": esm.pretrained.esm2_t12_35M_UR50D,
    "esm2_150M": partial(load_fn, "esm2_t30_150M_UR50D_500K"),
    "esm2_150M_270K": partial(load_fn, "esm2_t30_150M_UR50D_270K"),
    "esm2_650M": esm.pretrained.esm2_t33_650M_UR50D,
    "esm2_650M_270K": partial(load_fn, "esm2_t33_650M_270K_UR50D"),
    "esm2_3B": esm.pretrained.esm2_t36_3B_UR50D,
    "esm2_3B_270K": partial(load_fn, "esm2_t36_3B_UR50D_500K"),
    "esm2_15B": esm.pretrained.esm2_t48_15B_UR50D,
}
class ESMFoldEmbed(nn.Module):
    """
    Modified ESMFold base that removes structure trunk but retains projection layers
    after ESM2, such that initialization does not depend on OpenFold installation.
    Does not use attention maps.
    """
    def __init__(self):
        # NOTE: attribute names must stay as-is — they are matched against the
        # pretrained ESMFold checkpoint keys in `esmfold_v1_embed_only`.
        super().__init__()
        print("Creating ESMFold embedding only model...")
        # esm2 weights are loaded from registry (requires ESM installation)
        self.esm, self.esm_dict = esm_registry.get("esm2_3B")()
        # Language model is frozen and kept in fp16.
        self.esm.requires_grad_(False)
        self.esm.half()
        self.esm_feats = self.esm.embed_dim
        self.esm_attns = self.esm.num_layers * self.esm.attention_heads
        # Lookup table translating (shifted) AF2 residue indices to ESM vocab.
        self.register_buffer("af2_to_esm", ESMFoldEmbed._af2_to_esm(self.esm_dict))
        # Learned softmax weights for combining the per-layer LM representations.
        self.esm_s_combine = nn.Parameter(torch.zeros(self.esm.num_layers + 1))
        # from ESMFold config
        self.c_s = 1024
        self.c_z = 128
        self.esm_s_mlp = nn.Sequential(
            LayerNorm(self.esm_feats),
            nn.Linear(self.esm_feats, self.c_s),
            nn.ReLU(),
            nn.Linear(self.c_s, self.c_s),
        )
        # 0 is padding, N is unknown residues, N + 1 is mask.
        self.n_tokens_embed = restype_num + 3
        self.pad_idx = 0
        self.unk_idx = self.n_tokens_embed - 2
        self.mask_idx = self.n_tokens_embed - 1
        self.embedding = nn.Embedding(self.n_tokens_embed, self.c_s, padding_idx=0)
    @staticmethod
    def _af2_to_esm(d: Alphabet):
        """Build the AF2(+1)->ESM index translation table as a 1-D tensor."""
        # Remember that t is shifted from residue_constants by 1 (0 is padding).
        esm_reorder = [d.padding_idx] + [d.get_idx(v) for v in restypes_with_x]
        return torch.tensor(esm_reorder)
    def _af2_idx_to_esm_idx(self, aa, mask):
        # Shift AF2 indices by one (0 becomes padding), zero masked positions,
        # then translate through the `af2_to_esm` buffer.
        aa = (aa + 1).masked_fill(mask != 1, 0)
        return self.af2_to_esm[aa]
    def _compute_language_model_representations(
        self,
        esmaa: torch.Tensor,
    ) -> torch.Tensor:
        """Adds bos/eos tokens for the language model, since the structure module doesn't use these."""
        batch_size = esmaa.size(0)
        bosi, eosi = self.esm_dict.cls_idx, self.esm_dict.eos_idx
        bos = esmaa.new_full((batch_size, 1), bosi)
        eos = esmaa.new_full((batch_size, 1), self.esm_dict.padding_idx)
        esmaa = torch.cat([bos, esmaa, eos], dim=1)
        # Use the first padding index as eos during inference.
        esmaa[range(batch_size), (esmaa != 1).sum(1)] = eosi
        res = self.esm(
            esmaa,
            repr_layers=range(self.esm.num_layers + 1),
            need_head_weights=False,
        )
        # Stack all layers' hidden states along a new "layer" axis (dim=2).
        esm_s = torch.stack(
            [v for _, v in sorted(res["representations"].items())], dim=2
        )
        # Strip the bos/eos positions added above.
        esm_s = esm_s[:, 1:-1]  # B, L, nLayers, C
        # Attention maps are not used in this embedding-only variant.
        esm_z = None
        return esm_s, esm_z
    def _mask_inputs_to_esm(self, esmaa, pattern):
        # Replace positions flagged in `pattern` (== 1) with the ESM mask token.
        new_esmaa = esmaa.clone()
        new_esmaa[pattern == 1] = self.esm_dict.mask_idx
        return new_esmaa
    def embed_for_folding_trunk(
        self,
        aa: torch.Tensor,
        mask: T.Optional[torch.Tensor] = None,
        residx: T.Optional[torch.Tensor] = None,
        masking_pattern: T.Optional[torch.Tensor] = None,
    ):
        """First half of original `forward` function to get s_s_0 and s_z_0.
        Runs a forward pass given input tokens. Use `model.infer` to
        run inference from a sequence.
        Args:
            aa (torch.Tensor): Tensor containing indices corresponding to amino acids. Indices match
                openfold.np.restype_order_with_x.
            mask (torch.Tensor): Binary tensor with 1 meaning position is unmasked and 0 meaning position is masked.
            residx (torch.Tensor): Residue indices of amino acids. Will assume contiguous if not provided.
            masking_pattern (torch.Tensor): Optional masking to pass to the input. Binary tensor of the same size
                as `aa`. Positions with 1 will be masked. ESMFold sometimes produces different samples when
                different masks are provided.
            num_recycles (int): How many recycle iterations to perform. If None, defaults to training max
                recycles, which is 3.
        """
        if mask is None:
            mask = torch.ones_like(aa)
        B = aa.shape[0]
        L = aa.shape[1]
        device = aa.device
        if residx is None:
            residx = torch.arange(L, device=device).expand_as(aa)
        # === ESM ===
        esmaa = self._af2_idx_to_esm_idx(aa, mask)
        if masking_pattern is not None:
            esmaa = self._mask_inputs_to_esm(esmaa, masking_pattern)
        esm_s, esm_z = self._compute_language_model_representations(esmaa)
        # Convert esm_s to the precision used by the trunk and
        # the structure module. These tensors may be a lower precision if, for example,
        # we're running the language model in fp16 precision.
        esm_s = esm_s.to(self.esm_s_combine.dtype)
        esm_s = esm_s.detach()
        # Process outputs for trunk input
        # Softmax-weighted combination over layers, then MLP projection to c_s.
        esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2)
        s_s_0 = self.esm_s_mlp(esm_s)
        # Pairwise features start as zeros (no attention maps in this variant).
        s_z_0 = s_s_0.new_zeros(B, L, L, self.c_z)
        s_s_0 += self.embedding(aa)
        return s_s_0, s_z_0, aa, residx, mask
    @torch.no_grad()
    def infer_embedding(
        self,
        sequences: T.Union[str, T.List[str]],
        residx=None,
        masking_pattern: T.Optional[torch.Tensor] = None,
        residue_index_offset: T.Optional[int] = 512,
        chain_linker: T.Optional[str] = "G" * 25,
    ):
        """From a list of sequence strings, obtain embeddings.
        Args:
            sequences (Union[str, List[str]]): A list of sequences to make predictions for. Multimers can also be passed in,
                each chain should be separated by a ':' token (e.g. "<chain1>:<chain2>:<chain3>").
            residx (torch.Tensor): Residue indices of amino acids. Will assume contiguous if not provided.
            masking_pattern (torch.Tensor): Optional masking to pass to the input. Binary tensor of the same size
                as `aa`. Positions with 1 will be masked. ESMFold sometimes produces different samples when
                different masks are provided.
            num_recycles (int): How many recycle iterations to perform. If None, defaults to training max
                recycles (cfg.trunk.max_recycles), which is 4.
            residue_index_offset (int): Residue index separation between chains if predicting a multimer. Has no effect on
                single chain predictions. Default: 512.
            chain_linker (str): Linker to use between chains if predicting a multimer. Has no effect on single chain
                predictions. Default: length-25 poly-G ("G" * 25).
        """
        if isinstance(sequences, str):
            sequences = [sequences]
        aatype, mask, _residx, linker_mask, chain_index = batch_encode_sequences(
            sequences, residue_index_offset, chain_linker
        )
        if residx is None:
            residx = _residx
        elif not isinstance(residx, torch.Tensor):
            residx = collate_dense_tensors(residx)
        aatype, mask, residx, linker_mask = map(
            lambda x: x.to(self.device), (aatype, mask, residx, linker_mask)
        )
        with torch.no_grad():
            s_s_0, s_z_0, _, residx, mask = self.embed_for_folding_trunk(
                aatype, mask, residx, masking_pattern
            )
        # Keys mirror ESMFold's trunk-input naming: sequence ("s") and pair
        # ("z") features plus the mask and residue positions actually used.
        return {
            "s": s_s_0,
            "z": s_z_0,
            "mask": mask,
            "pos": residx,
        }
    @property
    def device(self):
        # Inferred from a parameter so it tracks `.to(...)` moves of the module.
        return self.esm_s_combine.device
# ================================================================
# Load pretrained weights
# ================================================================
def _load_esmfold_state_dict():
    """Download the ESMFold 3B v1 checkpoint and return its model state dict.

    The download is cached by `torch.hub`, so only the first call hits the
    network. The checkpoint file stores a dict with the weights under "model".
    """
    # Fix: the URL had a spurious f-string prefix with no placeholders.
    url = "https://dl.fbaipublicfiles.com/fair-esm/models/esmfold_3B_v1.pt"
    model_data = torch.hub.load_state_dict_from_url(
        url, progress=False, map_location="cpu"
    )
    return model_data["model"]
def esmfold_v1_embed_only():
    """Instantiate `ESMFoldEmbed` and load the pretrained ESMFold v1 weights."""
    t0 = time.time()
    checkpoint = _load_esmfold_state_dict()
    model = ESMFoldEmbed()
    incompatible_keys = model.load_state_dict(checkpoint, strict=False)
    # The only keys allowed to be missing are the ESM2 LM weights, which were
    # already loaded from the esm registry during __init__.
    for missing in incompatible_keys.missing_keys:
        assert missing[:4] == "esm."
    t1 = time.time()
    print(f"ESMFold embedding only model created in {t1 - t0:.2f} seconds")
    return model
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/losses/__init__.py | src/cheap/losses/__init__.py | from ._functions import (
masked_huber_loss,
masked_l1_loss,
masked_mse_loss,
masked_token_accuracy,
masked_token_cross_entropy_loss,
)
from ._modules import SequenceAuxiliaryLoss, BackboneAuxiliaryLoss
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/losses/_modules.py | src/cheap/losses/_modules.py | import typing as T
from openfold.utils.loss import backbone_loss
import pandas as pd
import torch
import wandb
from . import masked_token_cross_entropy_loss, masked_token_accuracy
from ..esmfold._misc import batch_encode_sequences
from ..proteins import LatentToSequence, LatentToStructure
from ..utils import outputs_to_avg_metric
class SequenceAuxiliaryLoss:
    """Weighted token-level loss on sequences decoded from a latent.

    Decodes logits with `sequence_constructor`, scores them against the true
    `aatype` with `loss_fn` (masked cross entropy by default), and also logs
    masked token accuracy.
    """

    def __init__(
        self,
        sequence_constructor: LatentToSequence,
        weight: float = 1.0,
        loss_fn: T.Callable = masked_token_cross_entropy_loss,
    ):
        self.sequence_constructor = sequence_constructor
        self.loss_fn = loss_fn
        self.weight = weight

    def __call__(
        self,
        latent,
        aatype,
        mask,
        cur_weight=None,
        return_reconstructed_sequences: bool = False,
    ):
        """If cur weight is specified, it will override self.weight."""
        device = latent.device
        self.sequence_constructor.to(device)
        aatype = aatype.to(device)
        mask = mask.to(device)
        # Decode logits; the non-default arguments must be passed explicitly.
        logits, _, recons_strs = self.sequence_constructor.to_sequence(
            latent, mask, return_logits=True, drop_mask_idx=False
        )
        ce_loss = self.loss_fn(logits, aatype, mask)
        accuracy = masked_token_accuracy(logits, aatype, mask)
        effective_weight = cur_weight if cur_weight is not None else self.weight
        logdict = {
            "seq_loss": ce_loss.item(),
            "seq_acc": accuracy.item(),
        }
        weighted_loss = effective_weight * ce_loss
        if return_reconstructed_sequences:
            return weighted_loss, logdict, recons_strs
        return weighted_loss, logdict
class BackboneAuxiliaryLoss:
    """Weighted FAPE-style backbone loss on structures predicted from a latent.

    Uses `structure_constructor` to predict frames from the latent and scores
    them against ground-truth rigid frames with openfold's `backbone_loss`.
    """
    def __init__(self, structure_constructor: LatentToStructure, weight=1.0):
        self.structure_constructor = structure_constructor
        self.weight = weight
    def __call__(
        self,
        latent,
        gt_structures,
        sequences,
        num_recycles=1,
        inner_batch_size=None,
        cur_weight=None,
    ):
        # `cur_weight`, if given, overrides self.weight for this call.
        device = latent.device
        self.structure_constructor.to(device)
        # check shapes
        batch_size, seq_len, _ = latent.shape
        assert gt_structures["backbone_rigid_tensor"].shape == torch.Size(
            [batch_size, seq_len, 4, 4]
        )
        assert gt_structures["backbone_rigid_mask"].shape == torch.Size(
            [batch_size, seq_len]
        )
        # todo: maybe also log pdb strs
        # pred_structures = self.trunk.from_seq_feat(true_aa, latent)[0]
        pred_pdb_strs, pred_raw_outputs = self.structure_constructor.to_structure(
            latent,
            sequences,
            num_recycles,
            batch_size=inner_batch_size,
            return_raw_features=True,
        )
        # Frames are expected per structure-module iteration: (8, B, L, 7).
        assert pred_raw_outputs["frames"].shape == torch.Size(
            [8, batch_size, seq_len, 7]
        )
        loss = backbone_loss(
            backbone_rigid_tensor=gt_structures["backbone_rigid_tensor"].to(device),
            backbone_rigid_mask=gt_structures["backbone_rigid_mask"].to(device),
            traj=pred_raw_outputs["frames"],
        )
        weight = self.weight if cur_weight is None else cur_weight
        metrics = outputs_to_avg_metric(pred_raw_outputs)
        # dict-union (|) requires Python 3.9+.
        logdict = {"backbone_loss": loss.item()} | metrics
        return weight * loss, logdict
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/losses/_functions.py | src/cheap/losses/_functions.py | import typing as T
import torch
import torch.nn.functional as F
import numpy as np
import einops
import torch
def make_mask(broadcast_shape, mask):
    """Right-pad `mask` with singleton dims and expand it to `broadcast_shape`."""
    missing = len(broadcast_shape) - len(mask.shape)
    for _ in range(missing):
        mask = mask[..., None]
    return mask.expand(broadcast_shape)
def masked_mse_loss(pred: torch.Tensor, target: torch.Tensor, mask=None, reduce="mean"):
    """Computes the mean squared error loss.
    assumes that the axis order is (B, L, ...)

    With a mask, the sum of squared errors is normalized by the number of
    unmasked elements ("mean") or per-sample over non-batch dims ("batch").
    """
    squared_err = (pred - target) ** 2
    if mask is None:
        return squared_err.mean()
    mask = make_mask(pred.shape, mask)
    masked_err = squared_err * mask
    if reduce == "mean":
        return masked_err.sum() / mask.sum()
    if reduce == "batch":
        non_batch = tuple(range(1, len(pred.shape)))
        return masked_err.sum(dim=non_batch) / mask.sum(dim=non_batch)
    raise ValueError(
        f"Unknown reduce type: {reduce}. Expected: 'mean' or 'batch'."
    )
def masked_huber_loss(
    pred: torch.Tensor, target: torch.Tensor, mask=None, reduce="mean"
):
    """Computes the huber loss; assumes that the axis order is (B, L, ...).

    NOTE(review): unlike `masked_mse_loss`, masked positions are zeroed in
    both inputs but still counted in the reduction's denominator, and
    `reduce` is forwarded verbatim to `F.huber_loss` (which does not accept
    "batch") — confirm both are intended.
    """
    if mask is None:
        return F.huber_loss(pred, target)
    expanded = make_mask(pred.shape, mask)
    return F.huber_loss(pred * expanded, target * expanded, reduction=reduce)
def masked_l1_loss(pred: torch.Tensor, target: torch.Tensor, mask=None, reduce="mean"):
    """Computes the L1 loss; assumes that the axis order is (B, L, ...).

    Masked positions are zeroed in both inputs before the (unmasked-count
    agnostic) reduction, mirroring `masked_huber_loss`.
    """
    if mask is None:
        return F.l1_loss(pred, target)
    expanded = make_mask(pred.shape, mask)
    return F.l1_loss(pred * expanded, target * expanded, reduction=reduce)
def masked_token_cross_entropy_loss(
    pred_logits: torch.Tensor,
    targets: torch.Tensor,
    mask: T.Optional[torch.Tensor] = None,
    ignore_index: T.Optional[int] = -100,
):
    """Cross entropy over flattened token positions, optionally masked.

    pred_logits: (B, L, C) logits; targets: (B, L) indices; mask: (B, L).
    """
    # Flatten batch and length axes (equivalent to einops "b l c -> (b l) c").
    flat_logits = pred_logits.reshape(-1, pred_logits.shape[-1])
    flat_targets = targets.reshape(-1)
    # The vocab uses 0, which overlaps with the padding idx used by the
    # ESMFold collator, so we use the mask to remove padding positions from
    # array entirely, and then ignore the UNK_IDX when computing the loss.
    if mask is not None:
        keep = mask.reshape(-1).to(torch.bool)
        flat_logits = flat_logits[keep, :]
        flat_targets = flat_targets[keep]
    return F.cross_entropy(flat_logits, flat_targets, ignore_index=ignore_index)
def masked_token_accuracy(
    pred_logits: torch.Tensor,
    target: torch.Tensor,
    mask: T.Optional[torch.Tensor] = None,
):
    """Top-1 accuracy over (optionally masked) token positions.

    pred_logits: (B, L, C) logits; target: (B, L) indices; mask: (B, L).
    """
    # Flatten batch and length axes (equivalent to einops "b l c -> (b l) c").
    flat_logits = pred_logits.reshape(-1, pred_logits.shape[-1])
    flat_targets = target.reshape(-1)
    if mask is not None:
        keep = mask.reshape(-1).to(torch.bool)
        flat_logits = flat_logits[keep, :]
        flat_targets = flat_targets[keep]
    predictions = flat_logits.argmax(-1)
    assert predictions.shape == flat_targets.shape
    return (predictions == flat_targets).sum() / len(predictions)
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/datasets/__precomputed.py | src/cheap/datasets/__precomputed.py | from pathlib import Path
import typing as T
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import h5py
from ..utils import StructureFeaturizer
from ..typed import PathLike
# Embedder identifiers accepted by `H5Dataset.load_partition`; the inline
# numbers annotate each model's embedding dimension (per original comments).
ACCEPTED_LM_EMBEDDER_TYPES = [
    "esmfold",  # 1024 -- i.e. t36_3B with projection layers, used for final model
    "esmfold_pre_mlp",  # 2560
    "esm2_t48_15B_UR50D",  # 5120
    "esm2_t36_3B_UR50D",  # 2560
    "esm2_t33_650M_UR50D",  # 1280
    "esm2_t30_150M_UR50D",
    "esm2_t12_35M_UR50D",  # 480
    "esm2_t6_8M_UR50D",  # 320
]
class H5Dataset(Dataset):
    """Loads presaved embeddings as a H5 dataset"""

    def __init__(
        self,
        shard_dir: PathLike,
        split: T.Optional[str] = None,
        embedder: str = "esmfold",
        max_seq_len: int = 64,
        dtype: str = "fp32",
        filtered_ids_list: T.Optional[T.List[str]] = None,
        max_num_samples: T.Optional[int] = None,
    ):
        super().__init__()
        self.filtered_ids_list = filtered_ids_list
        self.dtype = dtype
        self.max_seq_len = max_seq_len
        self.shard_dir = Path(shard_dir)
        self.embedder = embedder
        self.max_num_samples = max_num_samples
        # Maps decoded pdb id -> (embedding tensor, sequence string).
        self.data = self.load_partition(
            split, embedder, max_num_samples, filtered_ids_list
        )
        self.pdb_ids = list(self.data.keys())

    def drop_protein(self, pid):
        # Hook for subclasses: return True to exclude `pid` from the dataset.
        return False

    def load_partition(
        self,
        split: T.Optional[str] = None,
        embedder: T.Optional[str] = None,
        max_num_samples: T.Optional[int] = None,
        filtered_ids_list: T.Optional[T.List[str]] = None,
    ):
        """
        2024/02/15: path format:
        ${shard_dir}/${split}/${embedder}/${seqlen}/${precision}/shard0000.h5
        """
        # Assemble the shard directory, validating split/embedder when given.
        datadir = self.shard_dir
        if split is not None:
            assert split in ("train", "val")
            datadir = datadir / split
        if embedder is not None:
            assert embedder in ACCEPTED_LM_EMBEDDER_TYPES
            datadir = datadir / embedder
        datadir = datadir / f"seqlen_{self.max_seq_len}" / self.dtype
        # Read the single shard hdf5 file.
        with h5py.File(datadir / "shard0000.h5", "r") as f:
            emb = torch.from_numpy(np.array(f["embeddings"]))
            sequence = list(f["sequences"])
            pdb_ids = list(f["pdb_id"])
        # Optionally restrict to a prespecified set of ids.
        # NOTE(review): h5 ids are bytes while filtered_ids_list is typically
        # str, so this intersection may be empty — verify caller passes bytes.
        # Also, after filtering, position i in `pdb_ids` no longer corresponds
        # to row i of `emb`/`sequence` — confirm intended.
        if filtered_ids_list is not None:
            pdb_ids = set(pdb_ids).intersection(set(filtered_ids_list))
            disjoint = set(filtered_ids_list) - set(pdb_ids)
            print(
                f"Did not find {len(disjoint)} IDs, including {list(disjoint)[:3]}, etc."
            )
            pdb_ids = list(pdb_ids)
        # Optionally trim to a subset to speed up loading.
        if max_num_samples is not None:
            pdb_ids = pdb_ids[:max_num_samples]
        # Decode ids/sequences one by one and build the output mapping.
        outdict = {}
        for i, raw_id in enumerate(pdb_ids):
            pid = raw_id.decode()
            if not self.drop_protein(pid):
                outdict[pid] = (emb[i, ...], sequence[i].decode())
        return outdict

    def __len__(self):
        return len(self.pdb_ids)

    def get(self, idx: int) -> T.Tuple[str, T.Tuple[torch.Tensor, torch.Tensor]]:
        assert isinstance(self.pdb_ids, list)
        pid = self.pdb_ids[idx]
        return pid, self.data[pid]

    def __getitem__(
        self, idx: int
    ) -> T.Tuple[str, T.Tuple[torch.Tensor, torch.Tensor]]:
        # Wrapper for non-structure dataloaders: rearrange the output tuple.
        pdb_id, (emb, seq) = self.get(idx)
        return emb, seq, pdb_id
class StructureH5Dataset(H5Dataset):
    """Return ground-truth structure features as well, for structure-based losses.

    Extends `H5Dataset` by reading the PDB file named after each example's id
    from `pdb_path_dir` and featurizing it with `StructureFeaturizer`.
    """

    def __init__(
        self,
        shard_dir: PathLike,
        pdb_path_dir: PathLike,
        split: T.Optional[str] = None,
        embedder: str = "esmfold",
        max_seq_len: int = 128,
        dtype: str = "fp32",
        path_to_filtered_ids_list: T.Optional[T.List[str]] = None,
        max_num_samples: T.Optional[int] = None,
    ):
        # Optionally restrict the dataset to a newline-separated id list file.
        if path_to_filtered_ids_list is not None:
            with open(path_to_filtered_ids_list, "r") as f:
                filtered_ids_list = f.read().splitlines()
        else:
            filtered_ids_list = None
        super().__init__(
            split=split,
            shard_dir=shard_dir,
            embedder=embedder,
            max_seq_len=max_seq_len,
            dtype=dtype,
            filtered_ids_list=filtered_ids_list,
            max_num_samples=max_num_samples,
        )
        self.structure_featurizer = StructureFeaturizer()
        self.pdb_path_dir = Path(pdb_path_dir)
        self.max_seq_len = max_seq_len

    def __getitem__(self, idx: int):
        """Return (embedding, sequence, structure_features) for example `idx`.

        Propagates whatever `StructureFeaturizer` raises on malformed PDBs
        (e.g. KeyError); callers are expected to pre-filter bad ids via
        `path_to_filtered_ids_list`.
        """
        pdb_id, (emb, seq) = self.get(idx)
        pdb_path = self.pdb_path_dir / pdb_id
        with open(pdb_path, "r") as f:
            pdb_str = f.read()
        structure_features = self.structure_featurizer(pdb_str, self.max_seq_len)
        return emb, seq, structure_features
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/datasets/_datamodules.py | src/cheap/datasets/_datamodules.py | import typing as T
import torch
from torch.utils.data import DataLoader
from ..typed import PathLike
from .__precomputed import H5Dataset, StructureH5Dataset
from .__fasta import FastaDataset
class H5DataModule:
    """Train/val datamodule over `H5Dataset` shards of precomputed embeddings."""

    def __init__(
        self,
        shard_dir: PathLike,
        embedder: str = "esmfold",
        seq_len: int = 128,
        batch_size: int = 32,
        num_workers: int = 0,
        dtype: str = "fp32",
        shuffle_val_dataset: bool = False,
    ):
        super().__init__()
        self.shard_dir = shard_dir
        self.embedder = embedder
        self.dtype = dtype
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.shuffle_val_dataset = shuffle_val_dataset
        self.dataset_fn = H5Dataset
        # Eagerly build the datasets (default stage is "fit").
        self.setup()

    def setup(self, stage: str = "fit"):
        """Instantiate datasets for the given stage ("fit" or "predict")."""
        kwargs = {"embedder": self.embedder}
        if stage == "fit":
            self.train_dataset = self.dataset_fn(
                shard_dir=self.shard_dir,
                split="train",
                max_seq_len=self.seq_len,
                dtype=self.dtype,
                **kwargs,
            )
            self.val_dataset = self.dataset_fn(
                shard_dir=self.shard_dir,
                split="val",
                max_seq_len=self.seq_len,
                dtype=self.dtype,
                **kwargs,
            )
        elif stage == "predict":
            # Fix: `shard_dir` and `split` were swapped here
            # (split=self.shard_dir, shard_dir="val"), which pointed the
            # dataset at a "val" directory and an invalid split.
            self.test_dataset = self.dataset_fn(
                shard_dir=self.shard_dir,
                split="val",
                max_seq_len=self.seq_len,
                dtype=self.dtype,
                **kwargs,
            )
        else:
            raise ValueError(f"stage must be one of ['fit', 'predict'], got {stage}")

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            shuffle=True,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            shuffle=self.shuffle_val_dataset,
        )

    def test_dataloader(self):
        # Test and predict both reuse the validation loader.
        return self.val_dataloader()

    def predict_dataloader(self):
        return self.val_dataloader()
class StructureH5DataModule:
    """Loads ground-truth structure as well as embedding for structure-based losses."""

    def __init__(
        self,
        shard_dir: PathLike,
        pdb_path_dir: PathLike,
        embedder: str = "esmfold",
        seq_len: int = 128,
        batch_size: int = 32,
        num_workers: int = 0,
        path_to_filtered_ids_list: T.Optional[T.List[str]] = None,
        max_num_samples: T.Optional[int] = None,
        shuffle_val_dataset: bool = False,
    ):
        super().__init__()
        self.shard_dir = shard_dir
        self.pdb_path_dir = pdb_path_dir
        self.embedder = embedder
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.dtype = "fp32"
        self.path_to_filtered_ids_list = path_to_filtered_ids_list
        self.max_num_samples = max_num_samples
        self.shuffle_val_dataset = shuffle_val_dataset

    def setup(self, stage: str = "fit"):
        """Instantiate datasets for the given stage ("fit" or "predict").

        Fix: the val/predict datasets previously passed "val" positionally,
        which bound it to StructureH5Dataset's first parameter (`shard_dir`)
        and collided with the explicit `shard_dir=` keyword (TypeError).
        It is now passed as `split="val"`.
        """
        if stage == "fit":
            self.train_dataset = StructureH5Dataset(
                split="train",
                shard_dir=self.shard_dir,
                pdb_path_dir=self.pdb_path_dir,
                embedder=self.embedder,
                max_seq_len=self.seq_len,
                dtype=self.dtype,
                path_to_filtered_ids_list=self.path_to_filtered_ids_list,
                max_num_samples=self.max_num_samples,
            )
            self.val_dataset = StructureH5Dataset(
                split="val",
                shard_dir=self.shard_dir,
                pdb_path_dir=self.pdb_path_dir,
                embedder=self.embedder,
                max_seq_len=self.seq_len,
                dtype=self.dtype,
                path_to_filtered_ids_list=self.path_to_filtered_ids_list,
                max_num_samples=self.max_num_samples,
            )
        elif stage == "predict":
            self.test_dataset = StructureH5Dataset(
                split="val",
                shard_dir=self.shard_dir,
                pdb_path_dir=self.pdb_path_dir,
                embedder=self.embedder,
                max_seq_len=self.seq_len,
                dtype=self.dtype,
                path_to_filtered_ids_list=self.path_to_filtered_ids_list,
                max_num_samples=self.max_num_samples,
            )
        else:
            raise ValueError(f"stage must be one of ['fit', 'predict'], got {stage}")

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=True,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=self.shuffle_val_dataset,
        )

    def test_dataloader(self):
        # Test and predict both reuse the validation loader.
        return self.val_dataloader()

    def predict_dataloader(self):
        return self.val_dataloader()
class FastaDataModule:
    """Train/val datamodule over a FASTA file, split deterministically (seed 42)."""

    def __init__(
        self,
        fasta_file: PathLike,
        batch_size: int,
        train_frac: float = 0.8,
        num_workers: int = 0,
        shuffle_val_dataset: bool = False,
        seq_len: int = 512,
    ):
        self.fasta_file = fasta_file
        self.train_frac = train_frac
        self.val_frac = 1 - train_frac
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.shuffle_val_dataset = shuffle_val_dataset
        self.seq_len = seq_len

    def setup(self):
        # Build the full dataset once, then split it reproducibly.
        full_dataset = FastaDataset(self.fasta_file, cache_indices=True)
        rng = torch.Generator().manual_seed(42)
        self.train_dataset, self.val_dataset = torch.utils.data.random_split(
            full_dataset, [self.train_frac, self.val_frac], generator=rng
        )

    def _make_loader(self, dataset, shuffle, sampler):
        # Shared DataLoader construction for both splits.
        return DataLoader(
            dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            shuffle=shuffle,
            sampler=sampler,
        )

    def train_dataloader(self, sampler=None):
        # Shuffle only when no explicit sampler is supplied.
        return self._make_loader(self.train_dataset, sampler is None, sampler)

    def val_dataloader(self, sampler=None):
        return self._make_loader(self.val_dataset, self.shuffle_val_dataset, sampler)
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/datasets/__fasta.py | src/cheap/datasets/__fasta.py | from typing import (
Any,
TypeVar,
Callable,
Dict,
Union,
)
import threading
from pathlib import Path
from operator import methodcaller
import subprocess
import torch
from torch.utils.data import DataLoader
import numpy as np
T = TypeVar("T")
PathLike = Union[str, Path]
"""
Adapted from
https://github.com/rmrao/evo/blob/main/evo/dataset.py
"""
class ThreadsafeFile:
    """Lazily opens one file handle *per thread* and proxies attribute access
    to it, so a single object can be shared across DataLoader worker threads.

    The handle is created on first access of `.file` in each thread and
    closed (for the creating thread) when the wrapper is garbage collected.
    """
    def __init__(
        self,
        filepath: PathLike,
        open_func: Callable[[PathLike], T],
        close_func: Callable[[T], None] = methodcaller("close"),
    ):
        # thread-local storage keeps one independent handle per thread.
        self._threadlocal = threading.local()
        self._filepath = filepath
        self._open_func = open_func
        self._close_func = close_func
    def __getattr__(self, name: str):
        # Delegate unknown attributes (read/seek/...) to this thread's handle.
        return getattr(self.file, name)
    @property
    def file(self) -> T:
        # Open lazily, once per thread.
        if not hasattr(self._threadlocal, "file"):
            self._threadlocal.file = self._open_func(self._filepath)
        return self._threadlocal.file
    def __getstate__(self) -> Dict[str, Any]:
        # Thread-local state is not picklable; drop it and recreate on load.
        return {k: v for k, v in self.__dict__.items() if k != "_threadlocal"}
    def __setstate__(self, state: Dict[str, Any]) -> None:
        self.__dict__ = state
        self._threadlocal = threading.local()
    def __del__(self):
        # Only the handle belonging to the thread running the finalizer is
        # visible here; handles opened by other threads are not closed.
        if hasattr(self._threadlocal, "file"):
            self._close_func(self._threadlocal.file)
            del self._threadlocal.file
class SizedDataset(torch.utils.data.Dataset):
    """Dataset base class that carries a per-example size array."""

    def __init__(self, sizes: np.ndarray, *args, **kwargs):
        super().__init__(*args, **kwargs)  # type: ignore
        self._sizes = sizes

    @property
    def sizes(self):
        """Per-example sizes, exactly as supplied at construction."""
        return self._sizes

    def __len__(self):
        return len(self.sizes)
class FastaDataset(SizedDataset):
    """
    For loading protein sequence datasets in the common FASTA data format
    Modified from github.com/pytorch/fairseq.

    Builds (and optionally caches to `<data_file>.idx.npy`) an index of byte
    offsets and sequence lengths, then reads records lazily via a
    per-thread file handle.
    """

    def __init__(self, data_file: PathLike, cache_indices: bool = False):
        self.data_file = Path(data_file)
        if not self.data_file.exists():
            raise FileNotFoundError(
                f"{self.data_file}\n"
                "If using hydra, make sure you are using absolute instead of relative paths."
            )
        self.file = ThreadsafeFile(data_file, open)
        self.cache = Path(f"{data_file}.idx.npy")
        if cache_indices:
            if self.cache.exists():
                self.offsets, sizes = np.load(self.cache)
            else:
                self.offsets, sizes = self._build_index()
                np.save(self.cache, np.stack([self.offsets, sizes]))
        else:
            self.offsets, sizes = self._build_index()
        super().__init__(sizes)

    def __getitem__(self, idx):
        return self.get(idx)

    def get(self, idx: int):
        """Return (description, sequence) for record `idx`."""
        self.file.seek(self.offsets[idx])
        # The last record runs to EOF; others span up to the next offset.
        if idx == len(self) - 1:
            data = self.file.read()
        else:
            data = self.file.read(self.offsets[idx + 1] - self.offsets[idx])
        desc, *seq = data.split("\n")
        # Drop the leading '>' and re-join wrapped sequence lines.
        return desc[1:], "".join(seq)

    def __len__(self):
        return self.offsets.size

    def _build_index(self):
        # Use grep and awk to get 100M/s on local SSD.
        # Should process your enormous 100G fasta in ~10 min single core...
        bytes_offsets = subprocess.check_output(
            f"cat {self.data_file} | tqdm --bytes --total $(wc -c < {self.data_file})"
            "| grep --byte-offset '^>' -o | cut -d: -f1",
            shell=True,
        )
        fasta_lengths = subprocess.check_output(
            f"cat {self.data_file} | tqdm --bytes --total $(wc -c < {self.data_file})"
            '| awk \'/^>/ {print "";next;} { printf("%s",$0);}\' | tail -n+2 | awk '
            "'{print length($1)}'",
            shell=True,
        )
        # Fix: np.fromstring is deprecated (and removed for this use in newer
        # NumPy); parse the whitespace-separated ASCII integers explicitly.
        bytes_np = np.array([int(tok) for tok in bytes_offsets.split()], dtype=np.int64)
        sizes_np = np.array([int(tok) for tok in fasta_lengths.split()], dtype=np.int64)
        return bytes_np, sizes_np
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/datasets/__init__.py | src/cheap/datasets/__init__.py | from ._datamodules import H5DataModule, StructureH5DataModule, FastaDataModule
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/utils/_latent_scaler.py | src/cheap/utils/_latent_scaler.py | from pathlib import Path
import os
import typing as T
import numpy as np
import torch
from torch.hub import download_url_to_file
from ._nn_utils import npy
from ..constants import TENSOR_STATS_DIR, HF_HUB_PREFIX
from ..typed import PathLike
# Anything the scaling helpers accept as "array-like" input.
ArrayLike = T.Union[np.ndarray, T.List[float], torch.Tensor]
# Global (non-channelwise) statistics of sequence embeddings per dataset,
# used by the scaling helpers below. Values were precomputed offline — the
# computation is not shown in this module.
GLOBAL_SEQEMB_STATS = {
    "uniref": {
        "max": 3038.4783,
        "min": -920.1115,
        "mean": 1.2394488,
        "std": 70.907074,
    },
    "cath": {
        "max": 2853.481,
        "min": -878.217,
        "mean": 1.289,
        "std": 71.788,
    },
}
def _ensure_exists(path):
    """Create directory `path` (and any missing parents) if it does not exist.

    Uses `exist_ok=True` instead of an exists-then-mkdir check, which was
    racy when several workers created the same cache directory concurrently.
    """
    Path(path).mkdir(parents=True, exist_ok=True)
def _download_normalization_tensors(channel_stat: str, cache_dir: PathLike, dataset: str = "cath", lm_embedder_type="esmfold"):
    """Fetch one per-channel statistics tensor from the HF hub into the cache."""
    fname = f"channel_{channel_stat}.pkl.npy"
    dest_dir = Path(cache_dir) / f"{dataset}/{lm_embedder_type}/subset_5000_nov28"
    _ensure_exists(dest_dir)
    # NOTE(review): the URL always points at cath/esmfold regardless of the
    # `dataset`/`lm_embedder_type` arguments — confirm this is intended.
    source_url = f"{HF_HUB_PREFIX}/statistics/cath/esmfold/subset_5000_nov28/{fname}"
    download_url_to_file(source_url, dest_dir / fname)
def _get_npy_path(cache_dir, dataset="cath", lm_embedder_type="esmfold"):
    """Constructs path to the npy file, downloading it if it doesn't exist.

    Returns a dict mapping each channel statistic ("max", "min", "mean",
    "std") to its cached .npy path.
    """
    assert dataset in ["uniref", "cath"]
    stats_dir = cache_dir / dataset / lm_embedder_type / "subset_5000_nov28"
    paths = {
        stat: stats_dir / f"channel_{stat}.pkl.npy"
        for stat in ("max", "min", "mean", "std")
    }
    # Fetch any statistic file that is missing from the cache.
    for stat, path in paths.items():
        if not path.exists():
            _download_normalization_tensors(stat, cache_dir, dataset, lm_embedder_type)
    return paths
def _array_conversion(
    x: T.Union[float, ArrayLike],
    minv: T.Union[float, ArrayLike],
    maxv: T.Union[float, ArrayLike],
) -> T.Tuple[T.Union[float, ArrayLike], T.Union[float, ArrayLike], T.Union[float, ArrayLike]]:
    """Coerce the (min, max) statistics to match the container type of `x`.

    Returns (x, minv, maxv) with minv/maxv converted: left untouched for
    float stats, converted via the project `npy` helper when `x` is a numpy
    array, and moved/converted to `x`'s device when `x` is a torch tensor.

    NOTE(review): if `x` is a torch.Tensor but minv/maxv are neither tensors
    nor numpy arrays (e.g. plain lists), the inner if/elif chain falls
    through and the function implicitly returns None — confirm whether that
    path should raise instead.
    """
    assert type(minv) == type(maxv)
    if isinstance(minv, float):
        # Global (scalar) statistics need no conversion.
        return x, minv, maxv
    elif isinstance(x, np.ndarray):
        minv = npy(minv)
        maxv = npy(maxv)
        return x, minv, maxv
    elif isinstance(x, torch.Tensor):
        if isinstance(minv, torch.Tensor):
            minv = minv.to(x.device)
            maxv = maxv.to(x.device)
            return x, minv, maxv
        elif isinstance(minv, np.ndarray):
            # Usually this is the case during training
            minv = torch.from_numpy(minv).to(x.device)
            maxv = torch.from_numpy(maxv).to(x.device)
            return x, minv, maxv
    else:
        raise TypeError("Invalid input type.")
def _minmax_scaling(
    x: ArrayLike,
    data_minv: T.Union[float, ArrayLike],
    data_maxv: T.Union[float, ArrayLike],
    scaled_minv: float = -1.0,
    scaled_maxv: float = 1.0,
) -> ArrayLike:
    """Min-max scale *x* into ``[scaled_minv, scaled_maxv]``.

    Bounds may be floats (global scaling) or per-channel arrays of shape
    (1024,). The default target range is [-1, 1], following DDPM, and the
    formulation follows sklearn's MinMaxScaler:
    https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
    """
    x, data_minv, data_maxv = _array_conversion(x, data_minv, data_maxv)
    # First map onto [0, 1], then stretch/shift into the requested range.
    unit = (x - data_minv) / (data_maxv - data_minv)
    return unit * (scaled_maxv - scaled_minv) + scaled_minv
def _undo_minmax_scaling(
    x_scaled: ArrayLike,
    data_minv: T.Union[float, ArrayLike],
    data_maxv: T.Union[float, ArrayLike],
    scaled_minv: float = -1.0,
    scaled_maxv: float = 1.0,
):
    """Invert ``_minmax_scaling``: map values back into the original data range."""
    x_scaled, data_minv, data_maxv = _array_conversion(x_scaled, data_minv, data_maxv)
    # Undo the two affine steps in reverse order: back to [0, 1], then to data units.
    unit = (x_scaled - scaled_minv) / (scaled_maxv - scaled_minv)
    return unit * (data_maxv - data_minv) + data_minv
def _standardize(
    x: ArrayLike, meanv: T.Union[float, ArrayLike], stdv: T.Union[float, ArrayLike]
) -> ArrayLike:
    """Standardize *x* to zero mean and unit std, globally or channel-wise.

    Float stats give a global transform; arrays of shape (1024,) apply
    per-channel.
    """
    x, meanv, stdv = _array_conversion(x, meanv, stdv)
    centered = x - meanv
    return centered / stdv
def _undo_standardize(
    x_scaled: ArrayLike,
    meanv: T.Union[float, ArrayLike],
    stdv: T.Union[float, ArrayLike],
):
    """Invert ``_standardize``: rescale by std and re-add the mean."""
    x_scaled, meanv, stdv = _array_conversion(x_scaled, meanv, stdv)
    return (x_scaled * stdv) + meanv
def _scaled_l2_norm(x, scale_factor=1.0):
"""
Scale to L2 unit norm along channel for each sample, following the
treatment of CLIP embeddings in DALLE-2.
Optionally scale up by sqrt(embed_dim), following suggestion from
https://github.com/lucidrains/DALLE2-pytorch/issues/60
"""
x = x / x.norm(dim=-1, p="fro", keepdim=True)
x *= scale_factor
return x
def _check_valid_mode(mode: str):
return mode in [
"global_minmaxnorm",
"global_standardize",
"channel_minmaxnorm",
"channel_standardize",
"identity",
]
def _check_valid_origin_dataset(origin_dataset: str):
return origin_dataset in ["uniref", "cath"]
def _clamp(tensor: ArrayLike, min_values: ArrayLike, max_values: ArrayLike):
    """Elementwise clamp of *tensor* into ``[min_values, max_values]``.

    Bounds may differ per element; they are first coerced to the tensor's
    type/device.
    """
    tensor, min_values, max_values = _array_conversion(tensor, min_values, max_values)
    # Cap from above first, then raise anything below the floor.
    capped = torch.where(tensor > max_values, max_values, tensor)
    return torch.where(tensor < min_values, min_values, capped)
def load_channelwise_stats(cache_dir, origin_dataset, lm_embedder_type="esmfold"):
    """Load the cached per-channel max/min/mean/std arrays for a dataset."""
    npy_paths = _get_npy_path(cache_dir, origin_dataset, lm_embedder_type)
    return {stat: np.load(npy_paths[stat]) for stat in ("max", "min", "mean", "std")}
class LatentScaler:
    """Normalize/denormalize latent embeddings using precomputed statistics.

    Supported modes: global or per-channel min-max normalization, global or
    per-channel standardization, and "identity" (no-op). Channel-wise
    statistics are read from the on-disk cache; global statistics come from
    ``GLOBAL_SEQEMB_STATS``.
    """

    def __init__(
        self,
        mode: T.Optional[str] = "channel_minmaxnorm",
        origin_dataset: str = "cath",
        lm_embedder_type: str = "esmfold",
    ):
        self.mode = mode
        assert _check_valid_mode(mode), f"Invalid mode {mode}."
        if mode is not None and mode != "identity":
            assert _check_valid_origin_dataset(origin_dataset)
            self.origin_dataset = origin_dataset
            self.lm_embedder_type = lm_embedder_type
            if "channel_" in mode:
                stats = load_channelwise_stats(
                    TENSOR_STATS_DIR, origin_dataset, lm_embedder_type
                )
            else:
                stats = GLOBAL_SEQEMB_STATS[origin_dataset]
            self.maxv = stats["max"]
            self.minv = stats["min"]
            self.meanv = stats["mean"]
            self.stdv = stats["std"]

    def scale(self, x: ArrayLike):
        """Map *x* into the normalized space (no-op for identity/None mode)."""
        if (self.mode is None) or (self.mode == "identity"):
            return x
        with torch.no_grad():
            # Global and channel variants use the same formula; the stats
            # loaded in __init__ (scalars vs. arrays) determine the behavior.
            if self.mode.endswith("minmaxnorm"):
                return _minmax_scaling(x, self.minv, self.maxv)
            if self.mode.endswith("standardize"):
                return _standardize(x, self.meanv, self.stdv)
            raise NotImplementedError

    def unscale(self, x_scaled: ArrayLike):
        """Invert ``scale``: map normalized values back to the original space."""
        if (self.mode is None) or (self.mode == "identity"):
            return x_scaled
        with torch.no_grad():
            if self.mode.endswith("minmaxnorm"):
                return _undo_minmax_scaling(x_scaled, self.minv, self.maxv)
            if self.mode.endswith("standardize"):
                return _undo_standardize(x_scaled, self.meanv, self.stdv)
            raise NotImplementedError
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/utils/_structure_featurizer.py | src/cheap/utils/_structure_featurizer.py | import typing as T
from pathlib import Path
import numpy as np
import torch
from ..openfold_utils import (
make_pdb_features,
make_all_atom_aatype,
make_seq_mask,
make_atom14_masks,
make_atom14_positions,
atom37_to_frames,
get_backbone_frames,
OFProtein,
protein_from_pdb_string,
)
from ._transforms import trim_or_pad_length_first
PathLike = T.Union[Path, str]
# Feature keys whose leading (per-residue) dimension must be trimmed/padded
# to the fixed crop length by trim_or_pad_length_first; keys not listed here
# (e.g. scalar metadata) are left untouched.
FEATURES_REQUIRING_PADDING = [
    "aatype",
    "between_segment_residues",
    "residue_index",
    "seq_length",
    "all_atom_positions",
    "all_atom_mask",
    # 'resolution',
    # 'is_distillation',
    "all_atom_aatype",
    "seq_mask",
    "atom14_atom_exists",
    "residx_atom14_to_atom37",
    "residx_atom37_to_atom14",
    "atom37_atom_exists",
    "atom14_gt_exists",
    "atom14_gt_positions",
    "atom14_alt_gt_positions",
    "atom14_alt_gt_exists",
    "atom14_atom_is_ambiguous",
    "rigidgroups_gt_frames",
    "rigidgroups_gt_exists",
    "rigidgroups_group_exists",
    "rigidgroups_group_is_ambiguous",
    "rigidgroups_alt_gt_frames",
    "backbone_rigid_tensor",
    "backbone_rigid_mask",
]
class StructureFeaturizer:
    """Build the OpenFold feature dict (frames, atom masks, positions) from a PDB string."""

    def _openfold_features_from_pdb(
        self, pdb_str: str, pdb_id: T.Optional[str] = None
    ) -> OFProtein:
        """Create rigid groups from a PDB file on disk.

        The inputs to the Frame-Aligned Point Error (FAPE) loss used in AlphaFold2 are
        tuples of translations and rotations from the reference frame. In the OpenFold
        implementation, this is stored as `Rigid` objects. This function calls the
        OpenFold wrapper functions which creates an `OFProtein` object,
        and then extracts several `Rigid` objects.

        Args:
            pdb_str (str): String representing the contents of a PDB file
            pdb_id (str, optional): Identifier recorded in the feature metadata.

        Returns:
            The feature mapping produced by OpenFold's ``make_pdb_features``.
            NOTE(review): the annotation says ``OFProtein`` but the returned
            value is the features dict -- confirm which is intended.
        """
        pdb_id = "" if pdb_id is None else pdb_id
        protein_object = protein_from_pdb_string(pdb_str)
        # TODO: what is the `is_distillation` argument?
        protein_features = make_pdb_features(
            protein_object, description=pdb_id, is_distillation=False
        )
        return protein_features

    def _process_structure_features(
        self, features: T.Dict[str, np.ndarray], seq_len: int
    ):
        """Process feature dtypes and pad to max length.

        Converts numpy features to torch, pads/trims per-residue features to
        ``seq_len``, and derives the binary ``mask`` and integer ``aatype``.
        """
        for k, v in features.items():
            # Handle data types in converting from numpy to torch
            if v.dtype == np.dtype("int32"):
                features[k] = torch.from_numpy(v).long()  # int32 -> int64
            elif v.dtype == np.dtype("O"):
                # Object arrays hold metadata strings; keep the scalar string only.
                features[k] = v.astype(str)[0]
            else:
                # the rest are all float32. TODO: does this be float64?
                features[k] = torch.from_numpy(v)
            # Trim or pad to a fixed length for all per-specific features
            if k in FEATURES_REQUIRING_PADDING:
                features[k] = trim_or_pad_length_first(features[k], seq_len)
            # 'seq_length' is a tensor with shape equal to the aatype array length,
            # and filled with the value of the original sequence length.
            if k == "seq_length":
                features[k] = torch.full((seq_len,), features[k][0])
        # Make the mask: 1 for real residues, 0 for padded positions.
        idxs = torch.arange(seq_len, dtype=torch.long)
        mask = idxs < features["seq_length"]
        features["mask"] = mask.long()
        # Collapse the one-hot aatype encoding into integer residue indices.
        features["aatype"] = features["aatype"].argmax(dim=-1)
        return features

    def __call__(self, pdb_str: str, seq_len: int, pdb_id: T.Optional[str] = None):
        """Featurize one PDB string: parse, pad/trim to ``seq_len``, derive masks and frames."""
        features = self._openfold_features_from_pdb(pdb_str, pdb_id)
        features = self._process_structure_features(features, seq_len)
        features = make_all_atom_aatype(features)
        features = make_seq_mask(features)
        features = make_atom14_masks(features)
        features = make_atom14_positions(features)
        features = atom37_to_frames(features)
        features = get_backbone_frames(features)
        # f = make_pseudo_beta("")
        # p = f(p)
        # f = atom37_to_torsion_angles("")
        # p = f(p)
        # p = get_chi_angles(p)
        return features
def view_py3Dmol(pdbstr):
    """Render a PDB string as a green cartoon in an interactive py3Dmol viewer."""
    import py3Dmol

    viewer = py3Dmol.view(width=400, height=300)
    viewer.addModelsAsFrames(pdbstr)
    viewer.setStyle({"model": -1}, {"cartoon": {"color": "green"}})
    viewer.zoomTo()
    viewer.show()
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/utils/_nn_utils.py | src/cheap/utils/_nn_utils.py | from typing import Union
import torch
import numpy as np
ArrayLike = Union[np.ndarray, torch.Tensor]
def npy(x: ArrayLike):
    """Convert *x* to a numpy array, detaching and moving torch tensors to CPU."""
    if not isinstance(x, torch.Tensor):
        return np.array(x)
    return x.detach().cpu().numpy()
def to_tensor(x, device=None, dtype=None):
    """Coerce *x* to a torch.Tensor, optionally moving it to *device* and casting to *dtype*.

    Tensors pass through unchanged (same object when no move/cast is needed);
    numpy arrays share memory via ``from_numpy``.
    """
    if isinstance(x, np.ndarray):
        out = torch.from_numpy(x)
    elif isinstance(x, torch.Tensor):
        out = x
    else:
        out = torch.tensor(x)
    if device is not None:
        out = out.to(device)
    if dtype is not None:
        out = out.type(dtype)
    return out
def count_parameters(model, require_grad_only=True):
    """Count model parameters; by default only those with ``requires_grad=True``."""
    params = model.parameters()
    if require_grad_only:
        params = (p for p in params if p.requires_grad)
    return sum(p.numel() for p in params)
def get_model_device(model: torch.nn.Module) -> torch.device:
    """Return the device of the model's first parameter."""
    first_param = next(model.parameters())
    return first_param.device
def outputs_to_avg_metric(outputs):
    """Reduce selected ESMFold confidence outputs to one median value per sample.

    For each tracked metric: 1-D arrays are kept as-is, 2-D arrays are
    median-reduced over axis 1, and higher-rank arrays are median-reduced
    over every non-batch axis, yielding shape ``(batch,)``.
    """
    tracked = ("plddt", "ptm", "aligned_confidence_probs", "predicted_aligned_error")
    reduced = {}
    for name in tracked:
        arr = npy(outputs[name])
        if arr.ndim == 1:
            reduced[name] = arr
        elif arr.ndim == 2:
            reduced[name] = np.median(arr, axis=1)
        else:
            assert arr.ndim > 2
            reduced[name] = np.median(arr, axis=tuple(range(1, arr.ndim)))
    return reduced
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/utils/__init__.py | src/cheap/utils/__init__.py | from ._latent_scaler import LatentScaler
from ._scheduler import get_lr_scheduler
from ._nn_utils import (
npy,
to_tensor,
count_parameters,
get_model_device,
outputs_to_avg_metric,
)
from ._transforms import (
trim_or_pad_batch_first,
trim_or_pad_length_first,
get_random_sequence_crop,
get_random_sequence_crop_batch,
)
from ._structure_featurizer import StructureFeaturizer, view_py3Dmol
from ._analysis import calc_sequence_recovery
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/utils/_scheduler.py | src/cheap/utils/_scheduler.py | from transformers import (
get_scheduler,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
Adafactor,
)
import torch
def get_lr_scheduler(
    optimizer: torch.optim.Optimizer,
    sched_type: str = "constant",
    num_warmup_steps: int = 0,
    num_training_steps: int = 10_000_000,
    num_cycles: int = 1,
):
    """Build a HuggingFace learning-rate scheduler for *optimizer*.

    "cosine" and "cosine_with_restarts" use the dedicated constructors so
    that ``num_cycles`` takes effect; any other name is resolved through
    ``transformers.get_scheduler``, which ignores ``num_cycles``.
    """
    common = dict(
        optimizer=optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
    )
    if sched_type == "cosine_with_restarts":
        return get_cosine_with_hard_restarts_schedule_with_warmup(
            num_cycles=num_cycles, **common
        )
    if sched_type == "cosine":
        return get_cosine_schedule_with_warmup(num_cycles=num_cycles, **common)
    return get_scheduler(name=sched_type, **common)
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/utils/_transforms.py | src/cheap/utils/_transforms.py | from typing import List, Tuple
import torch
import random
import einops
def mask_from_seq_lens(x: torch.Tensor, seqlen: torch.Tensor):
    """Build a (batch, L) 0/1 mask of valid positions from per-sample lengths.

    Position ``j`` of row ``i`` is 1 iff ``j < seqlen[i]``. Plain broadcasting
    replaces the previous ``einops.repeat``, removing the extra dependency and
    the materialized (N, L) intermediate; the result is identical.
    """
    positions = torch.arange(x.shape[1], device=x.device)
    return (positions[None, :] < seqlen[:, None]).long()
def get_random_sequence_crop(s, length):
    """Return a random contiguous crop of *s* of size *length*.

    Sequences already at or below *length* are returned unchanged.
    """
    if len(s) <= length:
        return s
    start = random.randint(0, len(s) - length)
    return s[start : start + length]
def get_random_sequence_crop_batch(sequence_batch, max_len, min_len=None):
    """Randomly crop every sequence in the batch to at most *max_len*.

    If *min_len* is given, sequences shorter than it are dropped before
    cropping, so the returned list may be shorter than the input.
    """
    if min_len is not None:  # idiomatic `is not None` (was `not ... is None`)
        sequence_batch = [s for s in sequence_batch if len(s) >= min_len]
    return [get_random_sequence_crop(seq, max_len) for seq in sequence_batch]
def trim_or_pad_length_first(tensor: torch.Tensor, pad_to: int, pad_idx: int = 0):
    """Force the first dimension of a (L, ...) tensor to exactly *pad_to*.

    Longer tensors are truncated; shorter ones are padded at the end with
    *pad_idx*, preserving dtype/device and all trailing dimensions.
    """
    length = tensor.shape[0]
    if length < pad_to:
        # new_full inherits dtype/device from `tensor`.
        filler = tensor.new_full((pad_to - length, *tensor.shape[1:]), pad_idx)
        return torch.cat((tensor, filler), dim=0)
    return tensor[:pad_to]
def trim_or_pad_batch_first(tensor: torch.Tensor, pad_to: int, pad_idx: int = 0):
    """Force dim 1 (sequence length) of a (B, L, ...) tensor to exactly *pad_to*.

    Longer sequences are truncated; shorter ones are right-padded with
    *pad_idx*, preserving dtype/device and trailing dimensions.
    """
    batch, length = tensor.shape[0], tensor.shape[1]
    if length < pad_to:
        filler = tensor.new_full((batch, pad_to - length, *tensor.shape[2:]), pad_idx)
        return torch.cat((tensor, filler), dim=1)
    return tensor[:, :pad_to, ...]
| python | MIT | cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e | 2026-01-05T07:14:48.094150Z | false |
amyxlu/cheap-proteins | https://github.com/amyxlu/cheap-proteins/blob/cb8a4ac36e9a44c779d31aacfe5f80c5072d7e7e/src/cheap/utils/_analysis.py | src/cheap/utils/_analysis.py | import typing as T
import numpy as np
from ._nn_utils import npy
from ..typed import ArrayLike
def calc_sequence_recovery(
    pred_seq: ArrayLike, orig_seq: ArrayLike, mask: T.Optional[ArrayLike] = None
):
    """Fraction of positions where the predicted sequence matches the original.

    Accepts token arrays or plain strings (strings are compared via character
    ordinals). If *mask* is given, only the masked-in positions are counted.
    Raises ZeroDivisionError if no positions remain after masking.
    """
    if isinstance(pred_seq[0], str):
        assert isinstance(orig_seq[0], str)
        pred_seq = np.array([ord(ch) for ch in pred_seq])
        orig_seq = np.array([ord(ch) for ch in orig_seq])
    if mask is not None:  # idiomatic `is not None` (was `not ... is None`)
        pred_seq, orig_seq = pred_seq[mask], orig_seq[mask]
    assert len(pred_seq) == len(orig_seq)
    return np.sum(npy(pred_seq) == npy(orig_seq)) / len(pred_seq)
taketwo/glasbey | https://github.com/taketwo/glasbey/blob/a0607959fe671f012599a7dd6031904340eaf99c/glasbey.py | glasbey.py | #!/usr/bin/env python
# encoding: utf-8
import os
import sys
import ast
import argparse
import numpy as np
from colorspacious import cspace_convert
from view_palette import palette_to_image
# Optional dependency: nicer progress reporting via `progressbar`. If it is
# not installed, fall back to no-op stand-ins exposing the same interface so
# the rest of the module can use them unconditionally.
try:
    from progressbar import Bar, ETA, Percentage, ProgressBar
except ImportError:

    class Bar:
        pass

    class ETA:
        pass

    class Percentage:
        pass

    class ProgressBar:
        # Accepts and ignores the real class's kwargs (widgets, maxval).
        def __init__(self, **kwargs):
            pass

        def start(self):
            return self

        def update(self, i):
            pass

        def finish(self):
            pass
class Glasbey:
    """Sequential maximally-distinct palette generator (Glasbey et al., 2007).

    Candidate colors are held in the perceptually uniform CAM02-UCS space;
    each new palette entry is the candidate that maximizes the minimum
    distance to every color already in the palette.
    """

    def __init__(self,
                 base_palette=None,
                 overwrite_base_palette: bool = False,
                 no_black: bool = False,
                 lightness_range=None,
                 chroma_range=None,
                 hue_range=None):
        """Set up the candidate color table and the starting palette.

        base_palette may be a path to a palette file, a list of (r, g, b)
        byte tuples, or None (start from white). The optional range filters
        restrict candidates by lightness (J), chroma (C), or hue (h) in JCh.
        """
        # Constants
        self.MAX = 256
        self.NUM_COLORS = self.MAX * self.MAX * self.MAX
        self.LUT = os.path.dirname(os.path.realpath(__file__)) + "/rgb_cam02ucs_lut.npz"
        self.overwrite_base_palette = overwrite_base_palette
        # Check input
        if isinstance(base_palette, str):
            assert os.path.isfile(base_palette), "file does not exist: {}".format(base_palette)
        elif isinstance(base_palette, list):
            assert self.check_validity_rbg_palette(base_palette), "Base palette must be in this format: [(255,255,255), ...]"
            assert not self.overwrite_base_palette, "base_palette is no file, cannot overwrite it!"
        else:
            assert not self.overwrite_base_palette, "no base_palette specified, cannot overwrite it!"
        # Load colors
        self.colors = self.load_or_generate_color_table()
        # Initialize base palette; internally palette entries are CAM02-UCS rows
        if isinstance(base_palette, str):
            self.base_palette = base_palette
            self.palette = self.load_palette(base_palette)
            self.palette = [self.colors[i, :] for i in self.palette]
        elif isinstance(base_palette, list) and len(base_palette) > 0:
            # Pack each RGB triple into its 24-bit row index in the LUT.
            self.palette = [(rgb[0] * 256 + rgb[1]) * 256 + rgb[2] for rgb in base_palette]
            self.palette = [self.colors[i, :] for i in self.palette]
        else:
            self.palette = [self.colors[-1, :]]  # white
        assert self.check_validity_internal_palette(), "Internal error during __init__: self.palette is poorly formatted."
        # Update self.colors
        # Exclude greys (values with low Chroma in JCh) and set lightness range,
        if lightness_range is not None:
            jch = cspace_convert(self.colors, "CAM02-UCS", "JCh")
            self.colors = self.colors[
                (jch[:, 0] >= lightness_range[0]) & (jch[:, 0] <= lightness_range[1]), :
            ]
        if chroma_range is not None:
            jch = cspace_convert(self.colors, "CAM02-UCS", "JCh")
            self.colors = self.colors[
                (jch[:, 1] >= chroma_range[0]) & (jch[:, 1] <= chroma_range[1]), :
            ]
        if hue_range is not None:
            jch = cspace_convert(self.colors, "CAM02-UCS", "JCh")
            if hue_range[0] > hue_range[1]:
                # Hue is circular: a wrapped range like (315, 45) keeps both ends.
                self.colors = self.colors[
                    (jch[:, 2] >= hue_range[0]) | (jch[:, 2] <= hue_range[1]), :
                ]
            else:
                self.colors = self.colors[
                    (jch[:, 2] >= hue_range[0]) & (jch[:, 2] <= hue_range[1]), :
                ]
        # Exclude colors that are close to black
        # NOTE(review): this uses the *first remaining* row as "black"; if a
        # range filter above removed true black, row 0 may be another color.
        if no_black:
            MIN_DISTANCE_TO_BLACK = 35
            d = np.linalg.norm((self.colors - self.colors[0, :]), axis=1)
            self.colors = self.colors[d > MIN_DISTANCE_TO_BLACK, :]

    def generate_palette(self, size):
        """
        Return palette in sRGB1 format.
        If the palette isn't long enough, new entries are generated.
        """
        if size <= len(self.palette):
            return cspace_convert(self.palette[0:size], "CAM02-UCS", "sRGB1")
        # distances[i] tracks the distance from candidate i to its nearest palette color.
        num_colors = self.colors.shape[0]
        distances = np.ones(shape=(num_colors, 1)) * 1000

        # A function to recompute minimum distances from palette to all colors
        def update_distances(colors, color):
            d = np.linalg.norm((colors - color), axis=1)
            np.minimum(distances, d.reshape(distances.shape), distances)

        # Build progress bar
        widgets = ["Generating palette: ", Percentage(), " ", Bar(), " ", ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=size).start()
        # Update distances for the colors that are already in the palette
        for i in range(len(self.palette) - 1):
            update_distances(self.colors, self.palette[i])
            pbar.update(i)
        # Iteratively build palette: greedily add the candidate farthest from it
        while len(self.palette) < size:
            update_distances(self.colors, self.palette[-1])
            self.palette.append(self.colors[np.argmax(distances), :])
            pbar.update(len(self.palette))
        pbar.finish()
        assert self.check_validity_internal_palette(), "Internal error during extend_palette: self.palette is poorly formatted."
        if self.overwrite_base_palette:
            # Convert back to sRGB1 before writing: save_palette scales by 255
            # and the palette-file format is sRGB255. (Previously the raw
            # CAM02-UCS coordinates were written, corrupting the file.)
            srgb1 = cspace_convert(self.palette, "CAM02-UCS", "sRGB1")
            self.save_palette(palette=srgb1, path=self.base_palette, format="byte", overwrite=True)
        return cspace_convert(self.palette[0:size], "CAM02-UCS", "sRGB1")

    def load_or_generate_color_table(self):
        """Load the RGB->CAM02-UCS lookup table, regenerating it if missing or corrupt."""
        try:
            colors = np.load(self.LUT)["lut"]
            # Sanity check
            assert colors.shape == (self.NUM_COLORS, 3)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate during the slow regeneration fallback.
            colors = self.generate_color_table()
            np.savez_compressed(self.LUT, lut=colors)
        return colors

    def generate_color_table(self):
        """
        Generate a lookup table with all possible RGB colors, encoded in
        perceptually uniform CAM02-UCS color space.
        Table rows correspond to individual RGB colors, columns correspond to J',
        a', and b' components. The table is stored as a NumPy array.
        """
        widgets = ["Generating color table: ", Percentage(), " ", Bar(), " ", ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=(self.MAX * self.MAX)).start()
        i = 0
        colors = np.empty(shape=(self.NUM_COLORS, 3), dtype=float)
        for r in range(self.MAX):
            for g in range(self.MAX):
                # Convert one (r, g, *) strip of 256 colors at a time.
                d = i * self.MAX
                for b in range(self.MAX):
                    colors[d + b, :] = (r, g, b)
                colors[d:d + self.MAX] = cspace_convert(
                    colors[d:d + self.MAX], "sRGB255", "CAM02-UCS"
                )
                pbar.update(i)
                i += 1
        pbar.finish()
        return colors

    @staticmethod
    def load_palette(path):
        """
        Load a palette file with one comma-separated sRGB255 color per line,
        returning packed 24-bit LUT row indices.
        """
        assert os.path.isfile(path)
        palette = list()
        with open(path, 'r') as file:
            for line in file:
                rgb = [int(c) for c in line.strip().split(",")]
                palette.append((rgb[0] * 256 + rgb[1]) * 256 + rgb[2])
        return palette

    @staticmethod
    def save_palette(palette, path: str, format: str = "byte", overwrite: bool = False):
        """
        Write an sRGB1 palette to *path*, one color per line.
        Output format examples (white):
          * byte: 255,255,255 (sRGB255)
          * float: 1.000000,1.000000,1.000000
          * hex: #ffffff
        """
        if not overwrite:
            assert not os.path.isfile(path)
        with open(path, 'w') as file:
            if format.lower() == "byte":
                for color in palette:
                    rgb255 = tuple(int(round(k * 255)) for k in color)
                    file.write("{},{},{}\n".format(*rgb255))
            elif format.lower() == "float":
                for color in palette:
                    # NOTE(review): abs() silently flips tiny negative components
                    # from colorspace round-trips; confirm clamping to 0 isn't
                    # the intended behavior instead.
                    file.write("{:.6f},{:.6f},{:.6f}\n".format(*(abs(k) for k in color)))
            elif format.lower() == "hex":
                for color in palette:
                    rgb255 = tuple(int(round(k * 255)) for k in color)
                    file.write("#{:02x}{:02x}{:02x}\n".format(*rgb255))
            else:
                raise ValueError("Format doesn't match. Choose between 'byte', 'hex', and 'float'")

    def check_validity_internal_palette(self):
        """True iff self.palette is a list of 3-component numpy rows."""
        if not isinstance(self.palette, list):
            return False
        for color in self.palette:
            if len(color) != 3 or not isinstance(color, np.ndarray):
                return False
        return True

    @staticmethod
    def check_validity_rbg_palette(palette):
        """True iff *palette* is a list of (r, g, b) tuples with byte-range components."""
        if not isinstance(palette, list):
            return False
        for color in palette:
            if len(color) != 3 or not isinstance(color, tuple):
                return False
            # Fixed operator precedence: previously `not` bound only to the
            # first comparison, so out-of-range green/blue components passed.
            if not all(0 <= c <= 255 for c in color):
                return False
        return True

    @staticmethod
    def convert_palette_to_rgb(palette):
        """
        Convert palette from sRGB1 to sRGB255.
        """
        return [tuple(int(round(k * 255)) for k in color) for color in palette]

    @staticmethod
    def view_palette(palette):
        """
        Show palette in imagemagick window.
        Expected format: sRGB1 or sRGB255
        """
        img = palette_to_image(palette)
        img.show()
if __name__ == "__main__":
    # Command-line entry point: parse options, generate, save, optionally view.
    parser = argparse.ArgumentParser(
        description="""
Generate a palette with maximally disticts colors using the sequential
method of Glasbey et al.¹
(Dis)similarity between colors is computed in the state-of-the-art
perceptually uniform color space CAM02-UCS.²
This script needs an RGB to CAM02-UCS color lookup table. Generation of
this table is a time-consuming process, therefore the first run of this
script will take some time. The generated table will be stored in the
working directory of the script and automatically used in next invocations
of the script. Note that the approximate size of the table is 363 Mb.
The palette generation method allows the user to supply a base palette. The
output palette will begin with the colors from the supplied set. If no base
palette is given, then white will be used as the first base color. The base
palette should be given as a text file where each line contains a color
description in RGB255 format with components separated with commas. (See
files in the 'palettes/' folder for an example.)
If having black (and colors close to black) is undesired, then `--no-black`
option may be used to prevent the algorithm from inserting such colors into
the palette. In addition to that, the range of colors considered for
inclusion in the palette can be limited by lightness, chroma, or hue.
¹) Glasbey, C., van der Heijden, G., Toh, V. F. K. and Gray, A. (2007),
Colour Displays for Categorical Images.
Color Research and Application, 304-309
²) Luo, M. R., Cui, G. and Li, C. (2006),
Uniform Colour Spaces Based on CIECAM02 Colour Appearance Model.
Color Research and Application, 320–330
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--base-palette", type=argparse.FileType("r"), help="file with base palette"
    )
    parser.add_argument(
        "--no-black", action="store_true", help="avoid black and similar colors"
    )
    # Range options take a Python-literal pair, e.g. "0,90" -> (0, 90).
    parser.add_argument(
        "--lightness-range",
        type=ast.literal_eval,
        help="set min and max for lightness (e.g. 0,90)",
    )
    parser.add_argument(
        "--chroma-range",
        type=ast.literal_eval,
        help="set min and max for chroma (e.g. 10,100)",
    )
    parser.add_argument("--hue-range", type=ast.literal_eval,
                        help="set start and end for hue (e.g. 315,45)")
    parser.add_argument("--view", action="store_true",
                        help="view generated palette")
    parser.add_argument("--format", default="byte", choices=["byte", "hex", "float"],
                        help="output format")
    parser.add_argument("size", type=int,
                        help="number of colors in the palette")
    parser.add_argument("output", type=argparse.FileType("w"),
                        help="output palette filename")
    args = parser.parse_args()
    # Redundant with `choices=` above, but kept as a defensive check.
    if args.format not in ["byte", "hex", "float"]:
        sys.exit('Invalid output format "{}"'.format(args.format))
    # argparse opened the base-palette file; Glasbey only needs its path (or None).
    gb = Glasbey(base_palette=getattr(args.base_palette, "name", None),
                 overwrite_base_palette=False,
                 no_black=args.no_black,
                 lightness_range=args.lightness_range,
                 chroma_range=args.chroma_range,
                 hue_range=args.hue_range)
    new_palette = gb.generate_palette(size=args.size)
    assert len(new_palette) == args.size
    gb.save_palette(new_palette, args.output.name, args.format, overwrite=True)
    if args.view:
        gb.view_palette(new_palette)
| python | MIT | a0607959fe671f012599a7dd6031904340eaf99c | 2026-01-05T07:14:48.850040Z | false |
taketwo/glasbey | https://github.com/taketwo/glasbey/blob/a0607959fe671f012599a7dd6031904340eaf99c/view_palette.py | view_palette.py | #!/usr/bin/env python
# encoding: utf-8
import argparse
import numpy as np
def palette_to_image(palette):
    """Render *palette* as a vertical strip image, one 180x20 band per color.

    Colors may be packed 24-bit ints (0xRRGGBB), numpy float arrays in
    [0, 1], or (r, g, b) byte tuples.
    """
    from PIL import Image

    BAND_W = 180
    BAND_H = 20
    img = Image.new("RGB", (BAND_W, BAND_H * len(palette)), "black")
    pixels = img.load()
    for idx, color in enumerate(palette):
        if isinstance(color, int):
            # Unpack 0xRRGGBB into a byte triple.
            color = ((color >> 16) % 256, (color >> 8) % 256, (color >> 0) % 256)
        elif isinstance(color, np.ndarray):
            color = tuple(int(round(c * 255)) for c in color)
        for col in range(BAND_W):
            for row in range(BAND_H):
                pixels[col, row + idx * BAND_H] = color
    return img
if __name__ == "__main__":
    # Command-line entry point: load a palette file and show/save it as an image.
    parser = argparse.ArgumentParser(
        description="""
View a palette stored in a given file. The script requires PIL (Python
Imaging Library).
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("palette", type=argparse.FileType("r"), help="palette filename")
    parser.add_argument("--save", type=str, help="save as a PNG file")
    args = parser.parse_args()
    # Parse one "R,G,B" triple per line (sRGB255).
    palette = list()
    for line in args.palette.readlines():
        rgb = [int(c) for c in line.strip().split(",")]
        palette.append((rgb[0], rgb[1], rgb[2]))
    img = palette_to_image(palette)
    if args.save:
        img.save(args.save)
    else:
        img.show()
| python | MIT | a0607959fe671f012599a7dd6031904340eaf99c | 2026-01-05T07:14:48.850040Z | false |
taketwo/glasbey | https://github.com/taketwo/glasbey/blob/a0607959fe671f012599a7dd6031904340eaf99c/test/test_glasbey.py | test/test_glasbey.py | import os
from shutil import copyfile, move
from unittest import TestCase
from glasbey import Glasbey
import numpy
class TestGlasbey(TestCase):
    """Integration tests for Glasbey palette generation against palettes/set1.txt."""

    def setUp(self) -> None:
        # Resolve the checked-in sample palette and ensure no stale backup exists.
        file_path = os.path.dirname(os.path.realpath(__file__))
        self.test_palette = file_path + "/../palettes/set1.txt"
        self.test_palette_bkp = file_path + "/../palettes/set1.txt.bkp"
        self.tearDown()
        assert os.path.isfile(self.test_palette)
        assert not os.path.isfile(self.test_palette_bkp)

    def tearDown(self) -> None:
        if os.path.isfile(self.test_palette_bkp):
            # do this in case a test failed
            move(self.test_palette_bkp, self.test_palette)

    def test_bad_input(self):
        # Nonexistent path and overwrite-without-file must both be rejected.
        with self.assertRaises(AssertionError):
            gb = Glasbey(base_palette="!!bad_path!!")
        with self.assertRaises(AssertionError):
            gb = Glasbey(overwrite_base_palette=True)

    def test_simple(self):
        gb = Glasbey(base_palette=self.test_palette)
        palette = gb.generate_palette(size=1)
        self.assertTrue(palette.shape, numpy.array([[0, 0, 0]]).shape)

    def test_save_output(self):
        # First color of set1 must round-trip through save_palette as sRGB255.
        gb = Glasbey(base_palette=self.test_palette)
        palette = gb.generate_palette(size=1)
        gb.save_palette(palette=palette, path="/tmp/random_glasbey_test_file.remove_me", format="byte", overwrite=True)
        self.assertEqual(['228,26,28\n'], open('/tmp/random_glasbey_test_file.remove_me', 'r').readlines())

    def test_extend_base_palette(self):
        self.assertEqual(9, len(open(self.test_palette, 'r').readlines()))  # sanity check
        gb = Glasbey(base_palette=self.test_palette)
        palette = gb.generate_palette(size=10)
        self.assertEqual(10, len(palette))
        self.assertEqual(9, len(open(self.test_palette, 'r').readlines()))  # ensure there was no override

    def test_multiple_sequential_requests(self):
        # Growing and shrinking requests reuse the internally cached palette.
        gb = Glasbey(base_palette=self.test_palette)
        palette = gb.generate_palette(size=5)
        self.assertEqual(5, len(palette))
        palette = gb.generate_palette(size=15)
        self.assertEqual(15, len(palette))
        palette = gb.generate_palette(size=20)
        self.assertEqual(20, len(palette))
        palette = gb.generate_palette(size=18)
        self.assertEqual(18, len(palette))

    def test_overwrite_base_palette(self):
        # Back up the sample file, let Glasbey overwrite it, then restore.
        copyfile(self.test_palette, self.test_palette_bkp)
        self.assertEqual(9, len(open(self.test_palette, 'r').readlines()))  # sanity check
        gb = Glasbey(base_palette=self.test_palette, overwrite_base_palette=True)
        palette = gb.generate_palette(size=10)
        self.assertEqual(10, len(open(self.test_palette, 'r').readlines()))
        move(self.test_palette_bkp, self.test_palette)

    def test_rgb_list_as_base_palette(self):
        base_palette = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
        gb = Glasbey(base_palette=base_palette)
        palette = gb.generate_palette(size=3)
        self.assertEqual(3, len(palette))
        palette = gb.generate_palette(10)
        self.assertEqual(10, len(palette))

    def test_empty_rgb_list_as_base_palette(self):
        # An empty list behaves like no base palette (falls back to white).
        base_palette = []
        gb = Glasbey(base_palette=base_palette)
        palette = gb.generate_palette(size=3)
        self.assertEqual(3, len(palette))

    def test_bad_rgb_list(self):
        # Out-of-range components and overwrite-without-file are all rejected.
        with self.assertRaises(AssertionError):
            base_palette = [(256, 0, 0), (0, 0, 0), (0, 0, 0)]
            gb = Glasbey(base_palette=base_palette)
        with self.assertRaises(AssertionError):
            base_palette = [(-1, 0, 0), (0, 0, 0), (0, 0, 0)]
            gb = Glasbey(base_palette=base_palette)
        with self.assertRaises(AssertionError):
            base_palette = [(0, 0, 0), (0, 0, 0), (0, 0, 0)]
            gb = Glasbey(base_palette=base_palette, overwrite_base_palette=True)
| python | MIT | a0607959fe671f012599a7dd6031904340eaf99c | 2026-01-05T07:14:48.850040Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/setup.py | setup.py | #!/usr/bin/env python
# This is a shim to allow Github to detect the package, build is done with poetry
import setuptools
if __name__ == "__main__":
setuptools.setup(name="nbtlib")
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/test_tag.py | tests/test_tag.py | from io import BytesIO
import pytest
from nbtlib import (
End,
Int,
String,
List,
EndInstantiation,
OutOfRange,
IncompatibleItemType,
CastError,
)
from .inputs import (
bytes_for_valid_tags,
out_of_range_numeric_tags,
unsigned_values_for_integer_tags,
)
@pytest.mark.parametrize("byteorder, bytes_input, expected_tag", bytes_for_valid_tags)
def test_valid_bytes_parsing(byteorder, bytes_input, expected_tag):
    # Round-trip direction 1: raw bytes -> tag object of the expected type/value.
    tag_type = type(expected_tag)
    parsed_tag = tag_type.parse(BytesIO(bytes_input), byteorder)
    assert parsed_tag == expected_tag
@pytest.mark.parametrize("byteorder, expected_bytes, tag_input", bytes_for_valid_tags)
def test_valid_tag_serialization(byteorder, expected_bytes, tag_input):
    # Round-trip direction 2: tag object -> raw bytes (same fixtures, roles swapped).
    buff = BytesIO()
    tag_input.write(buff, byteorder)
    buff.seek(0)
    serialized_bytes = buff.read()
    assert serialized_bytes == expected_bytes
def test_end_tag_instantiation():
    # End is a sentinel tag type: constructing it is always an error
    with pytest.raises(EndInstantiation):
        End()
@pytest.mark.parametrize("tag_type, value", out_of_range_numeric_tags)
def test_out_of_range_numeric_tags(tag_type, value):
    # Values outside the numeric tag's representable range must be rejected
    with pytest.raises(OutOfRange):
        tag_type(value)
@pytest.mark.parametrize("tag, unsigned_value", unsigned_values_for_integer_tags)
def test_unsigned_conversion(tag, unsigned_value):
    # Signed tag -> unsigned integer reinterpretation
    assert tag.as_unsigned == unsigned_value
@pytest.mark.parametrize("tag, unsigned_value", unsigned_values_for_integer_tags)
def test_from_unsigned(tag, unsigned_value):
    # Unsigned integer -> signed tag reinterpretation (inverse of as_unsigned)
    assert tag.from_unsigned(unsigned_value) == tag
class TestListTagEdgeCases:
    """Error behavior of List tags for mismatched or uncastable items."""
    def test_incompatible_with_subtype(self):
        # An item of the wrong tag type doesn't fit the declared subtype
        with pytest.raises(IncompatibleItemType):
            List[String]([4, Int(-1)])
    def test_incompatible_without_subtype(self):
        # Mixed tag types can't be unified when no subtype is declared
        with pytest.raises(IncompatibleItemType):
            List([Int(2), String("5")])
    def test_bare_elements_without_subtype(self):
        # Bare python values raise ValueError without an explicit subtype
        with pytest.raises(ValueError):
            List(["hello"])
    def test_casting_error_with_subtype(self):
        # A nested value that can't be cast to the subtype raises CastError
        with pytest.raises(CastError):
            List[List[Int]]([[5, 4], [[]]])
    def test_casting_error_without_subtype(self):
        with pytest.raises(CastError):
            List([[5, 4], List([List([])])])
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/test_minecraft.py | tests/test_minecraft.py | import pytest
from nbtlib.contrib.minecraft import StructureFile
def test_structure_file(tmp_path):
    """A structure saved to disk must load back equal to the original."""
    structure = StructureFile(
        {
            "DataVersion": 1139,
            "author": "dinnerbone",
            "size": [1, 2, 1],
            "palette": [
                {
                    "Name": "minecraft:dirt",
                }
            ],
            "blocks": [
                {"pos": [0, 0, 0], "state": 0},
                {"pos": [0, 1, 0], "state": 0},
            ],
            "entities": [],
        }
    )
    structure.save(tmp_path / "foo.nbt")
    assert structure == StructureFile.load(tmp_path / "foo.nbt")
@pytest.mark.parametrize(
    "filename",
    [
        "igloo/top.nbt",
        "igloo/middle.nbt",
        "igloo/bottom.nbt",
        "pillager_outpost/watchtower.nbt",
        "village/plains/houses/plains_temple_3.nbt",
        "woodland_mansion/entrance.nbt",
    ],
)
def test_minecraft_structures(minecraft_data_pack, filename):
    # Smoke test: vanilla structure files must load without raising
    StructureFile.load(
        minecraft_data_pack / "data" / "minecraft" / "structures" / filename
    )
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/test_path.py | tests/test_path.py | import pytest
from nbtlib import Path, load, parse_nbt
# (path string, expected tuple of parsed key names)
path_strings_to_keys = [
    ("", ()),
    ("hello", ("hello",)),
    ("hello.world", ("hello", "world")),
    ("with.trailing.dot.", ("with", "trailing", "dot")),
    ('using."quoted.keys"', ("using", "quoted.keys")),
    ('"escape \\"quotes\\""."in.quoted".key', ('escape "quotes"', "in.quoted", "key")),
    ("...with..redundant..dots", ("with", "redundant", "dots")),
    (".2d", ("2d",)),
    ("85.2", ("85", "2")),
    ("85.f", ("85", "f")),
]
@pytest.mark.parametrize("path_string, keys", path_strings_to_keys)
def test_path_with_named_keys(path_string, keys):
    """Each accessor of a parsed path exposes the expected key name."""
    assert tuple(accessor.key for accessor in Path(path_string)) == keys
@pytest.fixture(scope="module")
def bigtest():
    # Module-scoped: the file is read once and shared by the tests below
    return load("tests/nbt_files/bigtest.nbt")
@pytest.fixture
def biglist():
return parse_nbt(
"""[
[{a: [{value: 0}, {value: 1, thing: 42}], flag: 1, first: 99}],
[{spam: {egg: [{foo: 0}, {foo: 2}], checked: 1b}}, {spam: {egg: [{foo: 7}]}}],
[{a: [{value: 1}, {value: 2, thing: 42}]}, {a: [], flag: 1}],
[{a: [{value: 3, thing: 42}], flag: 1}],
[{spam: {egg: [{foo: 1}], checked: 1b}}],
[{spam: {egg: [{foo: 2}]}}, {spam: {egg: [{foo: 9}, {foo: 5}], checked: 1b}}]
]"""
)
# fmt: off
bigtest_path_to_items = [
("longTest", [9223372036854775807]),
('"nested compound test".egg.name', ["Eggbert"]),
('"nested compound test".ham.value', [0.75]),
('"byteArrayTest (the first 1000 values of (n*n*255+n*7)%100, starting with n=0 (0, 62, 34, 16, 8, ...))"[1]', [62]),
('"listTest (long)"', [[11, 12, 13, 14, 15]]),
('"listTest (long)"[3]', [14]),
('"listTest (long)"[]', [11, 12, 13, 14, 15]),
('{byteTest: 127b}."listTest (long)"[1]', [12]),
('{byteTest: 127}."listTest (long)"[1]', []),
('{random: "value"}."listTest (long)"[1]', []),
('{}."listTest (long)"[1]', [12]),
('{intTest: 2147483647}."listTest (long)"[1]', [12]),
('{"nested compound test": {egg: {value: 0.5f}}}."listTest (long)"[1]', [12]),
('"listTest (compound)"', [[{"created-on": 1264099775885, "name": "Compound tag #0"}, {"created-on": 1264099775885, "name": "Compound tag #1"}]]),
('"listTest (compound)"[]', [{"created-on": 1264099775885, "name": "Compound tag #0"}, {"created-on": 1264099775885, "name": "Compound tag #1"}]),
('"listTest (compound)"[1]', [{"created-on": 1264099775885, "name": "Compound tag #1"}]),
('"listTest (compound)"[-1]', [{"created-on": 1264099775885, "name": "Compound tag #1"}]),
('"listTest (compound)"[-2]', [{"created-on": 1264099775885, "name": "Compound tag #0"}]),
('"listTest (compound)"[-3]', []),
('"listTest (compound)"[{name: "Compound tag #0"}]', [{"created-on": 1264099775885, "name": "Compound tag #0"}]),
('"listTest (compound)"[{name: "Compound tag #3"}]', []),
('"listTest (compound)"[{random: "data"}].property', []),
('"listTest (compound)"[].name', ["Compound tag #0", "Compound tag #1"]),
('"listTest (compound)"[]."created-on"', [1264099775885, 1264099775885]),
("[]", []),
("{}[]", []),
("{}[0]", []),
]
# fmt: on
biglist_path_to_items = [
("[][].a[].value", [0, 1, 1, 2, 3]),
("[][{flag: 1}].a[].value", [0, 1, 3]),
("[][].a[{thing: 42}].value", [1, 2, 3]),
("[][{a: []}].flag", [1]),
("[][{a: [{}]}].flag", [1, 1]),
("[][{a: [{thing: 42}]}].a[].value", [0, 1, 1, 2, 3]),
("[][{a: [{thing: 0}]}].a[].value", []),
("[][{a: [{value: 1}]}].a[].value", [0, 1, 1, 2]),
("[][{a: [{value: 1}, {value: 0}]}].first", [99]),
]
@pytest.mark.parametrize("path, items", bigtest_path_to_items)
def test_path_get_bigtest(bigtest, path, items):
    # get_all returns every tag matched by the path (possibly an empty list)
    assert bigtest.get_all(Path(path)) == items
@pytest.mark.parametrize("path, items", biglist_path_to_items)
def test_path_get_biglist(biglist, path, items):
    # Exercises nested [] and [{...}] selectors on the biglist fixture
    assert biglist.get_all(Path(path)) == items
# fmt: off
path_set_and_get = [
("[][].a[].value", "42", "[][].a[].value", [42, 42, 42, 42, 42]),
("[][{flag: 1}].a[].value", "42", "[][].a[].value", [42, 42, 1, 2, 42]),
("[][].a[{thing: 42}].value", "42", "[][].a[].value", [0, 42, 1, 42, 42]),
("[][].a[{thing: 42}]", "{value: 42}", "[][].a[].value", [0, 42, 1, 42, 42]),
("[][].a[]", "{value: 42}", "[][].a[].value", [42, 42, 42, 42, 42]),
("[][].a[0]", "{value: 42}", "[][].a[].value", [42, 1, 42, 2, 42]),
("[][].a[1]", "{value: 42}", "[][].a[].value", [0, 42, 1, 42, 3]),
("[][].a[2]", "{value: 42}", "[][].a[].value", [0, 1, 1, 2, 3]),
("[0][].a[]", "{value: 42}", "[][].a[].value", [42, 42, 1, 2, 3]),
("[][0].a[]", "{value: 42}", "[][].a[].value", [42, 42, 42, 42, 42]),
("[][].spam{checked: 1b}.egg[{foo: 2}]", "{foo: 42}", "[][].spam.egg[].foo", [0, 42, 7, 1, 2, 9, 5]),
("[][].spam{checked: 1b}.egg[]", "{foo: 42}", "[][].spam.egg[].foo", [42, 42, 7, 42, 2, 42, 42]),
("[][].spam{checked: 1b}.egg[0]", "{foo: 42}", "[][].spam.egg[].foo", [42, 2, 7, 42, 2, 42, 5]),
("[][].spam{checked: 1b}", "{egg: []}", "[][].spam.egg[].foo", [7, 2]),
]
# fmt: on
@pytest.mark.parametrize("path, value, select, results", path_set_and_get)
def test_path_set(biglist, path, value, select, results):
    # Assign through *path*, then verify the visible values via *select*
    biglist[Path(path)] = parse_nbt(value)
    assert biglist.get_all(Path(select)) == results
path_del_and_get = [
("[][].spam{checked: 1b}", "[][].spam.egg[].foo", [7, 2]),
("[][1]", "[][].spam.egg[].foo", [0, 2, 1, 2]),
("[][{spam: {checked: 1b}}]", "[][].spam.egg[].foo", [7, 2]),
("[1]", "[][].spam.egg[].foo", [1, 2, 9, 5]),
("[][].spam.egg[0].foo", "[][].spam.egg[].foo", [2, 5]),
]
@pytest.mark.parametrize("path, select, results", path_del_and_get)
def test_path_delete(biglist, path, select, results):
    """Deleting tags through *path* leaves the expected values behind.

    Renamed from a second ``test_path_set`` definition that shadowed the
    set test above (pyflakes F811), which prevented it from ever running.
    """
    del biglist[Path(path)]
    assert biglist.get_all(Path(select)) == results
normalized_path_strings = (
[
"foo",
"foo.bar",
"foo.bar[0]",
'foo.bar[0]."A [crazy name]!"',
'foo.bar[0]."A [crazy name]!".baz',
"foo.bar[]",
"foo.bar[].baz",
"foo.bar[{baz: 5b}]",
"{}",
"{}.foo",
"{foo: 4.0f}",
'foo{bar: "baz"}',
'foo{bar: "baz"}.bar',
"a[-3].c{a: [1b, 2b]}.d[].e{a: {e: 5b}}[8]",
"a[-3].c{a: [1b, 2b]}.d[].e{a: {e: 5b}}[8].d",
"a[-3].c{a: [1b, 2b]}.d[].e{a: {e: 5b}}[8][5]",
"a[-3].c{a: [1b, 2b]}.d[].e{a: {e: 5b}}[].d{a: {m: 4.0f}}",
"Items[].a[]",
"[{}]",
]
+ [
path
for entries in [bigtest_path_to_items, biglist_path_to_items]
for path, _ in entries
]
+ [path for path, _, _, _ in path_set_and_get]
+ [path for _, _, path, _ in path_set_and_get]
+ [path for path, _, _ in path_del_and_get]
+ [path for _, path, _ in path_del_and_get]
)
@pytest.mark.parametrize("path_string", normalized_path_strings)
def test_normalized_path_strings(path_string):
    # These strings are already canonical: parsing must round-trip exactly
    assert str(Path(path_string)) == path_string
# fmt: off
equivalent_paths = [
[Path(p) for p in paths]
for paths in [
["a.b.c", "a b c", "a. b. c", '"a""b""c"', ' "a" .. "b" .c ', "a\nb\nc"],
["[]{a: 1}", "[{a: 1}]", "[{a: 1}]{}", "[{a: 42}]{a: 1}", "[{}]{a: 42}{}{a: 1}"],
['{a: {foo: "bar"}, value: 0}', '{a: {foo: "bar"}, value: 0}{a: {foo: "bar"}}'],
["{a: {b: {c: 1}, foo: 42}}", "{a: {b: {c: 1}}}{a: {foo: 42}}", '{a: {b: {c: "thing"}, foo: 42}}{a: {b: {c: 1}}}'],
["a[5]", "a[05]", "a[00005]"],
]
]
# fmt: on
equivalent_path_pairs = [
(path1, path2)
for paths in equivalent_paths
for path1, path2 in zip(paths, paths[1:])
]
@pytest.mark.parametrize("path1, path2", equivalent_path_pairs)
def test_equivalent_paths(path1, path2):
    # Syntactically different spellings must parse to equal paths
    assert path1 == path2
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/inputs.py | tests/inputs.py | from nbtlib import (
Byte,
ByteArray,
Compound,
Double,
File,
Float,
Int,
IntArray,
List,
Long,
LongArray,
Short,
String,
)
# Public surface of this inputs module. `unsigned_values_for_integer_tags`
# was defined below and imported by tests/test_tag.py but missing here.
__all__ = [
    "bytes_for_valid_tags",
    "out_of_range_numeric_tags",
    "unsigned_values_for_integer_tags",
    "literal_values_for_tags",
    "invalid_literals",
    "nbt_files",
]
# fmt: off
bytes_for_valid_tags = [
# Byte tag
("big", b"\x00", Byte(0)),
("big", b"\xFF", Byte(-1)),
("big", b"\x7F", Byte(127)),
("big", b"\x80", Byte(-128)),
("little", b"\x00", Byte(0)),
("little", b"\xFF", Byte(-1)),
("little", b"\x7F", Byte(127)),
("little", b"\x80", Byte(-128)),
# Short tag
("big", b"\x00\x00", Short(0)),
("big", b"\xFF\xFF", Short(-1)),
("big", b"\x7F\xFF", Short(32767)),
("big", b"\x80\x00", Short(-32768)),
("little", b"\x00\x00", Short(0)),
("little", b"\xFF\xFF", Short(-1)),
("little", b"\xFF\x7F", Short(32767)),
("little", b"\x00\x80", Short(-32768)),
# Int tag
("big", b"\x00\x00\x00\x00", Int(0)),
("big", b"\xFF\xFF\xFF\xFF", Int(-1)),
("big", b"\x7F\xFF\xFF\xFF", Int(2147483647)),
("big", b"\x80\x00\x00\x00", Int(-2147483648)),
("little", b"\x00\x00\x00\x00", Int(0)),
("little", b"\xFF\xFF\xFF\xFF", Int(-1)),
("little", b"\xFF\xFF\xFF\x7F", Int(2147483647)),
("little", b"\x00\x00\x00\x80", Int(-2147483648)),
# Long tag
("big", b"\x00\x00\x00\x00\x00\x00\x00\x00", Long(0)),
("big", b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", Long(-1)),
("big", b"\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF", Long(9223372036854775807)),
("big", b"\x80\x00\x00\x00\x00\x00\x00\x00", Long(-9223372036854775808)),
("little", b"\x00\x00\x00\x00\x00\x00\x00\x00", Long(0)),
("little", b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", Long(-1)),
("little", b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", Long(9223372036854775807)),
("little", b"\x00\x00\x00\x00\x00\x00\x00\x80", Long(-9223372036854775808)),
# Float tag
("big", b"\x00\x00\x00\x00", Float(0)),
("big", b"\xbf\x80\x00\x00", Float(-1)),
("big", b">\xff\x182", Float(0.49823147058486938)),
("little", b"\x00\x00\x00\x00", Float(0)),
("little", b"\x00\x00\x80\xbf", Float(-1)),
("little", b"2\x18\xff>", Float(0.49823147058486938)),
# Double tag
("big", b"\x00\x00\x00\x00\x00\x00\x00\x00", Double(0)),
("big", b"\xbf\xf0\x00\x00\x00\x00\x00\x00", Double(-1)),
("big", b"?\xdf\x8fk\xbb\xffj^", Double(0.49312871321823148)),
("little", b"\x00\x00\x00\x00\x00\x00\x00\x00", Double(0)),
("little", b"\x00\x00\x00\x00\x00\x00\xf0\xbf", Double(-1)),
("little", b"^j\xff\xbbk\x8f\xdf?", Double(0.49312871321823148)),
# ByteArray tag
("big", b"\x00\x00\x00\x00", ByteArray([])),
("big", b"\x00\x00\x00\x01\xff", ByteArray([-1])),
("big", b"\x00\x00\x00\x03\x01\x02\x03", ByteArray([1, 2, 3])),
("little", b"\x00\x00\x00\x00", ByteArray([])),
("little", b"\x01\x00\x00\x00\xff", ByteArray([-1])),
("little", b"\x03\x00\x00\x00\x01\x02\x03", ByteArray([1, 2, 3])),
# String tag
("big", b"\x00\x00", String("")),
("big", b"\x00\x0bhello world", String("hello world")),
("big", b"\x00\x06\xc3\x85\xc3\x84\xc3\x96", String("ÅÄÖ")),
("little", b"\x00\x00", String("")),
("little", b"\x0b\x00hello world", String("hello world")),
("little", b"\x06\x00\xc3\x85\xc3\x84\xc3\x96", String("ÅÄÖ")),
# List tag
("big", b"\x02\x00\x00\x00\x00", List[Short]([])),
("big", b"\x01\x00\x00\x00\x04\x05\xf7\x12\x40", List[Byte]([Byte(5), Byte(-9), Byte(18), Byte(64)])),
("big", b"\x08\x00\x00\x00\x02\x00\x05hello\x00\x05world", List[String]([String("hello"), String("world")])),
("big", b"\t\x00\x00\x00\x02\x03\x00\x00\x00\x01\x00\x00\x00*\x08\x00\x00\x00\x01\x00\x05hello", List[List]([List[Int]([Int(42)]), List[String]([String("hello")])])),
("little", b"\x02\x00\x00\x00\x00", List[Short]([])),
("little", b"\x01\x04\x00\x00\x00\x05\xf7\x12\x40", List[Byte]([Byte(5), Byte(-9), Byte(18), Byte(64)])),
("little", b"\x08\x02\x00\x00\x00\x05\x00hello\x05\x00world", List[String]([String("hello"), String("world")])),
("little", b"\t\x02\x00\x00\x00\x03\x01\x00\x00\x00*\x00\x00\x00\x08\x01\x00\x00\x00\x05\x00hello", List[List]([List[Int]([Int(42)]), List[String]([String("hello")])])),
# Compound tag
("big", b"\x00", Compound({})),
("big", b"\x03\x00\x03foo\x00\x00\x00*\x00", Compound({"foo": Int(42)})),
("big", b"\x01\x00\x01a\x00\x01\x00\x01b\x01\x00", Compound({"a": Byte(0), "b": Byte(1)})),
("little", b"\x00", Compound({})),
("little", b"\x03\x03\x00foo*\x00\x00\x00\x00", Compound({"foo": Int(42)})),
("little", b"\x01\x01\x00a\x00\x01\x01\x00b\x01\x00", Compound({"a": Byte(0), "b": Byte(1)})),
# IntArray tag
("big", b"\x00\x00\x00\x00", IntArray([])),
("big", b"\x00\x00\x00\x01\xff\xff\xff\xff", IntArray([-1])),
("big", b"\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02", IntArray([1, 2])),
("little", b"\x00\x00\x00\x00", IntArray([])),
("little", b"\x01\x00\x00\x00\xff\xff\xff\xff", IntArray([-1])),
("little", b"\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00", IntArray([1, 2])),
# LongArray tag
("big", b"\x00\x00\x00\x00", LongArray([])),
("big", b"\x00\x00\x00\x01\xff\xff\xff\xff\xff\xff\xff\xff", LongArray([-1])),
("big", b"\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02", LongArray([1, 2])),
("little", b"\x00\x00\x00\x00", LongArray([])),
("little", b"\x01\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff", LongArray([-1])),
("little", b"\x02\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00", LongArray([1, 2])),
]
# fmt: on
out_of_range_numeric_tags = [
(Byte, -129),
(Byte, 128),
(Short, -32769),
(Short, 32768),
(Int, -2147483649),
(Int, 2147483648),
(Long, -9223372036854775809),
(Long, 9223372036854775808),
]
unsigned_values_for_integer_tags = [
(Byte(-1), 255),
(Byte(-128), 128),
(Short(-1), 65535),
(Short(-32768), 32768),
(Int(-1), 4294967295),
(Int(-2147483648), 2147483648),
(Long(-1), 18446744073709551615),
(Long(-9223372036854775808), 9223372036854775808),
]
# fmt: off
literal_values_for_tags = [
# Byte tag
("0b", Byte(0)),
("-1b", Byte(-1)),
("127b", Byte(127)),
("-128b", Byte(-128)),
("128b", String("128b")),
("-129b", String("-129b")),
# Short tag
("0s", Short(0)),
("-1s", Short(-1)),
("32767s", Short(32767)),
("-32768s", Short(-32768)),
("32768s", String("32768s")),
("-32769s", String("-32769s")),
# Int tag
("0", Int(0)),
("-1", Int(-1)),
("2147483647", Int(2147483647)),
("-2147483648", Int(-2147483648)),
("2147483648", String("2147483648")),
("-2147483649", String("-2147483649")),
# Long tag
("0l", Long(0)),
("-1l", Long(-1)),
("9223372036854775807l", Long(9223372036854775807)),
("-9223372036854775808l", Long(-9223372036854775808)),
("9223372036854775808l", String("9223372036854775808l")),
("-9223372036854775809l", String("-9223372036854775809l")),
# Float tag
("0.0f", Float(0)),
("-1.0f", Float(-1)),
("56.487f", Float(56.487)),
("1.2e02f", Float(120)),
("82e+289f", Float(8.2e290)),
("1E-52f", Float(1e-52)),
("1.5E+2f", Float(150)),
# Double tag
("0.0d", Double(0)),
("-1.0d", Double(-1)),
("0.493128713218d", Double(0.493128713218)),
("7.2", Double(7.2)),
("1.0e10", Double(1e10)),
("7e-72d", Double(7e-72)),
("7.3E2d", Double(730)),
(".2E+2", Double(20)),
# ByteArray tag
("[B;]", ByteArray([])),
("[B;-1b]", ByteArray([-1])),
("[B;1b,2b,3b]", ByteArray([1, 2, 3])),
# String tag
('""', String('')),
("foo-bar.", String("foo-bar.")),
('"hello world"', String("hello world")),
('"我"', String("我")),
('"Å\\"Ä\\\\Ö"', String('Å"Ä\\Ö')),
('"\\"\\\\"', String('"\\')),
("2a", String("2a")),
('"3.0f"', String('3.0f')),
("+72foo", String("+72foo")),
("03b", String("03b")),
("4e-3l", String("4e-3l")),
("4e3l", String("4e3l")),
("14e+5", String("14e+5")),
("3.3s", String("3.3s")),
("''", String("")),
("'\"'", String('"')),
('"\'"', String("'")),
('"\\""', String('"')),
("'\\''", String("'")),
('"\\\\\'\\""', String('\\\'"')),
# Literal aliases
("true", Byte(1)),
("false", Byte(0)),
("True", Byte(1)),
("FaLse", Byte(0)),
# List tag
("[]", List[Short]([])),
("[5b,-9b,18b,64b]", List[Byte]([5, -9, 18, 64])),
('[hello,world,"\\"\\\\"]', List[String](["hello", "world", '"\\'])),
("[[],[2]]", List[List[Int]]([[], [2]])),
("[[[],[1]],[]]", List[List[List[Int]]]([[[], [1]], []])),
("[[],[[],[]]]", List[List[List]]([[], [[], []]])),
("[[],[[[[[[[[[[],[[[[5,1]],[]]]]]]]]]]],[[[[]]]]]]", List[List[List[List[List[List[List[List[List[List[List[List[List[List[Int]]]]]]]]]]]]]]([[], [[[[[[[[[[], [[[[5, 1]], []]]]]]]]]]], [[[[]]]]]])),
("[[42],[hello]]", List[List]([List[Int]([42]), List[String](["hello"])])),
("[[[[],[[[]]]]],[[[[],[5]]]]]", List[List[List[List[List]]]]([[[[], [List[List]([[]])]]], [[[List[Int]([]), List[Int]([Int(5)])]]]])),
("[[[],[[]]],[[hello]]]", List[List[List]]([[List[List]([]), List[List]([[]])], [List([String("hello")])]])),
("[[1],[[]]]", List([List([Int(1)]), List([List([])])])),
# Compound tag
("{}", Compound({})),
("{foo:42}", Compound({"foo": Int(42)})),
("{a:0b,b:1b}", Compound({"a": Byte(0), "b": Byte(1)})),
('{"hello world":foo}', Compound({"hello world": String("foo")})),
('{"\\"blah\\\\\\"":1.2d}', Compound({'"blah\\"': Double(1.2)})),
('{"jso\\\\\\\\n":"t\\\\\\\\nest"}', Compound({"jso\\\\n": String("t\\\\nest")})),
("{42:bar}", Compound({"42": String("bar")})),
("{-42abc: thing}", Compound({"-42abc": String("thing")})),
("{+77.7f:[B;1b]}", Compound({"+77.7f": ByteArray([1])})),
# IntArray tag
("[I;]", IntArray([])),
("[I;-1]", IntArray([-1])),
("[I;1,2]", IntArray([1, 2])),
# LongArray tag
("[L;]", LongArray([])),
("[L;-1l]", LongArray([-1])),
("[L;1l,2l]", LongArray([1, 2])),
]
# fmt: on
invalid_literals = [
'"\\"',
'"\\n"',
'"\\\\\\"',
'{"\\":1}',
"[a,1]",
"[[],[],1b]",
"[[],[],1b]",
"[L;5l,4l,3]",
"{hello,world}",
"{with space: 5}",
'{\\": no}',
"{foo: [1,2}",
"{error: [test]]}",
"[{,{}]",
'"\\\'"',
"'\\\"'",
]
# fmt: off
nbt_files = [
(
"tests/nbt_files/hello_world.nbt", File({
"name": String("Bananrama")
}, root_name="hello world")
),
(
"tests/nbt_files/bigtest.nbt", File({
"nested compound test": Compound({
"egg": Compound({
"name": String("Eggbert"), "value": Float(0.5)
}),
"ham": Compound({
"name": String("Hampus"), "value": Float(0.75)
})
}),
"intTest": Int(2147483647),
"byteTest": Byte(127),
"stringTest": String("HELLO WORLD THIS IS A TEST STRING ÅÄÖ!"),
"listTest (long)": List[Long]([11, 12, 13, 14, 15]),
"doubleTest": Double(0.49312871321823148),
"floatTest": Float(0.49823147058486938),
"longTest": Long(9223372036854775807),
"listTest (compound)": List[Compound]([
Compound({
"created-on": Long(1264099775885),
"name": String("Compound tag #0")
}),
Compound({
"created-on": Long(1264099775885),
"name": String("Compound tag #1")
})
]),
"byteArrayTest (the first 1000 values of (n*n*255+n*7)%100, starting with n=0 (0, 62, 34, 16, 8, ...))": ByteArray([
(n**2 * 255 + n*7) % 100 for n in range(1000)
]),
"shortTest": Short(32767)
}, root_name="Level", gzipped=True)
),
]
# fmt: on
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/test_literal.py | tests/test_literal.py | import pytest
from nbtlib import parse_nbt, InvalidLiteral
from .inputs import literal_values_for_tags, invalid_literals, nbt_files
@pytest.mark.parametrize("literal, expected_tag", literal_values_for_tags)
def test_literal_parsing(literal, expected_tag):
    # Parsing an snbt literal must yield the expected tag value
    assert parse_nbt(literal) == expected_tag
@pytest.mark.parametrize("literal, expected_tag", literal_values_for_tags)
def test_tag_literal_value(literal, expected_tag):
    # The parsed tag must serialize back to the same canonical snbt form
    assert parse_nbt(literal).snbt() == expected_tag.snbt()
@pytest.mark.parametrize("nbt_data", [nbt_data for _, nbt_data in nbt_files])
def test_parsing_literal_tag_value(nbt_data):
    # Serialize -> reparse -> reserialize must be a stable round-trip
    assert parse_nbt(nbt_data.snbt()).snbt() == nbt_data.snbt()
@pytest.mark.parametrize("literal", invalid_literals)
def test_parsing_invalid_literal(literal):
    # Malformed snbt input must raise InvalidLiteral
    with pytest.raises(InvalidLiteral):
        parse_nbt(literal)
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/test_nbt.py | tests/test_nbt.py | import pytest
from nbtlib import nbt, Compound
from .inputs import nbt_files
def validate_types(tag, expected):
    """Recursively check that *tag* mirrors the concrete types of *expected*.

    For Compound tags, every child value is compared against the entry
    under the same key in *expected*.
    """
    if not isinstance(tag, type(expected)):
        return False
    if isinstance(tag, Compound):
        return all(validate_types(child, expected[key]) for key, child in tag.items())
    return True
@pytest.mark.parametrize("file_path, value", nbt_files)
def test_file_loading(file_path, value):
    """Files on disk must load to the expected nbt value."""
    assert nbt.load(file_path) == value
@pytest.mark.parametrize("file_path, value", nbt_files)
def test_file_compression(file_path, value):
    """Loaded files must report the expected gzip flag."""
    assert nbt.load(file_path).gzipped == value.gzipped
@pytest.mark.parametrize("file_path, value", nbt_files)
def test_file_types(file_path, value):
    """Loaded files must reproduce the exact tag types, not just equal values."""
    assert validate_types(nbt.load(file_path), value), "mismatched types"
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/__init__.py | tests/__init__.py | python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false | |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/test_schema.py | tests/test_schema.py | import pytest
from nbtlib import schema, String, Int, List, CastError
@pytest.fixture
def LooseSchema():
    # Default schema: known keys are cast, extra keys are tolerated
    return schema("Thing", {"foo": String, "bar": List[schema("Bar", {"value": Int})]})
@pytest.fixture
def StrictSchema():
    # strict=True: keys outside the declared schema raise TypeError
    return schema(
        "Thing",
        {"foo": String, "bar": List[schema("Bar", {"value": Int}, strict=True)]},
        strict=True,
    )
@pytest.fixture(params=["LooseSchema", "StrictSchema"])
def DummySchema(request):
    # Parametrized fixture: run the shared tests against both schema kinds
    return request.getfixturevalue(request.param)
def test_normal_instantiate(DummySchema):
    # Values given at construction are cast to the declared tag types
    thing = DummySchema({"foo": 123, "bar": [{"value": "11"}]})
    assert type(thing) is DummySchema
    assert type(thing["foo"]) is String
    assert type(thing["bar"][0]["value"]) is Int
def test_invalid_instantiation(DummySchema):
    # 42 can't be cast to the List type declared for "bar"
    with pytest.raises(CastError):
        DummySchema({"foo": "ok", "bar": 42})
def test_normal_setitem(DummySchema):
    # Item assignment casts the value to the declared tag type
    thing = DummySchema()
    thing["foo"] = "hello"
    assert type(thing["foo"]) is String
def test_invalid_setitem(DummySchema):
    # "abc" can't be cast to the List type declared for "bar"
    thing = DummySchema()
    with pytest.raises(CastError):
        thing["bar"] = "abc"
def test_normal_update(DummySchema):
    # update() goes through the same casting machinery as __setitem__
    thing = DummySchema()
    thing.update({"foo": "hello"})
    assert type(thing["foo"]) is String
def test_invalid_update(DummySchema):
    # The second entry's "value" ([]) can't be cast to Int
    thing = DummySchema()
    with pytest.raises(CastError):
        thing.update({"bar": [{"value": 10}, {"value": []}]})
def test_loose_schema_with_extra_key(LooseSchema):
    # Loose schemas keep unknown keys untouched (no casting applied)
    thing = LooseSchema({"hello": "world"})
    assert type(thing["hello"]) is str
def test_strict_schema_with_extra_key(StrictSchema):
    """A strict schema rejects keys that are not part of its definition."""
    with pytest.raises(TypeError):
        # The instance is never used; the previous `thing =` binding was an
        # unused variable (pyflakes F841).
        StrictSchema({"hello": "world"})
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/tests/test_benchmark.py | tests/test_benchmark.py | from io import BytesIO
import pytest
from nbtlib import nbt
def write_parse(nbt_tag):
    """Round-trip *nbt_tag* through an in-memory binary buffer.

    Serializes the tag with ``write`` and returns the result of parsing
    the same bytes back with ``parse``.
    """
    with BytesIO() as buffer:
        nbt_tag.write(buffer)
        buffer.seek(0)
        return nbt_tag.parse(buffer)
@pytest.mark.parametrize(
    "filename",
    [
        "byte.nbt",
        "short.nbt",
        "int.nbt",
        "long.nbt",
        "float.nbt",
        "double.nbt",
        "byte_array.nbt",
        "string.nbt",
        "list.nbt",
        "compound.nbt",
        "int_array.nbt",
        "long_array.nbt",
    ],
)
def test_tag_bench(benchmark, filename):
    """Benchmark a write/parse round-trip for each tag fixture file.

    The f-string previously contained no placeholder, so *filename* was
    never interpolated and every case targeted the same bogus path.
    """
    nbt_tag = nbt.load(f"tests/nbt_files/bench/{filename}")
    result = benchmark(write_parse, nbt_tag)
    assert result == nbt_tag
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/path.py | nbtlib/path.py | """This module defines utilities for accessing deeply nested properties.
Exported items:
Path -- Class representing an nbt path, inherits from `tuple`
InvalidPath -- Exception raised when creating an invalid nbt path
"""
__all__ = ["Path", "InvalidPath", "NamedKey", "ListIndex", "CompoundMatch"]
import re
from typing import NamedTuple, Optional
from .tag import Int, String, Array, List, Compound
from .literal.parser import Parser, tokenize, InvalidLiteral
class InvalidPath(ValueError):
"""Raised when creating an invalid nbt path."""
class Path(tuple):
    """Represents an nbt path.
    Instances of this class can be used for indexing into list and compound
    tags for accessing deeply nested properties.
    A path is an immutable tuple of accessor objects (NamedKey, ListIndex,
    CompoundMatch); each accessor narrows the set of matched tags one step.
    """
    __slots__ = ()
    def __new__(cls, path=None):
        # Accepts None (empty path), another Path (copy), an int (single
        # list index), or a string parsed into a sequence of accessors.
        if path is None:
            return cls.from_accessors()
        if isinstance(path, Path):
            return cls.from_accessors(path)
        if isinstance(path, int):
            # Handle an integer x as if the string "[x]" were just parsed
            return cls.from_accessors((ListIndex(index=int(path)),))
        accessors = ()
        for accessor in parse_accessors(path):
            # extend_accessors merges consecutive compound matches and
            # validates accessor combinations as the tuple grows
            accessors = extend_accessors(accessors, accessor)
        return cls.from_accessors(accessors)
    def __getitem__(self, key):
        """Return a new path extended with *key*.
        The key may be another Path, a string key, an integer index, the
        empty slice ``[:]`` (select all list items), or a Compound used as
        a subtree matcher. Anything else raises KeyError.
        """
        if isinstance(key, Path):
            new_accessors = tuple(key)
        elif isinstance(key, str):
            new_accessors = (NamedKey(key),)
        elif isinstance(key, int):
            new_accessors = (ListIndex(index=int(key)),)
        elif isinstance(key, slice) and all(
            n is None for n in [key.start, key.stop, key.step]
        ):
            new_accessors = (ListIndex(index=None),)
        elif isinstance(key, Compound):
            new_accessors = (CompoundMatch(key),)
        else:
            raise KeyError(key)
        accessors = tuple(self)
        for accessor in new_accessors:
            accessors = extend_accessors(accessors, accessor)
        return self.from_accessors(accessors)
    def __add__(self, other):
        # path + other concatenates; plain strings and ints are coerced
        if isinstance(other, Path):
            return self[other]
        elif isinstance(other, (str, int)):
            return self[Path(other)]
        else:
            return NotImplemented
    def __radd__(self, other):
        # other + path, where other is a Path, string, or int prefix
        if isinstance(other, Path):
            return other[self]
        elif isinstance(other, (str, int)):
            return Path(other)[self]
        else:
            return NotImplemented
    def __eq__(self, other):
        # Strings are parsed first so that Path("a.b") == "a.b" holds
        if isinstance(other, str):
            other = Path(other)
        return super().__eq__(other)
    def __ne__(self, other):
        if isinstance(other, str):
            other = Path(other)
        return super().__ne__(other)
    def __hash__(self):
        # Defining __eq__ would otherwise disable tuple hashing
        return super().__hash__()
    @classmethod
    def from_accessors(cls, accessors=()):
        # Build the tuple directly, bypassing the parsing done in __new__
        return super().__new__(cls, accessors)
    def traverse(self, tag):
        """Walk *tag* and return ``(tags, setter, deleter)``.
        ``tags`` is a list of ``(parent, tag)`` pairs for every tag matched
        by the full path; ``setter``/``deleter`` come from the last accessor
        along the path that supports the corresponding operation.
        """
        tags = [(None, tag)]
        setter = None
        deleter = None
        for accessor in self:
            setter = getattr(accessor, "set", setter)
            deleter = getattr(accessor, "delete", deleter)
            tags = accessor.get(tags)
        return tags, setter, deleter
    def get(self, tag):
        """Return the list of tags matched by this path inside *tag*."""
        return [tag for _, tag in self.traverse(tag)[0]]
    def set(self, tag, value):
        """Set every tag matched by this path inside *tag* to *value*."""
        tags, setter, _ = self.traverse(tag)
        if setter:
            setter(tags, value)
    def delete(self, tag):
        """Delete every tag matched by this path inside *tag*."""
        tags, _, deleter = self.traverse(tag)
        if deleter:
            deleter(tags)
    def __repr__(self):
        return f"{self.__class__.__name__}({str(self)!r})"
    def __str__(self):
        # Rebuild the canonical string form: bracketed/braced segments
        # attach to the previous segment, named keys start a new one.
        segments = [""]
        for accessor in self:
            segment = str(accessor)
            if not segment or segment.startswith("["):
                segments[-1] += segment
            elif segment.startswith("{"):
                if segments[-1].endswith("[]"):
                    # Fold "[]{...}" back into the equivalent "[{...}]" form
                    segments[-1] = segments[-1][:-2] + f"[{segment}]"
                else:
                    segments[-1] += segment
            else:
                segments.append(segment)
        return ".".join(filter(None, segments))
def can_be_converted_to_int(string):
    """Return True if *string* is a str holding a valid integer literal."""
    if not isinstance(string, str):
        return False
    try:
        int(string)
    except ValueError:
        return False
    return True
def parse_accessors(path):
    """Yield accessor objects parsed from the nbt path string *path*.
    Tokenizes with the snbt parser and converts each parsed element into
    NamedKey, ListIndex, or CompoundMatch accessors. Raises InvalidPath
    when an element is not a valid path component.
    """
    try:
        parser = Parser(tokenize(path))
    except InvalidLiteral:
        # No tokens at all (e.g. empty string): the path has no accessors
        return ()
    while True:
        try:
            tag = parser.parse()
        except InvalidLiteral as exc:
            raise InvalidPath(f"Invalid path at position {exc.args[0][0]}") from exc
        if isinstance(tag, String):
            if parser.current_token.type == "QUOTED_STRING":
                # Quoted keys are taken verbatim; tag[:] unwraps to plain str
                yield NamedKey(tag[:])
            else:
                # Bare words may contain dots separating several keys
                yield from (NamedKey(key) for key in tag.split(".") if key)
        elif isinstance(tag, List):
            if not tag:
                # "[]" selects every element of a list
                yield ListIndex(index=None)
            elif len(tag) != 1:
                raise InvalidPath("Brackets should only contain one element")
            elif issubclass(tag.subtype, Compound):
                # "[{...}]" filters all list elements by compound match
                yield ListIndex(index=None)
                yield CompoundMatch(tag[0])
            elif issubclass(tag.subtype, Int) or can_be_converted_to_int(tag[0]):
                yield ListIndex(int(tag[0]))
            else:
                raise InvalidPath(
                    "Brackets should only contain an integer or a compound"
                )
        elif isinstance(tag, Compound):
            yield CompoundMatch(tag)
        elif parser.current_token.type == "NUMBER":
            # Dotted numbers like "85.2" are reinterpreted as key names
            yield from (
                NamedKey(key) for key in parser.current_token.value.split(".") if key
            )
        else:
            raise InvalidPath(f"Invalid path element {tag}")
        try:
            parser.next()
        except InvalidLiteral:
            # End of input: the whole path has been consumed
            break
def extend_accessors(accessors, new_accessor):
    """Return the *accessors* tuple extended with *new_accessor*.
    Consecutive compound matches are merged into a single CompoundMatch;
    matching a compound right after an explicit list index is invalid.
    """
    if isinstance(new_accessor, CompoundMatch) and accessors:
        *except_last, last_accessor = accessors
        if isinstance(last_accessor, CompoundMatch):
            # Merge "{a}{b}" into one match; the newer keys take priority
            new_compound = new_accessor.compound.with_defaults(last_accessor.compound)
            return tuple(except_last) + (CompoundMatch(new_compound),)
        if isinstance(last_accessor, ListIndex) and last_accessor.index is not None:
            raise InvalidPath(
                f"Can't match a compound on list items selected with {last_accessor!r}"
            )
    return accessors + (new_accessor,)
class NamedKey(NamedTuple):
    """Accessor selecting the value stored under a compound key."""
    key: str
    UNQUOTED_REGEX = re.compile(r"^[a-zA-Z0-9_]+$")
    def get(self, tags):
        # Keep only dict-like parents that actually contain the key
        return [
            (tag, tag[self.key])
            for _, tag in tags
            if isinstance(tag, dict) and self.key in tag
        ]
    def set(self, tags, value):
        for parent, _ in tags:
            parent[self.key] = value
    def delete(self, tags):
        for parent, _ in tags:
            if self.key in parent:
                del parent[self.key]
    def __str__(self):
        # Quote and escape the key unless it is a simple identifier
        return (
            self.key
            if self.UNQUOTED_REGEX.match(self.key)
            else '"' + self.key.replace('"', '\\"') + '"'
        )
class ListIndex(NamedTuple):
    """Accessor selecting one list item, or all items when index is None."""
    index: Optional[int]
    def get(self, tags):
        tags = [tag for tag in tags if isinstance(tag[1], (List, Array))]
        if self.index is None:
            # "[]": fan out to every element, remembering each position
            return [((tag, i), item) for _, tag in tags for i, item in enumerate(tag)]
        # Negative indices allowed; skip lists where the index is out of range
        return [
            ((tag, self.index), tag[self.index])
            for _, tag in tags
            if -len(tag) <= self.index < len(tag)
        ]
    def set(self, tags, value):
        for (parent, i), _ in tags:
            if self.index is None or i == self.index:
                parent[i] = value
    def delete(self, tags):
        # Iterate in reverse so earlier deletions don't shift pending indices
        for (parent, i), _ in reversed(tags):
            if self.index is None or i == self.index:
                del parent[i]
    def __str__(self):
        return f'[{"" if self.index is None else self.index}]'
class CompoundMatch(NamedTuple):
    """Accessor keeping only tags that match a compound pattern."""
    compound: Compound
    def get(self, tags):
        return [(parent, tag) for parent, tag in tags if tag.match(self.compound)]
    def __str__(self):
        return self.compound.snbt()
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/cli.py | nbtlib/cli.py | from argparse import ArgumentParser, ArgumentTypeError
from json import dumps as json_dumps
from pprint import pprint
from nbtlib import InvalidLiteral, Path, nbt, parse_nbt, serialize_tag
from nbtlib.tag import Compound, find_tag
# Validation helper
def nbt_data(literal):
    """Argparse type converter parsing an snbt literal into a root compound.

    Raises ArgumentTypeError when the literal is invalid snbt or when the
    parsed root tag is not a compound tag.
    """
    try:
        parsed = parse_nbt(literal)
    except InvalidLiteral as exc:
        raise ArgumentTypeError(exc) from exc
    if not isinstance(parsed, Compound):
        raise ArgumentTypeError("the root nbt tag must be a compound tag")
    return parsed
# Create the argument parser
parser = ArgumentParser(prog="nbt", description="Perform operations on nbt files.")

# Input modes are mutually exclusive: -r reads binary nbt, -s reads snbt text.
inputs = parser.add_mutually_exclusive_group()
inputs.add_argument("-r", action="store_true", help="read nbt data from a file")
inputs.add_argument("-s", action="store_true", help="read snbt from a file")

# Output operations are mutually exclusive: -w overwrites, -m merges.
outputs = parser.add_mutually_exclusive_group()
outputs.add_argument("-w", metavar="<nbt>", help="write nbt to a file")
outputs.add_argument("-m", metavar="<nbt>", help="merge nbt into a file")

# Encoding flags.
parser.add_argument("--plain", action="store_true", help="don't use gzip compression")
parser.add_argument("--little", action="store_true", help="use little-endian format")

# Display formatting flags.
parser.add_argument("--compact", action="store_true", help="output compact snbt")
parser.add_argument("--pretty", action="store_true", help="output indented snbt")
parser.add_argument("--unpack", action="store_true", help="output interpreted nbt")
parser.add_argument("--json", action="store_true", help="output nbt as json")

# Tag selection.
parser.add_argument("--path", metavar="<path>", help="output all the matching tags")
parser.add_argument(
    "--find", metavar="<path>", help="recursively find the first matching tag"
)
parser.add_argument("file", metavar="<file>", help="the target file")
# Define command-line interface
def main():
    """Entry point for the ``nbt`` command-line tool.

    Dispatches to read/write/merge/display based on the parsed flags and
    reports argument or I/O failures through ``parser.error``.
    """
    args = parser.parse_args()
    gzipped = not args.plain
    byteorder = "little" if args.little else "big"
    try:
        if args.r or args.s:
            # Input mode: stream matching tags out of the target file.
            for tag in read(
                args.file, gzipped, byteorder, args.s, args.path, args.find
            ):
                if args.w:
                    write(tag, args.w, gzipped, byteorder)
                elif args.m:
                    merge(tag, args.m, gzipped, byteorder)
                else:
                    display(tag, args.compact, args.pretty, args.unpack, args.json)
        elif args.w:
            # Output-only mode: the literal comes from the flag itself.
            write(nbt_data(args.w), args.file, gzipped, byteorder)
        elif args.m:
            merge(nbt_data(args.m), args.file, gzipped, byteorder)
        else:
            parser.error("one of the following arguments is required: -r -s -w -m")
    except (ArgumentTypeError, IOError) as exc:
        parser.error(f"{exc}")
def read(filename, gzipped, byteorder, snbt, path, find):
    """Yield tags from the target file, optionally filtered by path queries.

    Arguments:
        filename: The file to read from.
        gzipped, byteorder: Binary nbt decoding options (ignored for snbt).
        snbt: When true, parse the file contents as an snbt literal.
        path: Optional nbt path selecting which tags to yield.
        find: Optional nbt path searched recursively inside each selected tag.
    """
    if snbt:
        with open(filename) as f:
            nbt_file = parse_nbt(f.read())
    else:
        nbt_file = nbt.load(filename, gzipped=gzipped, byteorder=byteorder)
    if path:
        selected = nbt_file.get_all(Path(path))
    else:
        selected = [nbt_file]
    for tag in selected:
        result = tag.find(Path(find)) if find else tag
        if result is not None:
            yield result
def display(tag, compact, pretty, unpack, json):
    """Print a tag as interpreted python, json, or an snbt literal."""
    indent = 4 if pretty else None
    if unpack:
        value = tag.unpack()
        if pretty:
            pprint(value)
        else:
            print(value)
    elif json:
        print(json_dumps(tag.unpack(json=True), indent=indent))
    else:
        print(serialize_tag(tag, indent=indent, compact=compact))
def write(nbt_data, filename, gzipped, byteorder):
    """Wrap the root compound in a File and save it to disk."""
    nbt_file = nbt.File(nbt_data)
    nbt_file.save(filename, gzipped=gzipped, byteorder=byteorder)
def merge(nbt_data, filename, gzipped, byteorder):
    """Load the target file, merge the given data into it, and save in place."""
    target = nbt.load(filename, gzipped=gzipped, byteorder=byteorder)
    target.merge(nbt_data)
    target.save()
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/schema.py | nbtlib/schema.py | """This module defines tools for creating tag schemas.
Exported items:
schema -- Helper function to define compound schemas
CompoundSchema -- `Compound` subclass that enforces a tag schema
"""
__all__ = ["schema", "CompoundSchema"]
from itertools import chain
from .tag import Compound, CastError
def schema(name, dct, *, strict=False):
    """Build a :class:`CompoundSchema` subclass from a mapping.

    Convenience wrapper around subclassing :class:`CompoundSchema`.

    Arguments:
        name: The name of the generated class.
        dct: Mapping of keys to tag types or nested compound schemas.
        strict: When True, interacting with keys absent from the schema
            raises a ``TypeError``.
    """
    namespace = {"__slots__": (), "schema": dct, "strict": strict}
    return type(name, (CompoundSchema,), namespace)
class CompoundSchema(Compound):
    """Class that extends the base `Compound` tag by enforcing a schema.

    Defining a custom schema is really useful if you're dealing with
    recurring data structures. Subclassing the `CompoundSchema` class
    with your own schema will save you some typing by casting all the
    keys defined in the schema to the appropriate tag type.

    The class inherits from `Compound` and will cast values to the
    predefined tag types for all of the inherited mutating operations.

    Class attributes:
        schema -- Dictionary mapping keys to tag types or other schemas
        strict -- Boolean enabling strict schema validation
    """

    __slots__ = ("_strict",)

    # Subclasses override these class attributes (see the `schema` helper).
    schema = {}
    strict = False

    def __init__(self, *args, strict=False, **kwargs):
        super().__init__(*args, **kwargs)
        # Instance-level strictness: enabled by the argument or the class flag.
        self._strict = strict or self.strict
        for key, value in self.items():
            correct_value = self.cast_item(key, value)
            # Only write back when casting produced a different object;
            # identity check avoids redundant __setitem__ calls.
            if correct_value is not value:
                super().__setitem__(key, correct_value)

    def __setitem__(self, key, value):
        super().__setitem__(key, self.cast_item(key, value))

    def update(self, mapping, **kwargs):
        pairs = chain(mapping.items(), kwargs.items())
        super().update((key, self.cast_item(key, value)) for key, value in pairs)

    def cast_item(self, key, value):
        """Cast schema item to the appropriate tag type."""
        schema_type = self.schema.get(key)
        if schema_type is None:
            # Unknown key: reject in strict mode, otherwise pass through.
            if self._strict:
                raise TypeError(f"Invalid key {key!r}")
        elif not isinstance(value, schema_type):
            try:
                # Nested schemas propagate the strict flag down the tree.
                return (
                    schema_type(value, strict=self._strict)
                    if issubclass(schema_type, CompoundSchema)
                    else schema_type(value)
                )
            except CastError:
                raise
            except Exception as exc:
                raise CastError(value, schema_type) from exc
        return value
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/tag.py | nbtlib/tag.py | r"""
.. testsetup::
import io
import struct
from pprint import pprint
from nbtlib import *
All the tag classes have a :meth:`Base.parse` classmethod that reads
nbt data from a file-like object and returns a tag instance. Tag
instances can then write their binary representation back to file-like
objects using the :meth:`Base.write` method.
.. doctest::
>>> fileobj = io.BytesIO(b"\x03\x00\x03foo\x00\x00\x00{\x00")
>>> data = Compound.parse(fileobj)
>>> data
Compound({'foo': Int(123)})
>>> fileobj = io.BytesIO()
>>> data.write(fileobj)
>>> fileobj.getvalue()
b'\x03\x00\x03foo\x00\x00\x00{\x00'
Each tag inherits from a closely equivalent python builtin. For instance,
the :class:`Compound` class inherits from the builtin ``dict`` type.
This means that all the familiar operations available on the base type
work out of the box on the derived tag instances.
+-------------------+---------------------------------------------------------+
| Base type | Associated nbt tags |
+===================+=========================================================+
| ``int`` | :class:`Byte` :class:`Short` :class:`Int` :class:`Long` |
+-------------------+---------------------------------------------------------+
| ``float`` | :class:`Float` :class:`Double` |
+-------------------+---------------------------------------------------------+
| ``str`` | :class:`String` |
+-------------------+---------------------------------------------------------+
| ``numpy.ndarray`` | :class:`ByteArray` :class:`IntArray` :class:`LongArray` |
+-------------------+---------------------------------------------------------+
| ``list`` | :class:`List` |
+-------------------+---------------------------------------------------------+
| ``dict`` | :class:`Compound` |
+-------------------+---------------------------------------------------------+
Operator overloading works as expected with all tag types. Note that values are
returned unwrapped.
.. doctest::
>>> data = Compound({"foo": Int(123)})
>>> data["foo"] = Int(-1 * data["foo"])
>>> data["bar"] = String("hello")
>>> data
Compound({'foo': Int(-123), 'bar': String('hello')})
"""
# Public names exported by ``from nbtlib.tag import *``.
__all__ = [
    "Base",
    "Numeric",
    "NumericInteger",
    "Byte",
    "Short",
    "Int",
    "Long",
    "Float",
    "Double",
    "String",
    "List",
    "Compound",
    "End",
    "Array",
    "ByteArray",
    "IntArray",
    "LongArray",
    "EndInstantiation",
    "OutOfRange",
    "IncompatibleItemType",
    "CastError",
]
from struct import Struct
from struct import error as StructError
import numpy as np
from .literal.serializer import serialize_tag
# Struct formats used to pack and unpack numeric values
def get_format(fmt, string):
    """Return a mapping from byte order ("big"/"little") to a format object.

    Arguments:
        fmt: Callable building a format from a struct-style format string
            (e.g. ``struct.Struct`` or ``numpy.dtype``).
        string: The format characters without the byte-order prefix.
    """
    prefixes = {"big": ">", "little": "<"}
    return {order: fmt(prefix + string) for order, prefix in prefixes.items()}
# Pre-compiled big/little-endian struct formats for each numeric tag type.
BYTE = get_format(Struct, "b")
SHORT = get_format(Struct, "h")
USHORT = get_format(Struct, "H")  # unsigned: used for string length prefixes
INT = get_format(Struct, "i")
LONG = get_format(Struct, "q")
FLOAT = get_format(Struct, "f")
DOUBLE = get_format(Struct, "d")
# Custom errors
class EndInstantiation(TypeError):
    """Raised when trying to instantiate an :class:`End` tag."""

    def __init__(self):
        message = "End tags can't be instantiated"
        super().__init__(message)
class OutOfRange(ValueError):
    """Raised when a numeric value doesn't fit in the tag's range.

    Converting builtin ``int`` instances to numeric nbt tags can fail if
    the tag type isn't big enough.

    .. doctest::

        >>> Byte(127)
        Byte(127)
        >>> Byte(128)
        Traceback (most recent call last):
        ...
        nbtlib.tag.OutOfRange: Byte(128) is out of range
    """

    def __init__(self, value):
        message = f"{value!r} is out of range"
        super().__init__(message)
class IncompatibleItemType(TypeError):
    """Raised when a list item is incompatible with the subtype of the list.

    Unlike builtin python lists, list tags are homogeneous, so adding an
    incompatible item raises this error. The offending item and the
    expected subtype are kept on the exception for inspection.

    .. doctest::

        >>> List([String("foo"), Int(123)])
        Traceback (most recent call last):
        ...
        nbtlib.tag.IncompatibleItemType: Int(123) should be a String tag
    """

    def __init__(self, item, subtype):
        self.item = item
        self.subtype = subtype
        super().__init__(f"{item!r} should be a {subtype.__name__} tag")
class CastError(ValueError):
    """Raised when an object couldn't be converted to the appropriate tag type.

    Casting occurs when adding items to list tags and nbt schema
    instances. The original object and the target tag type are kept on
    the exception for inspection.

    .. doctest::

        >>> integers = List[Int]()
        >>> integers.append("foo")
        Traceback (most recent call last):
        ...
        nbtlib.tag.CastError: Couldn't cast 'foo' to Int

    Note that casting only occurs when the value is an unwrapped python
    object; incompatible tags raise :class:`IncompatibleItemType` instead.

    .. doctest::

        >>> strings = List[String]()
        >>> strings.append(123)
        >>> strings
        List[String]([String('123')])
        >>> strings.append(Int(123))
        Traceback (most recent call last):
        ...
        nbtlib.tag.IncompatibleItemType: Int(123) should be a String tag
    """

    def __init__(self, obj, tag_type):
        self.obj = obj
        self.tag_type = tag_type
        super().__init__(f"Couldn't cast {obj!r} to {tag_type.__name__}")
# Read/write helpers for numeric and string values
def read_numeric(fmt, fileobj, byteorder="big"):
    """Read a single numeric value from a file-like object.

    Arguments:
        fmt: Mapping of byte order to a compiled struct format.
        fileobj: A readable binary file-like object.
        byteorder: Either "big" or "little"; anything else raises ValueError.

    Returns 0 when the stream doesn't hold enough bytes for the format.
    """
    try:
        struct_fmt = fmt[byteorder]
    except KeyError as exc:
        raise ValueError("Invalid byte order") from exc
    try:
        return struct_fmt.unpack(fileobj.read(struct_fmt.size))[0]
    except StructError:
        # Short read: treat truncated data as zero.
        return 0
def write_numeric(fmt, value, fileobj, byteorder="big"):
    """Write a single numeric value to a file-like object.

    Arguments:
        fmt: Mapping of byte order to a compiled struct format.
        value: The number to pack.
        fileobj: A writable binary file-like object.
        byteorder: Either "big" or "little"; anything else raises ValueError.
    """
    try:
        struct_fmt = fmt[byteorder]
    except KeyError as exc:
        raise ValueError("Invalid byte order") from exc
    fileobj.write(struct_fmt.pack(value))
def read_string(fileobj, byteorder="big"):
    """Read a length-prefixed utf-8 string from a file-like object."""
    length = read_numeric(USHORT, fileobj, byteorder)
    data = fileobj.read(length)
    # Undecodable bytes become replacement characters instead of raising.
    return data.decode("utf-8", "replace")
def write_string(value, fileobj, byteorder="big"):
    """Write a length-prefixed utf-8 string to a file-like object."""
    encoded = value.encode("utf-8")
    # The prefix is the byte length of the encoded data, not the character count.
    write_numeric(USHORT, len(encoded), fileobj, byteorder)
    fileobj.write(encoded)
# Path helpers
def find_tag(key, tags):
    """Return the first recursively matching tag, or ``None``.

    Performs a depth-first search over compound and list tags, trying a
    direct lookup on each container before descending into its children.
    """
    for tag in tags:
        if not isinstance(tag, (Compound, List)):
            continue
        value = tag.get(key)
        if value is None:
            children = list(tag) if isinstance(tag, List) else list(tag.values())
            value = find_tag(key, children)
        if value is not None:
            return value
    return None
# Tag definitions
class Base:
    """Base class inherited by all nbt tags.

    This class defines the API shared by all nbt tags. Derived classes
    that define a :attr:`tag_id` attribute are considered as concrete
    tag implementations and are registered in the :attr:`all_tags`
    registry. Concrete tag implementations inherit from both the
    :class:`Base` class and their associated builtin data type.

    Attributes:
        all_tags: A dictionary mapping tag ids to child classes.

            .. doctest::

                >>> pprint(Base.all_tags)
                {0: <class 'nbtlib.tag.End'>,
                 1: <class 'nbtlib.tag.Byte'>,
                 2: <class 'nbtlib.tag.Short'>,
                 3: <class 'nbtlib.tag.Int'>,
                 4: <class 'nbtlib.tag.Long'>,
                 5: <class 'nbtlib.tag.Float'>,
                 6: <class 'nbtlib.tag.Double'>,
                 7: <class 'nbtlib.tag.ByteArray'>,
                 8: <class 'nbtlib.tag.String'>,
                 9: <class 'nbtlib.tag.List'>,
                 10: <class 'nbtlib.tag.Compound'>,
                 11: <class 'nbtlib.tag.IntArray'>,
                 12: <class 'nbtlib.tag.LongArray'>}

            The mapping is used by the :meth:`get_tag` classmethod to
            retrieve the tag type when parsing the binary format.

        tag_id: The id of the tag in the binary format.

            .. doctest::

                >>> Int.tag_id
                3

        serializer: The name of the associated snbt serializer.

            .. doctest ::

                >>> Int.serializer
                'numeric'
    """

    __slots__ = ()

    # `None` marks abstract intermediate classes; concrete tags override
    # tag_id and are auto-registered by __init_subclass__.
    all_tags = {}
    tag_id = None
    serializer = None

    def __init_subclass__(cls):
        # Add class to the `all_tags` dictionary if it has a tag id
        # (first registration wins, so List/schema variants don't overwrite).
        if cls.tag_id is not None and cls.tag_id not in cls.all_tags:
            cls.all_tags[cls.tag_id] = cls

    @classmethod
    def get_tag(cls, tag_id):
        """Return the tag class corresponding to the given tag id.

        .. doctest ::

            >>> Base.get_tag(3)
            <class 'nbtlib.tag.Int'>

        Arguments:
            tag_id: The tag id must be valid otherwise the method raises a ``KeyError``.
        """
        return cls.all_tags[tag_id]

    @classmethod
    def parse(cls, fileobj, byteorder="big"):
        r"""Parse data from a file-like object and return a tag instance.

        The default implementation does nothing. Concrete tags override
        this method.

        Arguments:
            fileobj: A readable file-like object.
            byteorder: Whether the nbt data is big-endian or little-endian.

        .. doctest::

            >>> Int.parse(io.BytesIO(b"\x00\x00\x00\x01"))
            Int(1)
            >>> Int.parse(io.BytesIO(b"\x01\x00\x00\x00"), byteorder="little")
            Int(1)
        """

    def write(self, fileobj, byteorder="big"):
        r"""Write the binary representation of the tag to a file-like object.

        The default implementation does nothing. Concrete tags override
        this method.

        Arguments:
            fileobj: A writable file-like object.
            byteorder: Whether the nbt data should be big-endian or little-endian.

        .. doctest::

            >>> big_endian = io.BytesIO()
            >>> little_endian = io.BytesIO()
            >>> Int(1).write(big_endian)
            >>> Int(1).write(little_endian, byteorder="little")
            >>> big_endian.getvalue()
            b'\x00\x00\x00\x01'
            >>> little_endian.getvalue()
            b'\x01\x00\x00\x00'
        """

    def match(self, other):
        """Check whether the tag recursively matches a subset of values.

        The default implementation checks that the :attr:`tag_id` of the argument
        matches and that the two instances are equal. Concrete tags override
        this method.

        .. doctest::

            >>> data = Compound({
            ...     'foo': Int(42),
            ...     'hello': String('world')
            ... })
            >>> data.match({'foo': 42})
            True
        """
        # Plain python values (no tag_id attribute) are compared by equality only.
        if hasattr(other, "tag_id") and self.tag_id != other.tag_id:
            return False
        return self == other

    def snbt(self, indent=None, compact=False, quote=None):
        """Return the snbt literal corresponding to the tag instance.

        .. doctest::

            >>> Compound({"foo": Long(123)}).snbt()
            '{foo: 123L}'
            >>> Compound({"foo": Long(123)}).snbt(compact=True)
            '{foo:123L}'
            >>> print(Compound({"foo": Long(123)}).snbt(indent=4))
            {
                foo: 123L
            }
        """
        return serialize_tag(self, indent=indent, compact=compact, quote=quote)

    def unpack(self, json=False):
        """Return the unpacked nbt value as an instance of the associated base type.

        .. doctest::

            >>> Compound({"foo": Long(123)}).unpack()
            {'foo': 123}

        Arguments:
            json: Whether the returned value should be json-serializable.
                This argument will convert array tags into plain python lists
                instead of numpy arrays.

                .. doctest::

                    >>> Compound({"foo": ByteArray([1, 2, 3])}).unpack()
                    {'foo': array([1, 2, 3], dtype=int8)}
                    >>> Compound({"foo": ByteArray([1, 2, 3])}).unpack(json=True)
                    {'foo': [1, 2, 3]}
        """
        return None

    def __repr__(self):
        # Concrete tags wrap the underlying builtin repr in the class name.
        if self.tag_id is not None:
            return f"{self.__class__.__name__}({super().__repr__()})"
        return super().__repr__()
class End(Base):
    """Marker tag that terminates compound tags in the binary format.

    The type exists so that the tag registry is complete, but it can't be
    used on its own: manual instantiation raises :class:`EndInstantiation`.

    .. doctest::

        >>> End()
        Traceback (most recent call last):
        ...
        nbtlib.tag.EndInstantiation: End tags can't be instantiated
    """

    __slots__ = ()
    tag_id = 0

    def __new__(cls, *args, **kwargs):
        raise EndInstantiation()
class Numeric(Base):
    r"""Intermediate class that represents a numeric nbt tag.

    This class inherits from the :class:`Base` class and implements
    :meth:`parse` and :meth:`write` for all the numeric nbt tags using
    the :attr:`fmt` attribute.

    Derived tags will use the ``numeric`` serializer and can specify a
    literal suffix with the :attr:`suffix` attribute.

    Attributes:
        fmt: The struct format used to pack and unpack the tag value.

            .. doctest::

                >>> Int.fmt['big'].pack(1)
                b'\x00\x00\x00\x01'
                >>> Int.fmt['little'].pack(1)
                b'\x01\x00\x00\x00'

        suffix: The suffix used by the ``numeric`` snbt serializer.

            .. doctest::

                >>> Long.suffix
                'L'
                >>> Long(123).snbt()
                '123L'
    """

    __slots__ = ()
    serializer = "numeric"

    # Concrete subclasses set `fmt` to a {"big": ..., "little": ...} mapping
    # and may override the snbt literal suffix (empty by default).
    fmt = None
    suffix = ""

    @classmethod
    def parse(cls, fileobj, byteorder="big"):
        """Override :meth:`Base.parse` for numeric tags."""
        return cls(read_numeric(cls.fmt, fileobj, byteorder))

    def write(self, fileobj, byteorder="big"):
        """Override :meth:`Base.write` for numeric tags."""
        write_numeric(self.fmt, self, fileobj, byteorder)
class NumericInteger(Numeric, int):
    """Intermediate class that represents a numeric integer nbt tag.

    This class adds range checks to the :class:`Numeric` class. It also
    inherits from ``int`` and raises an :class:`OutOfRange` exception
    when the tag is instantiated with a value that can't be represented
    by the associated struct format.

    .. doctest::

        >>> Byte(127)
        Byte(127)
        >>> Byte(128)
        Traceback (most recent call last):
        ...
        nbtlib.tag.OutOfRange: Byte(128) is out of range

    Concrete tag implementations deriving from this class also inherit
    utilities for interpreting the value of the tag as an unsigned
    integer.

    .. doctest::

        >>> value = Byte.from_unsigned(255)
        >>> value
        Byte(-1)
        >>> value.as_unsigned
        255

    Attributes:
        range: The supported range of values.

            .. doctest::

                >>> Byte.range
                range(-128, 128)
                >>> Int.range
                range(-2147483648, 2147483648)

        mask: The bit mask derived from the struct format.

            .. doctest::

                >>> f'{Byte.mask:b}'
                '11111111'

        bits: The bit length derived from the struct format.

            .. doctest::

                >>> Int.bits
                32
                >>> Long.bits
                64
    """

    __slots__ = ()

    # Filled in per-subclass by __init_subclass__ below.
    range = None
    mask = None
    bits = None

    def __init_subclass__(cls):
        super().__init_subclass__()
        # A format of N bytes holds signed values in [-2**(8N-1), 2**(8N-1)).
        limit = 2 ** (8 * cls.fmt["big"].size - 1)
        cls.range = range(-limit, limit)
        cls.mask = limit * 2 - 1
        cls.bits = cls.mask.bit_length()

    def __new__(cls, *args, **kwargs):
        self = super().__new__(cls, *args, **kwargs)
        # Reject values that can't be represented by the binary format.
        if int(self) not in cls.range:
            raise OutOfRange(self)
        return self

    def unpack(self, json=False):
        """Override :meth:`Base.unpack` for numeric integer tags."""
        return int(self)

    @property
    def as_unsigned(self):
        """Interpret the value of the tag as an unsigned integer."""
        return self & self.mask

    @classmethod
    def from_unsigned(cls, value):
        """Encode an unsigned integer as an integer tag."""
        # Precedence note: `&` binds looser than `*` and `+`, so this is
        # value - ((value * 2) & (mask + 1)) — it subtracts 2**bits exactly
        # when the sign bit of `value` is set, mapping the unsigned
        # encoding onto the signed range.
        return cls(value - (value * 2 & cls.mask + 1))
class Byte(NumericInteger):
    """Nbt tag representing a signed byte."""

    __slots__ = ()
    tag_id = 1
    fmt = BYTE  # 8-bit signed
    suffix = "b"
class Short(NumericInteger):
    """Nbt tag representing a signed 16 bit integer."""

    __slots__ = ()
    tag_id = 2
    fmt = SHORT
    suffix = "s"
class Int(NumericInteger):
    """Nbt tag representing a signed 32 bit integer."""

    __slots__ = ()
    tag_id = 3
    fmt = INT
    # Inherits the empty suffix from Numeric: ints have no letter in snbt.
class Long(NumericInteger):
    """Nbt tag representing a signed 64 bit integer."""

    __slots__ = ()
    tag_id = 4
    fmt = LONG
    suffix = "L"
class Float(Numeric, float):
    """Nbt tag representing a single-precision floating point number."""

    __slots__ = ()
    tag_id = 5
    fmt = FLOAT
    suffix = "f"

    def unpack(self, json=False):
        """Override :meth:`Base.unpack` for float tags."""
        return float(self)
class Double(Numeric, float):
    """Nbt tag representing a double-precision floating point number."""

    __slots__ = ()
    tag_id = 6
    fmt = DOUBLE
    suffix = "d"

    def unpack(self, json=False):
        """Override :meth:`Base.unpack` for double tags."""
        return float(self)
class Array(Base, np.ndarray):
    """Intermediate class that represents an array nbt tag.

    Array tags are represented by numpy arrays. This class combines the
    :class:`Base` class with the numpy ``ndarray`` type and implements
    :meth:`parse` and :meth:`write` depending on a few additional
    attributes.

    Derived tags will use the ``array`` serializer and can specify an array
    prefix with the :attr:`array_prefix` attribute.

    Attributes:
        item_type: The numpy array data type.

            .. doctest::

                >>> IntArray.item_type['big']
                dtype('>i4')
                >>> IntArray.item_type['little']
                dtype('int32')

        array_prefix: The literal array prefix.

            .. doctest::

                >>> IntArray.array_prefix
                'I'
                >>> IntArray([1, 2, 3]).snbt()
                '[I; 1, 2, 3]'

        wrapper: The tag used to wrap the integer.

            .. doctest::

                >>> IntArray.wrapper
                <class 'nbtlib.tag.Int'>
                >>> IntArray([1, 2, 3])[0]
                Int(1)
    """

    __slots__ = ()
    serializer = "array"

    # Concrete subclasses provide the dtype mapping, snbt prefix, and the
    # integer tag used to wrap scalar items.
    item_type = None
    array_prefix = None
    wrapper = None

    def __new__(cls, value=None, *, length=0, byteorder="big"):
        item_type = cls.item_type[byteorder]
        # Without an initial value, allocate a zero-filled array of `length`.
        if value is None:
            return np.zeros((length,), item_type).view(cls)
        return np.asarray(value, item_type).view(cls)

    @classmethod
    def parse(cls, fileobj, byteorder="big"):
        """Override :meth:`Base.parse` for array tags."""
        item_type = cls.item_type[byteorder]
        # The payload is a 4-byte length followed by that many raw items.
        data = fileobj.read(read_numeric(INT, fileobj, byteorder) * item_type.itemsize)
        return cls(np.frombuffer(data, item_type), byteorder=byteorder)

    def write(self, fileobj, byteorder="big"):
        """Override :meth:`Base.write` for array tags."""
        write_numeric(INT, len(self), fileobj, byteorder)
        # Swap bytes when the in-memory dtype doesn't match the requested
        # byte order (identity check against the cached dtype objects).
        array = self if self.item_type[byteorder] is self.dtype else self.byteswap()
        fileobj.write(array.tobytes())

    def unpack(self, json=False):
        """Override :meth:`Base.unpack` for array tags."""
        return self.tolist() if json else np.copy(self)

    def __getitem__(self, index):
        if isinstance(index, slice):
            return super().__getitem__(index)
        # Wrap scalar items in the associated integer tag type, bypassing
        # its range-checking __new__ (the value already fits by construction).
        return int.__new__(self.wrapper, super().__getitem__(index))

    def __bool__(self):
        # Unlike ndarray, an array tag is truthy only when all items are nonzero.
        return all(self)

    def __repr__(self):
        return f'{self.__class__.__name__}([{", ".join(map(str, self))}])'
class ByteArray(Array):
    """Nbt tag representing an array of signed bytes."""

    __slots__ = ()
    tag_id = 7
    item_type = get_format(np.dtype, "b")
    array_prefix = "B"  # literal prefix used by the "array" serializer
    wrapper = Byte
class String(Base, str):
    """Nbt tag representing a string.

    Binary form is a length prefix followed by utf-8 data (see
    ``read_string``/``write_string``).
    """

    __slots__ = ()
    tag_id = 8
    serializer = "string"

    @classmethod
    def parse(cls, fileobj, byteorder="big"):
        """Override :meth:`Base.parse` for string tags."""
        return cls(read_string(fileobj, byteorder))

    def write(self, fileobj, byteorder="big"):
        """Override :meth:`Base.write` for string tags."""
        write_string(self, fileobj, byteorder)

    def unpack(self, json=False):
        """Override :meth:`Base.unpack` for string tags."""
        return str(self)
class List(Base, list):
    """Nbt tag representing a list of other nbt tags.

    Nbt lists are homogeneous and can only hold a single type of tag. This
    constraint is enforced by requiring the :class:`List` class to be
    subclassed and define an appropriate :attr:`subtype` attribute. The
    ``class_getitem`` operator is defined so that
    ``List[TagName]`` returns a subclass with the subtype ``TagName``.

    .. doctest::

        >>> List[Int]
        <class 'nbtlib.tag.List[Int]'>
        >>> List[Int].subtype
        <class 'nbtlib.tag.Int'>

    The base class constructor returns an instance of the appropriate
    subtype if it can infer the subtype from the elements of the given
    iterable. Check out :meth:`infer_list_subtype` for details.

    .. doctest::

        >>> List([Int(123)])
        List[Int]([Int(123)])

    The class inherits from the :class:`Base` class and the ``list``
    builtin. The inherited mutating operations are overridden to include
    an ``isinstance`` check. For example, the :meth:`append` method
    will raise an :class:`IncompatibleItemType` exception if the list
    subtype doesn't match the item type.

    .. doctest::

        >>> strings = List[String]()
        >>> strings.append(Int(123))
        Traceback (most recent call last):
        ...
        nbtlib.tag.IncompatibleItemType: Int(123) should be a String tag

    To make things a bit more ergonomic, arbitrary python objects are
    transparently converted to the list subtype.

    .. doctest::

        >>> strings.append(String("foo"))
        >>> strings.append("bar")
        >>> strings
        List[String]([String('foo'), String('bar')])

    However, note that impossible conversions raise a :class:`CastError`.

    .. doctest::

        >>> List[Int](["foo"])
        Traceback (most recent call last):
        ...
        nbtlib.tag.CastError: Couldn't cast 'foo' to Int

    Finally, list tags support path indexing. Check out the
    :ref:`path documentation <NBT Paths>` for more details.

    .. doctest::

        >>> compounds = List([
        ...     Compound({"foo": Int(123)}),
        ...     Compound({"foo": Int(456)}),
        ... ])
        >>> compounds[Path("[{foo: 456}]")]
        Compound({'foo': Int(456)})
    """

    __slots__ = ()
    tag_id = 9
    serializer = "list"

    # Cache of generated List[TagName] subclasses, keyed by subtype.
    variants = {}
    # End means "subtype not yet determined" (an empty, untyped list).
    subtype = End

    def __new__(cls, iterable=()):
        if cls.subtype is End:
            # Materialize the iterable so it can be scanned for a subtype
            # and then consumed again by __init__.
            iterable = tuple(iterable)
            subtype = cls.infer_list_subtype(iterable)
            cls = cls[subtype]
        return super().__new__(cls, iterable)

    def __init__(self, iterable=()):
        super().__init__(map(self.cast_item, iterable))

    def __class_getitem__(cls, item):
        if item is End:
            return List
        try:
            return cls.variants[item]
        except KeyError:
            variant = type(
                f"List[{item.__name__}]", (List,), {"__slots__": (), "subtype": item}
            )
            cls.variants[item] = variant
            return variant

    @staticmethod
    def infer_list_subtype(items):
        """Infer a list subtype from a list of items.

        .. doctest::

            >>> List.infer_list_subtype([Int(123)])
            <class 'nbtlib.tag.Int'>

        This method is used by the base :class:`List` constructor to figure
        out the subtype of the :class:`List` subclass that should be returned.

        Arguments:
            items:
                Can be any kind of iterable containing at least one tag instance
                and zero or more python objects convertible to the type of the
                tag instance.

                .. doctest::

                    >>> List.infer_list_subtype([123, Int(456)])
                    <class 'nbtlib.tag.Int'>
        """
        subtype = End
        for item in items:
            item_type = type(item)
            # Plain python objects don't participate in inference.
            if not issubclass(item_type, Base):
                continue
            if subtype is End:
                subtype = item_type
                # Non-list subtypes settle the answer immediately; nested
                # lists keep scanning to refine their own subtype.
                if not issubclass(subtype, List):
                    return subtype
            elif subtype is not item_type:
                # Two different nested list types: walk down both subtype
                # chains to find the deepest common list nesting.
                stype, itype = subtype, item_type
                generic = List
                while issubclass(stype, List) and issubclass(itype, List):
                    stype, itype = stype.subtype, itype.subtype
                    generic = List[generic]
                if stype is End:
                    subtype = item_type
                elif itype is not End:
                    return generic.subtype
        return subtype

    @classmethod
    def parse(cls, fileobj, byteorder="big"):
        """Override :meth:`Base.parse` for list tags."""
        # Payload: 1-byte item tag id, 4-byte length, then the items.
        tag = cls.get_tag(read_numeric(BYTE, fileobj, byteorder))
        length = read_numeric(INT, fileobj, byteorder)
        return cls[tag](tag.parse(fileobj, byteorder) for _ in range(length))

    def write(self, fileobj, byteorder="big"):
        """Override :meth:`Base.write` for list tags."""
        write_numeric(BYTE, self.subtype.tag_id, fileobj, byteorder)
        write_numeric(INT, len(self), fileobj, byteorder)
        for elem in self:
            elem.write(fileobj, byteorder)

    def match(self, other):
        """Override :meth:`Base.match` for list tags.

        The method returns ``True`` if all the elements of the iterable
        appear at least once in the current instance.

        .. doctest::

            >>> List[Int]([1, 2, 3]).match([3, 1])
            True
        """
        if not isinstance(other, list):
            return False
        # An empty pattern only matches an empty list.
        if not other:
            return not self
        return all(any(item.match(other_item) for item in self) for other_item in other)

    def unpack(self, json=False):
        """Override :meth:`Base.unpack` for list tags."""
        return [item.unpack(json) for item in self]

    def find(self, key, default=None):
        """Return the first recursively matching tag.

        .. doctest::

            >>> tag = parse_nbt("[{data: {value: 42}}, {data: {value: 7}}]")
            >>> tag.find(Path("data.value"))
            Int(42)
            >>> tag.find("value")
            Int(42)

        Arguments:
            key: Can be a string, an integer, a slice or an instance of :class:`nbtlib.path.Path`.
            default: Returned when the element could not be found.
        """
        value = find_tag(key, [self])
        return default if value is None else value

    def get(self, index, default=None):
        """Return the element at the specified index.

        Arguments:
            index: Can be an integer, a slice or an instance of :class:`nbtlib.path.Path`.
            default: Returned when the element could not be found.
        """
        return (self.get_all(index) or [default])[0]

    def get_all(self, index):
        """Return all the elements matching the specified index.

        Arguments:
            index: Can be an integer, a slice or an instance of :class:`nbtlib.path.Path`.
        """
        try:
            return (
                [super().__getitem__(index)]
                if isinstance(index, (int, slice))
                else index.get(self)
            )
        except (IndexError, AttributeError):
            return []

    def __getitem__(self, index):
        if isinstance(index, (int, slice)):
            return super().__getitem__(index)
        # Path-based indexing: delegate to the path object.
        values = index.get(self)
        if not values:
            raise IndexError(index)
        return values[0]

    def __setitem__(self, index, value):
        if isinstance(index, (int, slice)):
            super().__setitem__(
                index,
                [self.cast_item(item) for item in value]
                if isinstance(index, slice)
                else self.cast_item(value),
            )
        else:
            index.set(self, value)

    def __delitem__(self, index):
        if isinstance(index, (int, slice)):
            super().__delitem__(index)
        else:
            index.delete(self)

    def append(self, value):
        """Override ``list.append`` to include ``isinstance`` check and auto conversion."""
        super().append(self.cast_item(value))

    def extend(self, iterable):
        """Override ``list.extend`` to include ``isinstance`` check and auto conversion."""
        super().extend(map(self.cast_item, iterable))

    def insert(self, index, value):
        """Override ``list.insert`` to include ``isinstance`` check and auto conversion."""
        super().insert(index, self.cast_item(value))

    @classmethod
    def cast_item(cls, item):
        """Cast list item to the appropriate tag type.

        .. doctest::

            >>> List[Int].cast_item(123)
            Int(123)

        Arguments:
            item:
                Can be any object convertible to the current tag type. If the
                conversion fails, the method raises a :class:`CastError`.
        """
        if not isinstance(item, cls.subtype):
            # A value that is already some other concrete tag is rejected
            # outright instead of being silently converted.
            incompatible = isinstance(item, Base) and not any(
                issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
                for tag_type in cls.all_tags.values()
            )
            if incompatible:
                raise IncompatibleItemType(item, cls.subtype)
            try:
                return cls.subtype(item)
            except EndInstantiation:
                raise ValueError(
                    "List tags without an explicit subtype must "
                    "either be empty or instantiated with "
                    "elements from which a subtype can be "
                    "inferred"
                ) from None
            except (IncompatibleItemType, CastError):
                raise
            except Exception as exc:
                raise CastError(item, cls.subtype) from exc
        return item
class Compound(Base, dict):
"""Nbt tag that represents a mapping of strings to other nbt tags.
The class inherits from the :class:`Base` class and the ``dict``
builtin. Compound tag instances support path indexing. Check out the
:ref:`path documentation <NBT Paths>` for more details.
.. doctest::
>>> compound = Compound({'foo': Compound({'bar': Int(123)})})
>>> compound[Path("foo.bar")]
Int(123)
Attributes:
end_tag: Bytes used to mark the end of the compound.
"""
__slots__ = ()
tag_id = 10
serializer = "compound"
end_tag = b"\x00"
@classmethod
def parse(cls, fileobj, byteorder="big"):
"""Override :meth:`Base.parse` for compound tags."""
self = cls()
tag_id = read_numeric(BYTE, fileobj, byteorder)
while tag_id != 0:
name = read_string(fileobj, byteorder)
self[name] = cls.get_tag(tag_id).parse(fileobj, byteorder)
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | true |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/nbt.py | nbtlib/nbt.py | """
.. testsetup::
import io
import nbtlib
from nbtlib import *
The library supports reading and writing nbt data in all its forms and
treats everything as uncompressed big-endian nbt by default.
You can load nbt files with the :func:`load` function.
.. doctest::
>>> nbtlib.load("docs/hello_world.nbt")
<File 'hello world': Compound({...})>
The function will figure out by itself if the file is gzipped before loading
it. You can set the ``byteorder`` parameter to ``"little"`` if the file is
little-endian.
.. doctest::
>>> nbtlib.load("docs/hello_world_little.nbt", byteorder="little")
<File 'hello world': Compound({...})>
You can create new nbt files by instantiating the :class:`File` class with
the desired nbt data and calling the :meth:`File.save` method.
.. doctest::
>>> nbt_file = nbtlib.File({"demo": Compound({"counter": Int(0)})})
>>> nbt_file.save("docs/demo.nbt")
>>> nbtlib.load("docs/demo.nbt")
<File 'demo': Compound({'counter': Int(0)})>
The :meth:`File.save` method can output gzipped or little-endian nbt
by using the ``gzipped`` and ``byteorder`` arguments respectively.
.. doctest::
>>> demo = nbtlib.load("docs/demo.nbt")
>>> # overwrite
>>> demo.save()
>>> # make a gzipped copy
>>> demo.save("docs/demo_copy.nbt", gzipped=True)
>>> # convert the file to little-endian
>>> demo.save("docs/demo_little.nbt", byteorder="little")
"""
__all__ = ["load", "File"]
import gzip
from .tag import BYTE, Compound, read_numeric, read_string, write_numeric, write_string
def load(filename, *, gzipped=None, byteorder="big"):
    """Load the nbt file at the specified location and return a :class:`File`.

    The returned :class:`File` instance behaves like a dict and can be
    used as a context manager; the file is saved automatically at the
    end of the ``with`` statement.

    Arguments:
        filename: Path of the nbt file to read.
        gzipped: Whether the file is gzipped. When the argument is
            omitted, the function reads the magic number of the file
            to figure out if the file is gzipped, and delegates to
            :meth:`File.load` when specified explicitly.
        byteorder: Whether the file is big-endian or little-endian.
            Either ``"big"`` (the default) or ``"little"``.
    """
    if gzipped is not None:
        # Compression was specified explicitly: let `File.load` handle it.
        return File.load(filename, gzipped, byteorder)
    # Otherwise sniff the two-byte gzip magic number to detect compression.
    with open(filename, "rb") as fileobj:
        is_gzipped = fileobj.read(2) == b"\x1f\x8b"
        fileobj.seek(0)
        source = gzip.GzipFile(fileobj=fileobj) if is_gzipped else fileobj
        return File.from_fileobj(source, byteorder)
class File(Compound):
    r"""Class representing a compound nbt file.

    .. doctest::

        >>> nbt_file = nbtlib.File({
        ...     "Data": nbtlib.Compound({
        ...         "hello": nbtlib.String("world")
        ...     })
        ... })

    The class inherits from :class:`Compound`, so all the builtin ``dict``
    operations inherited by the :class:`nbtlib.tag.Compound` class are
    also available on :class:`File` instances.

    .. doctest::

        >>> nbt_file.items()
        dict_items([('Data', Compound({'hello': String('world')}))])
        >>> nbt_file["Data"]
        Compound({'hello': String('world')})

    You can write nbt data to an already opened file-like object with the
    inherited :meth:`nbtlib.tag.Compound.write` method.

    .. doctest::

        >>> fileobj = io.BytesIO()
        >>> nbt_file.write(fileobj)
        >>> fileobj.getvalue()
        b'\n\x00\x04Data\x08\x00\x05hello\x00\x05world\x00'

    If you need to load files from an already opened file-like object, you can
    use the inherited :meth:`nbtlib.tag.Compound.parse` classmethod.

    .. doctest::

        >>> fileobj.seek(0)
        0
        >>> nbtlib.File.parse(fileobj) == nbt_file
        True

    Attributes:
        filename:
            The name of the file, ``None`` by default. The attribute is
            set automatically when the file is returned from the
            :func:`load` helper function and can also be set in the
            constructor.

            .. doctest::

                >>> nbt_file.filename is None
                True
                >>> nbtlib.load("docs/demo.nbt").filename
                'docs/demo.nbt'

        gzipped:
            Boolean indicating if the file is gzipped. The attribute can
            also be set in the constructor. New files are uncompressed
            by default.

            .. doctest::

                >>> nbtlib.File(nbt_file, gzipped=True).gzipped
                True

        byteorder:
            The byte order, either ``"big"`` or ``"little"``. The
            attribute can also be set in the constructor. New files are
            big-endian by default.

            .. doctest::

                >>> nbtlib.File(nbt_file, byteorder="little").byteorder
                'little'
    """

    def __init__(
        self, *args, gzipped=False, byteorder="big", filename=None, root_name=""
    ):
        super().__init__(*args)
        self.filename = filename
        self.gzipped = gzipped
        self.byteorder = byteorder
        self.root_name = root_name

    @classmethod
    def parse(cls, fileobj, byteorder="big"):
        """Override :meth:`nbtlib.tag.Base.parse` for nbt files."""
        # An nbt file starts with the root tag id (which must match the
        # compound tag id), followed by the root tag name and the payload.
        tag_id = read_numeric(BYTE, fileobj, byteorder)
        if not tag_id == cls.tag_id:
            raise TypeError(
                f"Non-Compound root tags is not supported: {cls.get_tag(tag_id)}"
            )
        name = read_string(fileobj, byteorder)
        self = super().parse(fileobj, byteorder)
        self.root_name = name
        return self

    def write(self, fileobj, byteorder="big"):
        """Override :meth:`nbtlib.tag.Base.write` for nbt files."""
        # Mirror of `parse`: tag id, root name, then the compound payload.
        write_numeric(BYTE, self.tag_id, fileobj, byteorder)
        write_string(self.root_name, fileobj, byteorder)
        super().write(fileobj, byteorder)

    @classmethod
    def from_fileobj(cls, fileobj, byteorder="big"):
        """Load an nbt file from a proper file object.

        The method is used by the :func:`load` helper function when the
        ``gzipped`` keyword-only argument is not specified explicitly.

        Arguments:
            fileobj:
                Can be either a standard ``io.BufferedReader`` for
                uncompressed nbt or a ``gzip.GzipFile`` for gzipped nbt
                data. The function simply calls the inherited
                :meth:`nbtlib.tag.Compound.parse` classmethod and sets the
                :attr:`filename` and :attr:`gzipped` attributes depending
                on the argument.
            byteorder:
                Can be either ``"big"`` or ``"little"``. The argument is
                forwarded to :meth:`nbtlib.tag.Compound.parse`.
        """
        self = cls.parse(fileobj, byteorder)
        # Regular file objects expose a `name` attribute; fall back to the
        # default (None) when the object has no name.
        self.filename = getattr(fileobj, "name", self.filename)
        self.gzipped = isinstance(fileobj, gzip.GzipFile)
        self.byteorder = byteorder
        return self

    @classmethod
    def load(cls, filename, gzipped, byteorder="big"):
        """Read, parse and return the nbt file at the specified location.

        The method is used by the :func:`load` helper function when the
        ``gzipped`` keyword-only argument is specified explicitly.
        The function opens the file and uses :meth:`from_fileobj` to return
        the :class:`File` instance.

        Arguments:
            filename: The name of the file.
            gzipped: Whether the file is gzipped or not.
            byteorder: Can be either ``"big"`` or ``"little"``.
        """
        open_file = gzip.open if gzipped else open
        with open_file(filename, "rb") as fileobj:
            return cls.from_fileobj(fileobj, byteorder)

    def save(self, filename=None, *, gzipped=None, byteorder=None):
        """Write the file at the specified location.

        The method is called without any argument at the end of ``with``
        statements when the :class:`File` instance is used as a
        context manager.

        .. doctest::

            >>> with demo:
            ...     demo['counter'] = nbtlib.Int(0)

        This essentially overwrites the file at the end of the ``with`` statement.

        Arguments:
            filename: The name of the file. Defaults to the instance's :attr:`filename` attribute.
            gzipped: Whether the file should be gzipped. Defaults to the instance's :attr:`gzipped` attribute.
            byteorder: Whether the file should be big-endian or little-endian. Defaults to the instance's :attr:`byteorder` attribute.
        """
        # Arguments default to the values stored on the instance.
        if gzipped is None:
            gzipped = self.gzipped
        if filename is None:
            filename = self.filename
        if filename is None:
            raise ValueError("No filename specified")
        open_file = gzip.open if gzipped else open
        with open_file(filename, "wb") as fileobj:
            self.write(fileobj, byteorder or self.byteorder)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Overwrite the file when leaving the `with` block.
        self.save()

    def __eq__(self, other) -> bool:
        # NOTE(review): this accesses `other.root_name`, so comparing a
        # File against an object without that attribute (e.g. a plain
        # Compound) raises AttributeError — confirm this is intended.
        return super().__eq__(other) and self.root_name == other.root_name

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.root_name!r}: {dict.__repr__(self)}>"
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/__main__.py | nbtlib/__main__.py | from .cli import main
if __name__ == "__main__":
    # Entry point for ``python -m nbtlib``.
    main()
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/__init__.py | nbtlib/__init__.py | from .tag import *
from .nbt import *
from .path import *
from .schema import *
from .literal.parser import *
from .literal.serializer import *
__version__ = "2.0.4"
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/contrib/__init__.py | nbtlib/contrib/__init__.py | python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false | |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/contrib/minecraft/structure.py | nbtlib/contrib/minecraft/structure.py | __all__ = ["StructureFile", "StructureFileData"]
from nbtlib import File, CompoundSchema, tag
class StructureFileData(CompoundSchema):
    """Schema that matches the Minecraft structure file format."""

    class BlockState(CompoundSchema):
        # Palette entry: a block id and its block state properties.
        schema = {
            "Name": tag.String,
            "Properties": tag.Compound,
        }

    class Block(CompoundSchema):
        # A placed block: palette index, position, optional block entity nbt.
        schema = {
            "state": tag.Int,
            "pos": tag.List[tag.Int],
            "nbt": tag.Compound,
        }

    class Entity(CompoundSchema):
        # An entity: exact position, block position and entity nbt.
        schema = {
            "pos": tag.List[tag.Double],
            "blockPos": tag.List[tag.Int],
            "nbt": tag.Compound,
        }

    # Top-level layout of the structure file payload.
    schema = {
        "DataVersion": tag.Int,
        "author": tag.String,
        "size": tag.List[tag.Int],
        "palette": tag.List[BlockState],
        "palettes": tag.List[tag.List[BlockState]],
        "blocks": tag.List[Block],
        "entities": tag.List[Entity],
    }
class StructureFile(File, CompoundSchema):
    """Class representing a Minecraft structure file."""

    # The whole payload lives under the unnamed root key.
    schema = {"": StructureFileData}
    # Reject keys that are not part of the structure schema.
    strict = True

    def __init__(self, structure_data=None, *, filename=None):
        # Structure files are always gzipped.
        super().__init__({"": structure_data or {}}, gzipped=True, filename=filename)

    @classmethod
    def load(cls, filename):
        return super().load(filename, gzipped=True)
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/contrib/minecraft/__init__.py | nbtlib/contrib/minecraft/__init__.py | from .structure import *
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/literal/serializer.py | nbtlib/literal/serializer.py | """This module exposes utilities for serializing nbt tags to snbt.
Exported functions:
serialize_tag -- Helper function that serializes nbt tags
Exported classes:
Serializer -- Class that can turn nbt tags into their literal representation
Exported objects:
STRING_QUOTES -- Maps the two types of quote to each other
ESCAPE_SEQUENCES -- Maps escape sequences to their substitution
ESCAPE_SUBS -- Maps substitutions to their escape sequence
"""
__all__ = [
"serialize_tag",
"STRING_QUOTES",
"ESCAPE_SEQUENCES",
"ESCAPE_SUBS",
"Serializer",
]
import re
from contextlib import contextmanager
# Quoted string escaping: each quote character maps to the other one, so
# the serializer can pick whichever quote a string does not contain.
STRING_QUOTES = {
    '"': "'",
    "'": '"',
}
QUOTE_REGEX = re.compile("|".join(STRING_QUOTES))
ESCAPE_SEQUENCES = {fr"\{q}": q for q in STRING_QUOTES}
ESCAPE_SEQUENCES[r"\\"] = "\\"
# Inverse mapping (substitution -> escape sequence), built in reverse
# insertion order so the backslash entry comes first and backslashes get
# escaped before quote characters in `Serializer.escape_string`.
ESCAPE_SUBS = dict(reversed(tuple(map(reversed, ESCAPE_SEQUENCES.items()))))
# Detect if a compound key can be represented unquoted
UNQUOTED_COMPOUND_KEY = re.compile(r"^[a-zA-Z0-9._+-]+$")
# User-friendly helper
def serialize_tag(tag, *, indent=None, compact=False, quote=None):
    """Serialize an nbt tag to its literal representation."""
    return Serializer(indent=indent, compact=compact, quote=quote).serialize(tag)
# Implement serializer
class Serializer:
    """Nbt tag serializer.

    Arguments:
        indent: Indentation unit — a number of spaces or a literal string.
            ``None`` (the default) produces single-line output.
        compact: Whether to omit the spaces after commas, colons and
            semicolons.
        quote: Preferred quote character for strings. When unset, the
            quote is chosen per-string to minimize escaping.
    """

    def __init__(self, *, indent=None, compact=False, quote=None):
        # `indent` may be an int (number of spaces) or a literal string.
        self.indentation = indent * " " if isinstance(indent, int) else indent
        self.comma = "," if compact else ", "
        self.colon = ":" if compact else ": "
        self.semicolon = ";" if compact else "; "
        # Current and previous indentation prefixes, maintained by `depth`.
        self.indent = ""
        self.previous_indent = ""
        self.quote = quote

    @contextmanager
    def depth(self):
        """Increase the level of indentation by one."""
        if self.indentation is None:
            # Single-line mode: nothing to track.
            yield
        else:
            # Save and restore both prefixes around the nested scope.
            previous = self.previous_indent
            self.previous_indent = self.indent
            self.indent += self.indentation
            yield
            self.indent = self.previous_indent
            self.previous_indent = previous

    def should_expand(self, tag):
        """Return whether the specified tag should be expanded."""
        # Only expand in indented mode and for non-empty tags: always at
        # the top level, and below that for compounds and for lists whose
        # elements are themselves containers.
        return (
            self.indentation is not None
            and tag
            and (
                not self.previous_indent
                or (
                    tag.serializer == "list"
                    and tag.subtype.serializer in ("array", "list", "compound")
                )
                or (tag.serializer == "compound")
            )
        )

    def expand(self, separator, fmt):
        """Return the expanded version of the separator and format string."""
        return (
            f"{separator}\n{self.indent}",
            fmt.replace("{}", f"\n{self.indent}{{}}\n{self.previous_indent}"),
        )

    def escape_string(self, string):
        """Return the escaped literal representation of an nbt string."""
        if self.quote:
            quote = self.quote
        else:
            # Prefer a quote character that does not appear in the string;
            # default to the first entry (the double quote).
            found = QUOTE_REGEX.search(string)
            quote = STRING_QUOTES[found.group()] if found else next(iter(STRING_QUOTES))
        # ESCAPE_SUBS is ordered with the backslash first, so backslashes
        # are escaped before the chosen quote character. The other quote
        # character is left unescaped.
        for match, seq in ESCAPE_SUBS.items():
            if match == quote or match not in STRING_QUOTES:
                string = string.replace(match, seq)
        return f"{quote}{string}{quote}"

    def stringify_compound_key(self, key):
        """Escape the compound key if it can't be represented unquoted."""
        if UNQUOTED_COMPOUND_KEY.match(key):
            return key
        return self.escape_string(key)

    def serialize(self, tag):
        """Return the literal representation of a tag."""
        # Dispatch on the tag's `serializer` attribute.
        handler = getattr(self, f"serialize_{tag.serializer}", None)
        if handler is None:
            raise TypeError(f"Can't serialize {type(tag)!r} instance")
        return handler(tag)

    def serialize_numeric(self, tag):
        """Return the literal representation of a numeric tag."""
        # Use the builtin reprs to bypass the tag's own __repr__.
        str_func = int.__repr__ if isinstance(tag, int) else float.__repr__
        return str_func(tag) + tag.suffix

    def serialize_array(self, tag):
        """Return the literal representation of an array tag."""
        # Elements go through the module-level `serialize_tag` helper
        # (default settings, so arrays are never expanded) and `.upper()`
        # uppercases the numeric suffixes.
        elements = self.comma.join(map(serialize_tag, tag)).upper()
        return f"[{tag.array_prefix}{self.semicolon}{elements}]"

    def serialize_string(self, tag):
        """Return the literal representation of a string tag."""
        return self.escape_string(tag)

    def serialize_list(self, tag):
        """Return the literal representation of a list tag."""
        separator, fmt = self.comma, "[{}]"
        with self.depth():
            if self.should_expand(tag):
                separator, fmt = self.expand(separator, fmt)
            return fmt.format(separator.join(map(self.serialize, tag)))

    def serialize_compound(self, tag):
        """Return the literal representation of a compound tag."""
        separator, fmt = self.comma, "{{{}}}"
        with self.depth():
            if self.should_expand(tag):
                separator, fmt = self.expand(separator, fmt)
            return fmt.format(
                separator.join(
                    f"{self.stringify_compound_key(key)}{self.colon}{self.serialize(value)}"
                    for key, value in tag.items()
                )
            )
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/literal/parser.py | nbtlib/literal/parser.py | """This module exposes utilities for parsing snbt.
Exported functions:
parse_nbt -- Helper function that parses nbt literals
tokenize -- Generator that lazily yields tokens from a string
Exported classes:
Parser -- Class that can parse nbt tags from a literal token stream
Exported exceptions:
InvalidLiteral -- Raised when parsing invalid nbt literals
"""
__all__ = ["parse_nbt", "InvalidLiteral", "tokenize", "Parser"]
import re
from collections import namedtuple
from .serializer import STRING_QUOTES, ESCAPE_SEQUENCES, ESCAPE_SUBS
from ..tag import (
Byte,
Short,
Int,
Long,
Float,
Double,
ByteArray,
String,
List,
Compound,
IntArray,
LongArray,
OutOfRange,
IncompatibleItemType,
)
# Token definition
ESCAPE_REGEX = re.compile(r"\\.")

# Regex fragment for each token type. Order matters: the patterns are
# tried in insertion order, so the INVALID catch-all must stay last.
TOKENS = {
    "QUOTED_STRING": "|".join(
        fr"{q}(?:{ESCAPE_REGEX.pattern}|[^\\])*?{q}" for q in STRING_QUOTES
    ),
    "NUMBER": r"[+-]?(?:[0-9]*?\.[0-9]+|[0-9]+\.[0-9]*?|[1-9][0-9]*|0)([eE][+-]?[0-9]+)?[bslfdBSLFD]?(?![a-zA-Z0-9._+-])",
    "STRING": r"[a-zA-Z0-9._+-]+",
    "COMPOUND": r"\{",
    "CLOSE_COMPOUND": r"\}",
    "BYTE_ARRAY": r"\[B;",
    "INT_ARRAY": r"\[I;",
    "LONG_ARRAY": r"\[L;",
    "LIST": r"\[",
    "CLOSE_BRACKET": r"\]",
    "COLON": r":",
    "COMMA": r",",
    "INVALID": r".+?",
}

# Build the regex: one named group per token type, surrounding
# whitespace consumed by the pattern itself.
TOKENS_REGEX = re.compile(
    "|".join(fr"\s*(?P<{key}>{value})\s*" for key, value in TOKENS.items())
)

# Associate number suffixes to tag types
NUMBER_SUFFIXES = {"b": Byte, "s": Short, "l": Long, "f": Float, "d": Double}

# Define literal aliases
LITERAL_ALIASES = {
    "true": Byte(1),
    "false": Byte(0),
}
# Custom errors
class InvalidLiteral(ValueError):
    """Exception raised when parsing invalid nbt literals.

    Instantiated with two positional arguments: a ``(start, end)`` tuple
    locating the error in the input nbt string, and the actual error
    message.
    """

    def __str__(self):
        position = self.args[0][0]
        message = self.args[1]
        return f"{message} at position {position}"
# User-friendly helper
def parse_nbt(literal):
    """Parse a literal nbt string and return the resulting tag."""
    parser = Parser(tokenize(literal))
    result = parser.parse()
    # Anything left after the parsed tag (other than whitespace) is an error.
    end = parser.token_span[1]
    trailing = literal[end:]
    if not trailing.strip():
        return result
    parser.token_span = end, end + len(trailing)
    raise parser.error(f"Expected end of string but got {trailing!r}")
# Implement tokenization
Token = namedtuple("Token", ["type", "value", "span"])
def tokenize(string):
    """Match and yield all the tokens of the input string."""
    yield from (
        Token(match.lastgroup, match.group().strip(), match.span())
        for match in TOKENS_REGEX.finditer(string)
    )
# Implement parser
class Parser:
    """Nbt literal parser.

    The parser needs to be instantiated with a token stream as argument.
    Using the `parse` method will return the corresponding nbt tag.
    The parser will raise an InvalidLiteral exception if it encounters
    an invalid nbt literal while parsing.
    """

    def __init__(self, token_stream):
        self.token_stream = iter(token_stream)
        self.current_token = None
        self.token_span = (0, 0)
        # Prime the parser with the first token.
        self.next()

    def error(self, message):
        """Create an InvalidLiteral using the current token position."""
        return InvalidLiteral(self.token_span, message)

    def next(self):
        """Move to the next token in the token stream."""
        self.current_token = next(self.token_stream, None)
        if self.current_token is None:
            # Collapse the span onto the end of the last consumed token so
            # the error points just past the input.
            self.token_span = self.token_span[1], self.token_span[1]
            raise self.error("Unexpected end of input")
        self.token_span = self.current_token.span
        # Returning self allows chaining, e.g. `self.next().current_token`.
        return self

    def parse(self):
        """Parse and return an nbt literal from the token stream."""
        # Dispatch to a `parse_<token_type>` handler.
        token_type = self.current_token.type.lower()
        handler = getattr(self, f"parse_{token_type}", None)
        if handler is None:
            raise self.error(f"Invalid literal {self.current_token.value!r}")
        return handler()

    def parse_quoted_string(self):
        """Parse a quoted string from the token stream."""
        return String(self.unquote_string(self.current_token.value))

    def parse_number(self):
        """Parse a number from the token stream."""
        value = self.current_token.value
        suffix = value[-1].lower()
        try:
            if suffix in NUMBER_SUFFIXES:
                return NUMBER_SUFFIXES[suffix](value[:-1])
            return Double(value) if "." in value else Int(value)
        except (OutOfRange, ValueError):
            # Numbers that don't fit their type fall back to plain strings.
            return String(value)

    def parse_string(self):
        """Parse a regular unquoted string from the token stream."""
        # `true`/`false` aliases turn into Byte(1)/Byte(0).
        aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
        if aliased_value is not None:
            return aliased_value
        return String(self.current_token.value)

    def collect_tokens_until(self, token_type):
        """Yield the item tokens in a comma-separated tag collection."""
        self.next()
        if self.current_token.type == token_type:
            # Empty collection.
            return
        while True:
            yield self.current_token
            self.next()
            if self.current_token.type == token_type:
                return
            if self.current_token.type != "COMMA":
                raise self.error(f"Expected comma but got {self.current_token.value!r}")
            self.next()

    def parse_compound(self):
        """Parse a compound from the token stream."""
        compound_tag = Compound()
        for token in self.collect_tokens_until("CLOSE_COMPOUND"):
            item_key = token.value
            if token.type not in ("NUMBER", "STRING", "QUOTED_STRING"):
                raise self.error(f"Expected compound key but got {item_key!r}")
            if token.type == "QUOTED_STRING":
                item_key = self.unquote_string(item_key)
            # Each entry is `<key> COLON <value>`.
            if self.next().current_token.type != "COLON":
                raise self.error(f"Expected colon but got {self.current_token.value!r}")
            self.next()
            compound_tag[item_key] = self.parse()
        return compound_tag

    def array_items(self, number_type, *, number_suffix=""):
        """Parse and yield array items from the token stream."""
        for token in self.collect_tokens_until("CLOSE_BRACKET"):
            is_number = token.type == "NUMBER"
            value = token.value.lower()
            if not (is_number and value.endswith(number_suffix)):
                raise self.error(f"Invalid {number_type} array element {token.value!r}")
            # Strip the numeric suffix (if any) before converting to int.
            yield int(value.replace(number_suffix, ""))

    def parse_byte_array(self):
        """Parse a byte array from the token stream."""
        return ByteArray(list(self.array_items("byte", number_suffix="b")))

    def parse_int_array(self):
        """Parse an int array from the token stream."""
        return IntArray(list(self.array_items("int")))

    def parse_long_array(self):
        """Parse a long array from the token stream."""
        return LongArray(list(self.array_items("long", number_suffix="l")))

    def parse_list(self):
        """Parse a list from the token stream."""
        try:
            return List(
                [self.parse() for _ in self.collect_tokens_until("CLOSE_BRACKET")]
            )
        except IncompatibleItemType as exc:
            # Re-raise as a parse error carrying position information.
            raise self.error(
                f"Item {str(exc.item)!r} is not a {exc.subtype.__name__} tag"
            ) from None

    def parse_invalid(self):
        """Parse an invalid token from the token stream."""
        raise self.error(f"Invalid token {self.current_token.value!r}")

    def unquote_string(self, string):
        """Return the unquoted value of a quoted string."""
        value = string[1:-1]
        # The escape sequence for the *other* quote character is not
        # allowed inside this quoting style.
        forbidden_sequences = {ESCAPE_SUBS[STRING_QUOTES[string[0]]]}
        valid_sequences = set(ESCAPE_SEQUENCES) - forbidden_sequences
        for seq in ESCAPE_REGEX.findall(value):
            if seq not in valid_sequences:
                raise self.error(f'Invalid escape sequence "{seq}"')
        for seq, sub in ESCAPE_SEQUENCES.items():
            value = value.replace(seq, sub)
        return value
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/nbtlib/literal/__init__.py | nbtlib/literal/__init__.py | python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false | |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/examples/level_dat_bedrock.py | examples/level_dat_bedrock.py | from io import BytesIO
from nbtlib import CompoundSchema, File, schema
from nbtlib.tag import (
INT,
Byte,
Float,
Int,
List,
Long,
String,
read_numeric,
write_numeric,
)
# fmt: off
BedrockLevelData = schema("BedrockLevelData", {
"CenterMapsToOrigin": Byte,
"Difficulty": Int,
"FlatWorldLayers": String,
"ForceGameType": Byte,
"GameType": Int,
"Generator": Int,
"InventoryVersion": String,
"LANBroadcast": Byte,
"LastPlayed": Long,
"LevelName": String,
"LimitedWorldOriginX": Int,
"LimitedWorldOriginY": Int,
"LimitedWorldOriginZ": Int,
"MultiplayerGame": Byte,
"NetherScale": Int,
"NetworkVersion": Int,
"Platform": Int,
"PlatformBroadcast": Byte,
"PlatformBroadcastMode": Int,
"RandomSeed": Long,
"SpawnX": Int,
"SpawnY": Int,
"SpawnZ": Int,
"StorageVersion": Int,
"Time": Long,
"XBLBroadcast": Byte,
"XBLBroadcastIntent": Byte,
"XBLBroadcastMode": Int,
"abilities": schema("Abilities", {
"attackmobs": Byte,
"attackplayers": Byte,
"buildandmine": Byte,
"doorsandswitches": Byte,
"flySpeed": Float,
"flying": Byte,
"instabuild": Byte,
"invulnerable": Byte,
"lightning": Byte,
"mayfly": Byte,
"op": Byte,
"opencontainers": Byte,
"permissionsLevel": Int,
"playerPermissionsLevel": Int,
"teleport": Byte,
"walkSpeed": Float,
}),
"bonusChestEnabled": Byte,
"bonusChestSpawned": Byte,
"commandblockoutput": Byte,
"commandsEnabled": Byte,
"currentTick": Long,
"dodaylightcycle": Byte,
"doentitydrops": Byte,
"dofiretick": Byte,
"domobloot": Byte,
"domobspawning": Byte,
"dotiledrops": Byte,
"doweathercycle": Byte,
"drowningdamage": Byte,
"eduLevel": Byte,
"educationFeaturesEnabled": Byte,
"experimentalgameplay": Byte,
"falldamage": Byte,
"firedamage": Byte,
"hasBeenLoadedInCreative": Byte,
"hasLockedBehaviorPack": Byte,
"hasLockedResourcePack": Byte,
"immutableWorld": Byte,
"isFromLockedTemplate": Byte,
"keepinventory": Byte,
"lastOpenedWithVersion": List[Int],
"lightningLevel": Float,
"lightningTime": Int,
"maxcommandchainlength": Int,
"mobgriefing": Byte,
"naturalregeneration": Byte,
"prid": String,
"pvp": Byte,
"rainLevel": Float,
"rainTime": Int,
"sendcommandfeedback": Byte,
"serverChunkTickRange": Int,
"showcoordinates": Byte,
"spawnMobs": Byte,
"startWithMapEnabled": Byte,
"texturePacksRequired": Byte,
"tntexplodes": Byte,
"worldStartCount": Long,
})
# fmt: on
class BedrockLevelFile(File, CompoundSchema):
    # Schema-checked ``level.dat`` file for Bedrock edition: little-endian
    # nbt preceded by an 8-byte header (storage version + payload length).
    schema = {"": BedrockLevelData}

    def __init__(
        self, level_data=None, version=8, *, gzipped=False, byteorder="little"
    ):
        super().__init__({"": level_data or {}}, gzipped=gzipped, byteorder=byteorder)
        self.version = version

    @classmethod
    def parse(cls, buff, byteorder="little"):
        # Consume the header before parsing the actual nbt payload.
        version = read_numeric(INT, buff, byteorder)
        _length = read_numeric(INT, buff, byteorder)  # recomputed on write
        self = super().parse(buff, byteorder)
        self.version = version
        return self

    def write(self, buff, byteorder="little"):
        # Serialize to a scratch buffer first so the payload length can be
        # written into the header ahead of the data.
        tmp = BytesIO()
        super().write(tmp, byteorder)
        tmp.seek(0)
        data = tmp.read()
        write_numeric(INT, self.version, buff, byteorder)
        write_numeric(INT, len(data), buff, byteorder)
        buff.write(data)

    @classmethod
    def from_buffer(cls, buff, byteorder="little"):
        # NOTE(review): the `File` base class in this codebase exposes
        # `from_fileobj`, not `from_buffer` — confirm this super() call
        # resolves against the nbtlib version this example targets.
        return super().from_buffer(buff, byteorder)

    @classmethod
    def load(cls, filename, gzipped=False, byteorder="little"):
        # Bedrock level.dat files are uncompressed and little-endian by default.
        return super().load(filename, gzipped, byteorder)
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/examples/uuid.py | examples/uuid.py | from uuid import UUID
from nbtlib import Long
def combine_uuid(uuid_most_tag, uuid_least_tag):
    """Combine the two signed ``Long`` halves of a uuid into a ``UUID``."""
    high = uuid_most_tag.as_unsigned
    low = uuid_least_tag.as_unsigned
    return UUID(int=(high << Long.bits) | low)
def split_uuid(uuid):
    """Split a ``UUID`` into its most/least significant ``Long`` halves."""
    value = uuid.int
    high = (value >> Long.bits) & Long.mask
    low = value & Long.mask
    return Long.from_unsigned(high), Long.from_unsigned(low)
if __name__ == "__main__":
    # Round-trip demo:
    # f8de0ffd-21e9-4cf2-804e-ecf099a67e39 is represented by
    # UUIDMost: -513955727603577614L
    # UUIDLeast: -9201156470557213127L
    uuid_most_tag = Long(-513955727603577614)
    uuid_least_tag = Long(-9201156470557213127)
    uuid = combine_uuid(uuid_most_tag, uuid_least_tag)
    # Splitting the combined uuid must recover the original halves.
    assert split_uuid(uuid) == (uuid_most_tag, uuid_least_tag)
    print(uuid)
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
vberlier/nbtlib | https://github.com/vberlier/nbtlib/blob/a6c0cd949e10f189581ca11026a0c600ee298e11/examples/level_dat.py | examples/level_dat.py | from nbtlib import *
# fmt: off
LevelData = schema("LevelData", {
"DataVersion": Int,
"DimensionData": schema("DimensionData", {
"1": schema("EndData", {
"DragonFight": schema("DragonFight", {
"ExitPortalLocation": schema("ExitPortalLocation", {
"X": Byte,
"Y": Byte,
"Z": Byte,
}),
"Gateways": List[Int],
"DragonKilled": Byte,
"DragonUUIDLeast": Long,
"DragonUUIDMost": Long,
"PreviouslyKilled": Byte,
})
})
}),
"version": Int,
"initialized": Byte,
"LevelName": String,
"generatorName": String,
"generatorVersion": Int,
"generatorOptions": String,
"RandomSeed": Long,
"MapFeatures": Byte,
"LastPlayed": Long,
"SizeOnDisk": Long,
"allowCommands": Byte,
"hardcore": Byte,
"GameType": Int,
"Difficulty": Byte,
"DifficultyLocked": Byte,
"Time": Long,
"DayTime": Long,
"SpawnX": Int,
"SpawnY": Int,
"SpawnZ": Int,
"BorderCenterX": Double,
"BorderCenterZ": Double,
"BorderSize": Double,
"BorderSafeZone": Double,
"BorderWarningBlocks": Double,
"BorderWarningTime": Double,
"BorderSizeLerpTarget": Double,
"BorderSizeLerpTime": Long,
"BorderDamagePerBlock": Double,
"raining": Byte,
"rainTime": Int,
"thundering": Byte,
"thunderTime": Int,
"clearWeatherTime": Int,
"Player": Compound,
"GameRules": schema("Gamerules", {
"announceAdvancements": String,
"commandBlockOutput": String,
"disableElytraMovementCheck": String,
"doDaylightCycle": String,
"doEntityDrops": String,
"doFireTick": String,
"doLimitedCrafting": String,
"doMobLoot": String,
"doMobSpawning": String,
"doTileDrops": String,
"doWeatherCycle": String,
"keepInventory": String,
"logAdminCommands": String,
"maxCommandChainLength": String,
"maxEntityCramming": String,
"mobGriefing": String,
"naturalRegeneration": String,
"randomTickSpeed": String,
"reducedDebugInfo": String,
"sendCommandFeedback": String,
"showDeathMessages": String,
"spawnRadius": String,
"spectatorsGenerateChunks": String,
}),
"Version": schema("Version", {
"Id": Int,
"Name": String,
"Snapshot": Byte,
}),
"DataPacks": schema("DataPacks", {
"Enabled": List[String],
"Disabled": List[String],
}),
})
# fmt: on
# fmt: off
LevelFileSchema = schema("LevelFileSchema", {
"Data": LevelData
})
# fmt: on
class LevelFile(File, LevelFileSchema):
    # Schema-checked wrapper around a Java edition ``level.dat`` file.

    @property
    def data(self):
        """The level data tag."""
        return self["Data"]

    @data.setter
    def data(self, value):
        self["Data"] = value

    @classmethod
    def load(cls, filename):
        # level.dat files are always gzipped.
        return super().load(filename, True)

    def __enter__(self):
        # Expose the inner "Data" compound directly inside `with` blocks;
        # the inherited __exit__ still saves the whole file on exit.
        return self.data
| python | MIT | a6c0cd949e10f189581ca11026a0c600ee298e11 | 2026-01-05T07:14:49.302206Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.